From b5ee27212e268395a2224389d519aacfc5768919 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 26 Oct 2021 10:03:05 -0700 Subject: [PATCH 001/133] docs: remove bug_report template (#10002) --- .github/ISSUE_TEMPLATE/BUG_REPORT.yml | 3 +- .github/ISSUE_TEMPLATE/Bug_report.md | 46 --------------------------- 2 files changed, 1 insertion(+), 48 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/Bug_report.md diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml index eb6187bc2f382..a9b657f105056 100644 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -1,6 +1,5 @@ name: Bug Report -description: File a bug report -title: "[Bug]: " +description: Create a bug report to help us improve labels: ["bug"] body: - type: markdown diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md deleted file mode 100644 index 28c6237ac75d1..0000000000000 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Bug report -labels: bug -about: Create a report to help us improve - ---- - - -### Relevant telegraf.conf: - -```toml - -``` - -### System info: - - - -### Docker - - - -### Steps to reproduce: - - - -1. ... -2. ... - -### Expected behavior: - - - -### Actual behavior: - - - -### Additional info: - - From 38aefd99b55450a6338c3e843487712110c2f3d2 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 26 Oct 2021 11:03:41 -0600 Subject: [PATCH 002/133] fix: redacts IPMI password in logs (#9997) --- plugins/inputs/ipmi_sensor/ipmi.go | 14 ++++++-- plugins/inputs/ipmi_sensor/ipmi_test.go | 48 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index d26e739e96d43..801188130c960 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -151,7 +151,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { cmd := execCommand(name, dumpOpts...) 
out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } } opts = append(opts, "-S") @@ -170,7 +170,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) timestamp := time.Now() if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } if m.MetricVersion == 2 { return m.parseV2(acc, hostname, out, timestamp) @@ -315,6 +315,16 @@ func aToFloat(val string) (float64, error) { return f, nil } +func sanitizeIPMICmd(args []string) []string { + for i, v := range args { + if v == "-P" { + args[i+1] = "REDACTED" + } + } + + return args +} + func trim(s string) string { return strings.TrimSpace(s) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 4a2910101ab82..504a7467f5130 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -779,3 +779,51 @@ func Test_parseV2(t *testing.T) { }) } } + +func TestSanitizeIPMICmd(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "default args", + args: []string{ + "-H", "localhost", + "-U", "username", + "-P", "password", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-P", "REDACTED", + "-I", "lan", + }, + }, + { + name: "no password", + args: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + }, + { + name: "empty args", + args: []string{}, + expected: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sanitizedArgs []string = sanitizeIPMICmd(tt.args) + require.Equal(t, tt.expected, sanitizedArgs) + }) + } +} From 7bf8343c60e5ff4b9093be489a0ebd3a4ccd9c3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Oct 2021 12:15:18 -0500 Subject: [PATCH 003/133] fix: bump github.com/aws/aws-sdk-go-v2/config from 1.8.2 to 1.8.3 (#9948) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 16 ++++++++-------- go.sum | 24 ++++++++++++++++-------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index a7fd28cd6462e..573106c6bf3ad 100644 --- a/go.mod +++ b/go.mod @@ -47,25 +47,25 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect - github.com/aws/aws-sdk-go-v2 v1.9.1 - github.com/aws/aws-sdk-go-v2/config v1.8.2 - github.com/aws/aws-sdk-go-v2/credentials v1.4.2 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 + github.com/aws/aws-sdk-go-v2 v1.9.2 + github.com/aws/aws-sdk-go-v2/config v1.8.3 + github.com/aws/aws-sdk-go-v2/credentials v1.4.3 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 // indirect github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 + github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.1.0 diff --git a/go.sum b/go.sum index 63dfa7dbed880..b138256dd6834 100644 --- a/go.sum +++ b/go.sum @@ -308,25 +308,29 @@ github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= -github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA= +github.com/aws/aws-sdk-go-v2/credentials 
v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= @@ -338,8 +342,9 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6 github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY= @@ -363,8 +368,9 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PIS github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= 
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= @@ -376,13 +382,15 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0d github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ= From 5d2b5d15a7af8f98ba45ac65812d502b520a0557 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20Dupuy?= Date: Tue, 26 Oct 2021 20:07:02 +0200 Subject: [PATCH 004/133] feat: add additional metrics to support elastic pool (sqlserver plugin) (#9841) --- plugins/inputs/sqlserver/README.md | 310 +++++----- ...zuresqlqueries.go => azuresqldbqueries.go} | 545 +---------------- .../sqlserver/azuresqlmanagedqueries.go | 546 ++++++++++++++++++ .../inputs/sqlserver/azuresqlpoolqueries.go | 477 +++++++++++++++ .../sqlserver/azuresqlpoolqueries_test.go | 312 ++++++++++ plugins/inputs/sqlserver/sqlserver.go | 58 +- 6 files changed, 1548 insertions(+), 700 deletions(-) rename plugins/inputs/sqlserver/{azuresqlqueries.go => azuresqldbqueries.go} (55%) create mode 100644 plugins/inputs/sqlserver/azuresqlmanagedqueries.go create mode 100644 plugins/inputs/sqlserver/azuresqlpoolqueries.go create mode 100644 plugins/inputs/sqlserver/azuresqlpoolqueries_test.go diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 10f6064581dfb..c92f0db9af2f3 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,18 +1,20 @@ # SQL Server Input Plugin + The `sqlserver` plugin provides metrics for your SQL Server instance. 
Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. ### The SQL Server plugin supports the following editions/versions of SQL Server + - SQL Server - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) - - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will - need to be addressed by the community. + - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance -### Additional Setup: +### Additional Setup + +You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: -You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: ```sql USE master; GO @@ -25,6 +27,7 @@ GO ``` For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database. + ```sql CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword'; GO @@ -32,7 +35,28 @@ GRANT VIEW DATABASE STATE TO [telegraf]; GO ``` -### Configuration: +For Azure SQL Elastic Pool, please follow the following instructions to collect metrics. + +On master logical database, create an SQL login 'telegraf' and assign it to the server-level role ##MS_ServerStateReader##. + +```sql +CREATE LOGIN [telegraf] WITH PASSWORD = N'mystrongpassword'; +GO +ALTER SERVER ROLE ##MS_ServerStateReader## + ADD MEMBER [telegraf]; +GO +``` + +Elastic pool metrics can be collected from any database in the pool if a user for the `telegraf` login is created in that database. For collection to work, this database must remain in the pool, and must not be renamed. If you plan to add/remove databases from this pool, create a separate database for monitoring purposes that will remain in the pool. + +> Note: To avoid duplicate monitoring data, do not collect elastic pool metrics from more than one database in the same pool. + +```sql +GO +CREATE USER [telegraf] FOR LOGIN telegraf; +``` + +### Configuration ```toml [agent] @@ -58,42 +82,34 @@ GO ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. - ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" - - ## Queries enabled by default for database_type = "AzureSQLDB" are - - ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, - ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" - # database_type = "AzureSQLDB" + database_type = "SQLServer" - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] + ## A list of queries to include. 
If not specified, all the below listed queries are used. + include_query = [] ## A list of queries to explicitly ignore. - # exclude_query = [] - - ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - - ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, - ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - - # database_type = "AzureSQLManagedInstance" - - # include_query = [] - - # exclude_query = [] + exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] ## Queries enabled by default for database_type = "SQLServer" are - ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, - ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates - database_type = "SQLServer" + ## Queries enabled by default for database_type = "AzureSQLDB" are - + ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, + ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers - include_query = [] + ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - + ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, + ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default - exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + ## Queries enabled by default for database_type = "AzureSQLPool" are - + ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, + ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers - ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use + ## Following are old config settings + ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use ## the new mechanism of identifying the database_type there by use it's corresponding queries ## Optional parameter, setting this to 2 will use a new version @@ -126,30 +142,39 @@ GO ## - AzureSQLDBRequests ## - AzureSQLDBSchedulers - ## database_type = AzureSQLManagedInstance by default collects the following queries - ## - AzureSQLMIResourceStats - ## - AzureSQLMIResourceGovernance - ## - AzureSQLMIDatabaseIO - ## - AzureSQLMIServerProperties - ## - AzureSQLMIOsWaitstats - ## - AzureSQLMIMemoryClerks - ## - AzureSQLMIPerformanceCounters - ## - AzureSQLMIRequests - ## - AzureSQLMISchedulers - - ## database_type = SQLServer by default collects the following queries - ## - SQLServerPerformanceCounters - ## - SQLServerWaitStatsCategorized - ## - SQLServerDatabaseIO - ## - SQLServerProperties - ## - SQLServerMemoryClerks - ## - SQLServerSchedulers - ## - SQLServerRequests - ## - SQLServerVolumeSpace - ## - SQLServerCpu - ## and following as optional (if mentioned in the include_query 
list) - ## - SQLServerAvailabilityReplicaStates - ## - SQLServerDatabaseReplicaStates + ## database_type = AzureSQLManagedInstance by default collects the following queries + ## - AzureSQLMIResourceStats + ## - AzureSQLMIResourceGovernance + ## - AzureSQLMIDatabaseIO + ## - AzureSQLMIServerProperties + ## - AzureSQLMIOsWaitstats + ## - AzureSQLMIMemoryClerks + ## - AzureSQLMIPerformanceCounters + ## - AzureSQLMIRequests + ## - AzureSQLMISchedulers + + ## database_type = AzureSQLPool by default collects the following queries + ## - AzureSQLPoolResourceStats + ## - AzureSQLPoolResourceGovernance + ## - AzureSQLPoolDatabaseIO + ## - AzureSQLPoolOsWaitStats, + ## - AzureSQLPoolMemoryClerks + ## - AzureSQLPoolPerformanceCounters + ## - AzureSQLPoolSchedulers + + ## database_type = SQLServer by default collects the following queries + ## - SQLServerPerformanceCounters + ## - SQLServerWaitStatsCategorized + ## - SQLServerDatabaseIO + ## - SQLServerProperties + ## - SQLServerMemoryClerks + ## - SQLServerSchedulers + ## - SQLServerRequests + ## - SQLServerVolumeSpace + ## - SQLServerCpu + ## and following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates ## Version 2 by default collects the following queries ## Version 2 is being deprecated, please consider using database_type. @@ -175,7 +200,6 @@ GO ## - MemoryClerk ## - VolumeSpace ## - PerformanceMetrics - ``` ### Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) @@ -190,6 +214,7 @@ To enable support for AAD authentication, we leverage the existing AAD authentic - Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). - On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the below script. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource. + ```sql EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = '''') BEGIN @@ -198,25 +223,31 @@ EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = ''] FROM EXTERNAL PROVIDER') EXECUTE ('GRANT VIEW DATABASE STATE TO []') ``` + - On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). 
The auth method must be set to "AAD" + ```toml servers = [ "Server=.database.windows.net;Port=1433;Database=;app name=telegraf;log=1;", ] auth_method = "AAD" ``` + - Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). -### Metrics: +### Metrics + To provide backwards compatibility, this plugin support two versions of metrics queries. **Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. -#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type. +#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type + The original metrics queries provide: + - *Performance counters*: 1000+ metrics from `sys.dm_os_performance_counters` - *Performance metrics*: special performance and ratio metrics - *Wait stats*: wait tasks categorized from `sys.dm_os_wait_stats` @@ -229,12 +260,15 @@ The original metrics queries provide: - *CPU*: cpu usage from `sys.dm_os_ring_buffers` If you are using the original queries all stats have the following tags: + - `servername`: hostname:instance - `type`: type of stats to easily filter measurements -#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type. +#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type + The new (version 2) metrics provide: -- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` + +- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats`. - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. - *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more @@ -263,115 +297,133 @@ The new (version 2) metrics provide: - Stats from `sys.dm_db_wait_stats` - Resource governance stats from `sys.dm_user_db_resource_governance` - Stats from `sys.dm_db_resource_stats` - +#### database_type = "AzureSQLDB" -#### database_type = "AzureSQLDB These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test,differences in DMVs: -- AzureSQLDBDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. -- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. -= AzureSQLDBResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance` -- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLDBServerProperties: Relevant Azure SQL relevant properties from such as Tier, #Vcores, Memory etc, storage, etc. 
-- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only. + +- *AzureSQLDBDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- *AzureSQLDBMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLDBResourceGovernance*: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance` +- *AzureSQLDBPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- *AzureSQLDBServerProperties*: Relevant Azure SQL relevant properties from such as Tier, #Vcores, Memory etc, storage, etc. +- *AzureSQLDBWaitstats*: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only. - *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide - *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` - *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots. +#### database_type = "AzureSQLManagedInstance" -#### database_type = "AzureSQLManagedInstance These are metrics for Azure SQL Managed instance, are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs: -- AzureSQLMIDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. -- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. -- AzureSQLMIResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance` -- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLMIServerProperties: Relevant Azure SQL relevant properties such as Tier, #Vcores, Memory etc, storage, etc. -- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide -- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` -- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots. - -#### database_type = "SQLServer -- SQLServerDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` -- SQLServerMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. 
-- SQLServerPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: + +- *AzureSQLMIDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- *AzureSQLMIMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLMIResourceGovernance*: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance` +- *AzureSQLMIPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- *AzureSQLMIServerProperties*: Relevant Azure SQL relevant properties such as Tier, #Vcores, Memory etc, storage, etc. +- *AzureSQLMIOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide +- *AzureSQLMIRequests*: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` +- *AzureSQLMISchedulers*: This captures `sys.dm_os_schedulers` snapshots. + +#### database_type = "AzureSQLPool" + +These are metrics for Azure SQL to monitor resources usage at Elastic Pool level. These metrics require additional permissions to be collected, please ensure to check additional setup section in this documentation. + +- *AzureSQLPoolResourceStats*: Returns resource usage statistics for the current elastic pool in a SQL Database server. Queried from `sys.dm_resource_governor_resource_pools_history_ex`. +- *AzureSQLPoolResourceGovernance*: Returns actual configuration and capacity settings used by resource governance mechanisms in the current elastic pool. Queried from `sys.dm_user_db_resource_governance`. +- *AzureSQLPoolDatabaseIO*: Returns I/O statistics for data and log files for each database in the pool. Queried from `sys.dm_io_virtual_file_stats`. +- *AzureSQLPoolOsWaitStats*: Returns information about all the waits encountered by threads that executed. Queried from `sys.dm_os_wait_stats`. +- *AzureSQLPoolMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLPoolPerformanceCounters*: A selected list of performance counters from `sys.dm_os_performance_counters`. Note: Performance counters where the cntr_type column value is 537003264 are already returned with a percentage format between 0 and 100. For other counters, please check [sys.dm_os_performance_counters](https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current) documentation. +- *AzureSQLPoolSchedulers*: This captures `sys.dm_os_schedulers` snapshots. + +#### database_type = "SQLServer" + +- *SQLServerDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` +- *SQLServerMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. +- *SQLServerPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters`. 
Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more - *Availability Groups*: Bytes sent to replica, Bytes received from replica, Log bytes received, Log send queue, transaction delay, + more - *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. -- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. -- SQLServerSchedulers - This captures `sys.dm_os_schedulers`. -- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and +- *SQLServerProperties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. +- *SQLServerWaitStatsCategorized*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. +- *SQLServerSchedulers*: This captures `sys.dm_os_schedulers`. +- *SQLServerRequests*: This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and blocking sessions. -- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. -- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). +- *SQLServerVolumeSpace*: Uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- SQLServerCpu: Uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). 
- SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup - SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup - #### Output Measures -The guiding principal is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type. -`sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats` -`sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats -`sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties -`sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk -`sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters -`sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers +The guiding principal is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type. +- `sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats` +- `sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats +- `sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties +- `sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk +- `sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters +- `sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers The following Performance counter metrics can be used directly, with no delta calculations: - - SQLServer:Buffer Manager\Buffer cache hit ratio - - SQLServer:Buffer Manager\Page life expectancy - - SQLServer:Buffer Node\Page life expectancy - - SQLServer:Database Replica\Log Apply Pending Queue - - SQLServer:Database Replica\Log Apply Ready Queue - - SQLServer:Database Replica\Log Send Queue - - SQLServer:Database Replica\Recovery Queue - - SQLServer:Databases\Data File(s) Size (KB) - - SQLServer:Databases\Log File(s) Size (KB) - - SQLServer:Databases\Log File(s) Used Size (KB) - - SQLServer:Databases\XTP Memory Used (KB) - - SQLServer:General Statistics\Active Temp Tables - - SQLServer:General Statistics\Processes blocked - - SQLServer:General Statistics\Temp Tables For Destruction - - SQLServer:General Statistics\User Connections - - SQLServer:Memory Broker Clerks\Memory broker clerk size - - SQLServer:Memory Manager\Memory Grants Pending - - SQLServer:Memory Manager\Target Server Memory (KB) - - SQLServer:Memory Manager\Total Server Memory (KB) - - SQLServer:Resource Pool Stats\Active memory grant amount (KB) - - SQLServer:Resource Pool Stats\Disk Read Bytes/sec - - SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec - - SQLServer:Resource Pool Stats\Disk Read IO/sec - - SQLServer:Resource Pool Stats\Disk 
Write Bytes/sec - - SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec - - SQLServer:Resource Pool Stats\Disk Write IO/sec - - SQLServer:Resource Pool Stats\Used memory (KB) - - SQLServer:Transactions\Free Space in tempdb (KB) - - SQLServer:Transactions\Version Store Size (KB) - - SQLServer:User Settable\Query - - SQLServer:Workload Group Stats\Blocked tasks - - SQLServer:Workload Group Stats\CPU usage % - - SQLServer:Workload Group Stats\Queued requests - - SQLServer:Workload Group Stats\Requests completed/sec + +- SQLServer:Buffer Manager\Buffer cache hit ratio +- SQLServer:Buffer Manager\Page life expectancy +- SQLServer:Buffer Node\Page life expectancy +- SQLServer:Database Replica\Log Apply Pending Queue +- SQLServer:Database Replica\Log Apply Ready Queue +- SQLServer:Database Replica\Log Send Queue +- SQLServer:Database Replica\Recovery Queue +- SQLServer:Databases\Data File(s) Size (KB) +- SQLServer:Databases\Log File(s) Size (KB) +- SQLServer:Databases\Log File(s) Used Size (KB) +- SQLServer:Databases\XTP Memory Used (KB) +- SQLServer:General Statistics\Active Temp Tables +- SQLServer:General Statistics\Processes blocked +- SQLServer:General Statistics\Temp Tables For Destruction +- SQLServer:General Statistics\User Connections +- SQLServer:Memory Broker Clerks\Memory broker clerk size +- SQLServer:Memory Manager\Memory Grants Pending +- SQLServer:Memory Manager\Target Server Memory (KB) +- SQLServer:Memory Manager\Total Server Memory (KB) +- SQLServer:Resource Pool Stats\Active memory grant amount (KB) +- SQLServer:Resource Pool Stats\Disk Read Bytes/sec +- SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Read IO/sec +- SQLServer:Resource Pool Stats\Disk Write Bytes/sec +- SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Write IO/sec +- SQLServer:Resource Pool Stats\Used memory (KB) +- SQLServer:Transactions\Free Space in tempdb (KB) +- SQLServer:Transactions\Version Store Size (KB) +- SQLServer:User Settable\Query +- SQLServer:Workload Group Stats\Blocked tasks +- SQLServer:Workload Group Stats\CPU usage % +- SQLServer:Workload Group Stats\Queued requests +- SQLServer:Workload Group Stats\Requests completed/sec Version 2 queries have the following tags: + - `sql_instance`: Physical host and instance name (hostname:instance) - `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. #### Health Metric + All collection versions (version 1, version 2, and database_type) support an optional plugin health metric called `sqlserver_telegraf_health`. This metric tracks if connections to SQL Server are succeeding or failing. Users can leverage this metric to detect if their SQL Server monitoring is not working as intended. In the configuration file, toggling `health_metric` to `true` will enable collection of this metric. By default, this value is set to `false` and the metric is not collected. The health metric emits one record for each connection specified by `servers` in the configuration file. The health metric emits the following tags: + - `sql_instance` - Name of the server specified in the connection string. This value is emitted as-is in the connection string. If the server could not be parsed from the connection string, a constant placeholder value is emitted - `database_name` - Name of the database or (initial catalog) specified in the connection string. This value is emitted as-is in the connection string. 
If the database could not be parsed from the connection string, a constant placeholder value is emitted The health metric emits the following fields: + - `attempted_queries` - Number of queries that were attempted for this connection - `successful_queries` - Number of queries that completed successfully for this connection - `database_type` - Type of database as specified by `database_type`. If `database_type` is empty, the `QueryVersion` and `AzureDB` fields are concatenated instead diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqldbqueries.go similarity index 55% rename from plugins/inputs/sqlserver/azuresqlqueries.go rename to plugins/inputs/sqlserver/azuresqldbqueries.go index 17361c20d41f8..fad68e0ea9b03 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqldbqueries.go @@ -5,7 +5,7 @@ import ( ) //------------------------------------------------------------------------------------------------ -//------------------ Azure SQL Database ------------------------------------------------------ +//------------------ Azure SQL Database ---------------------------------------------------------- //------------------------------------------------------------------------------------------------ // Only executed if AzureDB flag is set const sqlAzureDBResourceStats string = ` @@ -38,7 +38,7 @@ ORDER BY [end_time] DESC; ` -// Resource Governamce is only relevant to Azure SQL DB into separate collector +// Resource Governance is only relevant to Azure SQL DB into separate collector // This will only be collected for Azure SQL Database. const sqlAzureDBResourceGovernance string = ` IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ @@ -678,544 +678,3 @@ SELECT ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` - -//------------------------------------------------------------------------------------------------ -//------------------ Azure Managed Instance ------------------------------------------------------ -//------------------------------------------------------------------------------------------------ -const sqlAzureMIProperties = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP 1 - 'sqlserver_server_properties' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[virtual_core_count] AS [cpu_count] - ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] - ,[sku] - ,SERVERPROPERTY('EngineEdition') AS [engine_edition] - ,[hardware_generation] AS [hardware_type] - ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] - ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] - ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] - ,SERVERPROPERTY('ProductVersion') AS [sql_version] - ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] - ,[db_online] - ,[db_restoring] - ,[db_recovering] - ,[db_recoveryPending] - ,[db_suspect] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.server_resource_stats -CROSS APPLY ( - SELECT - SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] - ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] - ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] - ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] - ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] - ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] - FROM sys.databases -) AS dbs -ORDER BY - [start_time] DESC; -` - -const sqlAzureMIResourceStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP(1) - 'sqlserver_azure_db_resource_stats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM - sys.server_resource_stats -ORDER BY - [end_time] DESC; -` - -const sqlAzureMIResourceGovernance string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_instance_resource_governance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[instance_cap_cpu] - ,[instance_max_log_rate] - ,[instance_max_worker_threads] - ,[tempdb_log_file_number] - ,[volume_local_iops] - ,[volume_external_xstore_iops] - ,[volume_managed_xstore_iops] - ,[volume_type_local_iops] as [voltype_local_iops] - ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] - ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] - ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_instance_resource_governance; -` - -const sqlAzureMIDatabaseIO = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_database_io' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension - ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension - ,mf.[type_desc] AS [file_type] - ,vfs.[io_stall_read_ms] AS [read_latency_ms] - ,vfs.[num_of_reads] AS [reads] - ,vfs.[num_of_bytes_read] AS [read_bytes] - ,vfs.[io_stall_write_ms] AS [write_latency_ms] - ,vfs.[num_of_writes] AS [writes] - ,vfs.[num_of_bytes_written] AS [write_bytes] - ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] - ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs -LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) - ON vfs.[database_id] = mf.[database_id] - AND vfs.[file_id] = mf.[file_id] -WHERE - vfs.[database_id] < 32760 -` - -const sqlAzureMIMemoryClerks = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_memory_clerks' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,mc.[type] AS [clerk_type] - ,SUM(mc.[pages_kb]) AS [size_kb] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) -GROUP BY - mc.[type] -HAVING - SUM(mc.[pages_kb]) >= 1024 -OPTION(RECOMPILE); -` - -const sqlAzureMIOsWaitStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_waitstats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,ws.[wait_type] - ,[wait_time_ms] - ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] - ,[signal_wait_time_ms] - ,[max_wait_time_ms] - ,[waiting_tasks_count] - ,CASE - WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' - WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' - WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' - WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' - WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' - WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' - WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' - WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' - WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' - WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' - or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' - WHEN ws.[wait_type] LIKE 'SLEEP[_]%' - or ws.[wait_type] IN ( - 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', - 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', - 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' - WHEN ws.[wait_type] IN( - 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', - 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' - WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' - WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' - WHEN ws.[wait_type] IN ( - 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', - 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' - WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' - WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' - or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' - WHEN ws.[wait_type] IN( - 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', - 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' - WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') - or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' - or ws.[wait_type] like 'BP%' THEN 'Parallelism' - WHEN ws.[wait_type] IN( - 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', - 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', - 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' - WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' - WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' - or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' - or ws.[wait_type] LIKE 'SE_REPL[_]%' - or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' - WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' - or ws.[wait_type] IN ( - 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', - 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', - 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' - WHEN ws.[wait_type] IN ( - 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', - 'FT_IFTSHC_MUTEX', 
'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', - 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', - 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' - ELSE 'Other' - END as [wait_category] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) -WHERE - ws.[wait_type] NOT IN ( - N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', - N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', - N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', - N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', - N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', - N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', - N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', - N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', - N'PARALLEL_REDO_WORKER_WAIT_WORK', - N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', - N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', - N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', - N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', - N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', - N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', - N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', - N'QDS_ASYNC_QUEUE', - N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', - N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', - N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', - N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', - N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - N'SQLTRACE_WAIT_ENTRIES', - N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', - N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', - N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', - N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', - N'RBIO_COMM_RETRY') -AND [waiting_tasks_count] > 10 -AND [wait_time_ms] > 100; -` - -const sqlAzureMIPerformanceCounters = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -DECLARE @PCounters TABLE -( - [object_name] nvarchar(128), - [counter_name] nvarchar(128), - [instance_name] nvarchar(128), - [cntr_value] bigint, - [cntr_type] INT , - Primary Key([object_name],[counter_name],[instance_name]) -); - -WITH PerfCounters AS ( - SELECT DISTINCT - RTrim(spi.[object_name]) [object_name] - ,RTrim(spi.[counter_name]) [counter_name] - ,CASE WHEN ( - RTRIM(spi.[object_name]) LIKE '%:Databases' - OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' - OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' - OR RTRIM(spi.[object_name]) LIKE '%:Query Store' - OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' - OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value - WHEN - RTRIM([object_name]) LIKE '%:Availability Replica' - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) - ELSE RTRIM(spi.instance_name) - END AS [instance_name] - ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] - ,spi.[cntr_type] - FROM sys.dm_os_performance_counters AS spi - LEFT JOIN sys.databases AS d - ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID - = CASE - /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ - WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL - THEN d.[name] - ELSE d.[physical_database_name] - END - WHERE - counter_name IN ( - 'SQL Compilations/sec' - ,'SQL Re-Compilations/sec' - ,'User Connections' - ,'Batch Requests/sec' - ,'Logouts/sec' - ,'Logins/sec' - ,'Processes blocked' - ,'Latch Waits/sec' - ,'Full Scans/sec' - ,'Index Searches/sec' - ,'Page Splits/sec' - ,'Page lookups/sec' - ,'Page reads/sec' - ,'Page writes/sec' - ,'Readahead pages/sec' - ,'Lazy writes/sec' - ,'Checkpoint pages/sec' - ,'Table Lock Escalations/sec' - ,'Page life expectancy' - ,'Log File(s) Size (KB)' - ,'Log File(s) Used Size (KB)' - ,'Data File(s) Size (KB)' - ,'Transactions/sec' - ,'Write Transactions/sec' - ,'Active Transactions' - ,'Log Growths' - ,'Active Temp Tables' - ,'Logical Connections' - ,'Temp Tables Creation Rate' - ,'Temp Tables For Destruction' - ,'Free Space in tempdb (KB)' - ,'Version Store Size (KB)' - ,'Memory Grants Pending' - ,'Memory Grants Outstanding' - ,'Free list stalls/sec' - ,'Buffer cache hit ratio' - ,'Buffer cache hit ratio base' - ,'Backup/Restore Throughput/sec' - ,'Total Server Memory (KB)' - ,'Target Server Memory (KB)' - ,'Log Flushes/sec' - ,'Log Flush Wait Time' - ,'Memory broker clerk size' - ,'Log Bytes Flushed/sec' - ,'Bytes Sent to Replica/sec' - ,'Log Send Queue' - ,'Bytes Sent to Transport/sec' - ,'Sends to Replica/sec' - ,'Bytes Sent to Transport/sec' - ,'Sends to Transport/sec' - ,'Bytes Received from Replica/sec' - ,'Receives from Replica/sec' - ,'Flow Control Time (ms/sec)' - ,'Flow Control/sec' - ,'Resent Messages/sec' - ,'Redone Bytes/sec' - ,'XTP Memory Used (KB)' - ,'Transaction Delay' - ,'Log Bytes Received/sec' - ,'Log Apply Pending Queue' - ,'Redone Bytes/sec' - ,'Recovery Queue' - ,'Log 
Apply Ready Queue' - ,'CPU usage %' - ,'CPU usage % base' - ,'Queued requests' - ,'Requests completed/sec' - ,'Blocked tasks' - ,'Active memory grant amount (KB)' - ,'Disk Read Bytes/sec' - ,'Disk Read IO Throttled/sec' - ,'Disk Read IO/sec' - ,'Disk Write Bytes/sec' - ,'Disk Write IO Throttled/sec' - ,'Disk Write IO/sec' - ,'Used memory (KB)' - ,'Forwarded Records/sec' - ,'Background Writer pages/sec' - ,'Percent Log Used' - ,'Log Send Queue KB' - ,'Redo Queue KB' - ,'Mirrored Write Transactions/sec' - ,'Group Commit Time' - ,'Group Commits/Sec' - ,'Workfiles Created/sec' - ,'Worktables Created/sec' - ,'Distributed Query' - ,'DTC calls' - ,'Query Store CPU usage' - ) OR ( - spi.[object_name] LIKE '%User Settable%' - OR spi.[object_name] LIKE '%SQL Errors%' - OR spi.[object_name] LIKE '%Batch Resp Statistics%' - ) OR ( - spi.[instance_name] IN ('_Total') - AND spi.[counter_name] IN ( - 'Lock Timeouts/sec' - ,'Lock Timeouts (timeout > 0)/sec' - ,'Number of Deadlocks/sec' - ,'Lock Waits/sec' - ,'Latch Waits/sec' - ) - ) -) - -INSERT INTO @PCounters select * from PerfCounters - -SELECT - 'sqlserver_performance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,pc.[object_name] AS [object] - ,pc.[counter_name] AS [counter] - ,CASE pc.[instance_name] - WHEN '_Total' THEN 'Total' - ELSE ISNULL(pc.[instance_name],'') - END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] - ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -from @PCounters pc -LEFT OUTER JOIN @PCounters AS pc1 - ON ( - pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') - OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') - ) - AND pc.[object_name] = pc1.[object_name] - AND pc.[instance_name] = pc1.[instance_name] - AND pc1.[counter_name] LIKE '%base' -WHERE - pc.[counter_name] NOT LIKE '% base' -OPTION (RECOMPILE); -` - -const sqlAzureMIRequests string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 -CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) - -SELECT - 'sqlserver_requests' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,COALESCE(r.[status], s.[status]) AS [status] - ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] - ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] - ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] - ,COALESCE(r.[writes], s.[writes]) AS [writes] - ,r.[command] - ,r.[wait_time] as [wait_time_ms] - ,r.[wait_type] - ,r.[wait_resource] - ,r.[blocking_session_id] - ,s.[program_name] - ,s.[host_name] - ,s.[nt_user_name] - ,s.[login_name] - ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] - ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) - WHEN 0 THEN '0-Read Committed' - WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' - WHEN 2 THEN '2-Read Committed' - WHEN 3 THEN '3-Repeatable Read' - WHEN 4 THEN '4-Serializable' - WHEN 5 THEN '5-Snapshot' - ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' - END, 30) AS [transaction_isolation_level] - ,r.[granted_query_memory] as [granted_query_memory_pages] - ,r.[percent_complete] - ,SUBSTRING( - qt.[text], - r.[statement_start_offset] / 2 + 1, - (CASE WHEN r.[statement_end_offset] = -1 - THEN DATALENGTH(qt.text) - ELSE r.[statement_end_offset] - END - r.[statement_start_offset]) / 2 + 1 - ) AS [statement_text] - ,qt.[objectid] - ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] - ,DB_NAME(qt.[dbid]) [stmt_db_name] - ,CONVERT(varchar(20),[query_hash],1) as [query_hash] - ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] - ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_exec_sessions AS s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON s.[session_id] = r.[session_id] -OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt -WHERE - (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) - OR ( - r.session_id IS NOT NULL - AND ( - s.is_user_process = 1 - OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') - ) - ) -OPTION(MAXDOP 1); -` - -const sqlAzureMISchedulers string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_schedulers' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] - ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] - ,s.[is_online] - ,s.[is_idle] - ,s.[preemptive_switches_count] - ,s.[context_switches_count] - ,s.[current_tasks_count] - ,s.[runnable_tasks_count] - ,s.[current_workers_count] - ,s.[active_workers_count] - ,s.[work_queue_count] - ,s.[pending_disk_io_count] - ,s.[load_factor] - ,s.[yield_count] - ,s.[total_cpu_usage_ms] - ,s.[total_scheduler_delay_ms] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_os_schedulers AS s -` diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go new file mode 100644 index 0000000000000..802afa0ee3fcc --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go @@ -0,0 +1,546 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Managed Instance ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzureMIProperties = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP 1 + 'sqlserver_server_properties' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[virtual_core_count] AS [cpu_count] + ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] + ,[sku] + ,SERVERPROPERTY('EngineEdition') AS [engine_edition] + ,[hardware_generation] AS [hardware_type] + ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] + ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] + ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,SERVERPROPERTY('ProductVersion') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] + ,[db_online] + ,[db_restoring] + ,[db_recovering] + ,[db_recoveryPending] + ,[db_suspect] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.server_resource_stats +CROSS APPLY ( + SELECT + SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] + ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] + ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] + ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] + ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] + ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] + FROM sys.databases +) AS dbs +ORDER BY + [start_time] DESC; +` + +const sqlAzureMIResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_azure_db_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM + sys.server_resource_stats +ORDER BY + [end_time] DESC; +` + +const sqlAzureMIResourceGovernance string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_instance_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[tempdb_log_file_number] + ,[volume_local_iops] + ,[volume_external_xstore_iops] + ,[volume_managed_xstore_iops] + ,[volume_type_local_iops] as [voltype_local_iops] + ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] + ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] + ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_instance_resource_governance; +` + +const sqlAzureMIDatabaseIO = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,mf.[type_desc] AS [file_type] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs +LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) + ON vfs.[database_id] = mf.[database_id] + AND vfs.[file_id] = mf.[file_id] +WHERE + vfs.[database_id] < 32760 +` + +const sqlAzureMIMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +const sqlAzureMIOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,ws.[wait_type] + ,[wait_time_ms] + ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,[signal_wait_time_ms] + ,[max_wait_time_ms] + ,[waiting_tasks_count] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' + or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' + or ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' + or ws.[wait_type] like 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 
'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + or ws.[wait_type] LIKE 'SE_REPL[_]%' + or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + or ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzureMIPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF 
SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time 
(ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +from @PCounters pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' +OPTION (RECOMPILE); +` + +const sqlAzureMIRequests string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 +CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) + +SELECT + 'sqlserver_requests' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,s.[session_id] + ,ISNULL(r.[request_id], 0) as [request_id] + ,COALESCE(r.[status], s.[status]) AS [status] + ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] + ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] + ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] + ,COALESCE(r.[writes], s.[writes]) AS [writes] + ,r.[command] + ,r.[wait_time] as [wait_time_ms] + ,r.[wait_type] + ,r.[wait_resource] + ,r.[blocking_session_id] + ,s.[program_name] + ,s.[host_name] + ,s.[nt_user_name] + ,s.[login_name] + ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] + ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' + END, 30) AS [transaction_isolation_level] + ,r.[granted_query_memory] as [granted_query_memory_pages] + ,r.[percent_complete] + ,SUBSTRING( + qt.[text], + r.[statement_start_offset] / 2 + 1, + (CASE WHEN r.[statement_end_offset] = -1 + THEN DATALENGTH(qt.text) + ELSE r.[statement_end_offset] + END - r.[statement_start_offset]) / 2 + 1 + ) AS [statement_text] + ,qt.[objectid] + ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] + ,DB_NAME(qt.[dbid]) [stmt_db_name] + ,CONVERT(varchar(20),[query_hash],1) as [query_hash] + ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] + ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_exec_sessions AS s +LEFT OUTER JOIN sys.dm_exec_requests AS r + ON s.[session_id] = r.[session_id] +OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt +WHERE + (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) + OR ( + r.session_id IS NOT NULL + AND ( + s.is_user_process = 1 + OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') + ) + ) +OPTION(MAXDOP 1); +` + +const sqlAzureMISchedulers string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] + ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] + ,s.[is_online] + ,s.[is_idle] + ,s.[preemptive_switches_count] + ,s.[context_switches_count] + ,s.[current_tasks_count] + ,s.[runnable_tasks_count] + ,s.[current_workers_count] + ,s.[active_workers_count] + ,s.[work_queue_count] + ,s.[pending_disk_io_count] + ,s.[load_factor] + ,s.[yield_count] + ,s.[total_cpu_usage_ms] + ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_schedulers AS s +` diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries.go b/plugins/inputs/sqlserver/azuresqlpoolqueries.go new file mode 100644 index 0000000000000..36fe087fc57e6 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries.go @@ -0,0 +1,477 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Sql Elastic Pool ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzurePoolResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_pool_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[snapshot_time] + ,cast([cap_vcores_used_percent] as float) AS [avg_cpu_percent] + ,cast([avg_data_io_percent] as float) AS [avg_data_io_percent] + ,cast([avg_log_write_percent] as float) AS [avg_log_write_percent] + ,cast([avg_storage_percent] as float) AS [avg_storage_percent] + ,cast([max_worker_percent] as float) AS [max_worker_percent] + ,cast([max_session_percent] as float) AS [max_session_percent] + ,cast([max_data_space_kb]/1024. as int) AS [storage_limit_mb] + ,cast([avg_instance_cpu_percent] as float) AS [avg_instance_cpu_percent] + ,cast([avg_allocated_storage_percent] as float) AS [avg_allocated_storage_percent] +FROM + sys.dm_resource_governor_resource_pools_history_ex +WHERE + [name] = 'SloSharedPool1' +ORDER BY + [snapshot_time] DESC; +` + +const sqlAzurePoolResourceGovernance = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_pool_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[slo_name] + ,[dtu_limit] + ,[cpu_limit] + ,[max_cpu] + ,[cap_cpu] + ,[max_db_memory] + ,[max_db_max_size_in_mb] + ,[db_file_growth_in_mb] + ,[log_size_in_mb] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[checkpoint_rate_mbps] + ,[checkpoint_rate_io] + ,[primary_group_max_workers] + ,[primary_min_log_rate] + ,[primary_max_log_rate] + ,[primary_group_min_io] + ,[primary_group_max_io] + ,[primary_group_min_cpu] + ,[primary_group_max_cpu] + ,[primary_pool_max_workers] + ,[pool_max_io] + ,[volume_local_iops] + ,[volume_managed_xstore_iops] + ,[volume_external_xstore_iops] + ,[volume_type_local_iops] + ,[volume_type_managed_xstore_iops] + ,[volume_type_external_xstore_iops] + ,[volume_pfs_iops] + ,[volume_type_pfs_iops] +FROM + sys.dm_user_db_resource_governance +WHERE database_id = DB_ID(); +` + +const sqlAzurePoolDatabaseIO = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,CASE + WHEN vfs.[database_id] = 1 THEN 'master' + WHEN vfs.[database_id] = 2 THEN 'tempdb' + WHEN vfs.[database_id] = 3 THEN 'model' + WHEN vfs.[database_id] = 4 THEN 'msdb' + ELSE gov.[database_name] + END AS [database_name] + ,vfs.[database_id] + ,vfs.[file_id] + ,CASE + WHEN vfs.[file_id] = 2 THEN 'LOG' + ELSE 'ROWS' + END AS [file_type] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.[io_stall_queued_read_ms] AS [rg_read_stall_ms] + ,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms] + ,[size_on_disk_bytes] + ,ISNULL([size_on_disk_bytes],0)/(1024*1024) AS [size_on_disk_mb] +FROM + sys.dm_io_virtual_file_stats(NULL,NULL) AS vfs +LEFT OUTER JOIN + sys.dm_user_db_resource_governance AS gov +ON vfs.[database_id] = gov.[database_id]; +` + +const sqlAzurePoolOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[wait_type] + ,[waiting_tasks_count] + ,[wait_time_ms] + ,[max_wait_time_ms] + ,[signal_wait_time_ms] + ,[wait_time_ms]-[signal_wait_time_ms] AS [resource_wait_ms] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' THEN 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' OR ws.[wait_type] LIKE 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' OR ws.[wait_type] LIKE 'DTCNEW%' OR ws.[wait_type] LIKE 'TRAN_%' + OR ws.[wait_type] LIKE 'XACT%' OR ws.[wait_type] LIKE 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' OR ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN ( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + OR ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' OR ws.[wait_type] LIKE 'WAIT_RBIO_RG%' THEN 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' OR ws.[wait_type] LIKE 'WAIT_RBIO[_]%' THEN 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + OR ws.[wait_type] LIKE 'HT%' or ws.[wait_type] LIKE 'BMP%' + OR ws.[wait_type] LIKE 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + OR ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + OR ws.[wait_type] LIKE 'SE_REPL[_]%' + OR ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + OR ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN 
ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END AS [wait_category] +FROM sys.dm_os_wait_stats AS ws +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', + N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzurePoolMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] +FROM + sys.dm_os_memory_clerks AS mc +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +// Specific case on this query when cntr_type = 537003264 to return a percentage value between 0 and 100 +// cf. https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current +// Performance counters where the cntr_type column value is 537003264 display the ratio of a subset to its set as a percentage. +// For example, the Buffer Manager:Buffer cache hit ratio counter compares the total number of cache hits and the total number of cache lookups. +// As such, to get a snapshot-like reading of the last second only, you must compare the delta between the current value and the base value (denominator) +// between two collection points that are one second apart. +// The corresponding base value is the performance counter Buffer Manager:Buffer cache hit ratio base where the cntr_type column value is 1073939712. +const sqlAzurePoolPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] int + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTRIM(pc.[object_name]) AS [object_name] + ,RTRIM(pc.[counter_name]) AS [counter_name] + ,ISNULL(gov.[database_name], RTRIM(pc.instance_name)) AS [instance_name] + ,pc.[cntr_value] AS [cntr_value] + ,pc.[cntr_type] AS [cntr_type] + FROM sys.dm_os_performance_counters AS pc + LEFT JOIN sys.dm_user_db_resource_governance AS gov + ON + TRY_CONVERT([uniqueidentifier], pc.[instance_name]) = gov.[physical_database_guid] + WHERE + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (pc.[object_name] LIKE 'MSSQL%:Databases%' AND pc.[instance_name] IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + pc.[counter_name] IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Query Store CPU usage' + ) OR ( + pc.[object_name] LIKE '%User Settable%' + OR pc.[object_name] LIKE '%SQL Errors%' + OR pc.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + pc.[instance_name] IN ('_Total') + AND 
pc.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST( + CASE WHEN pc.[cntr_type] = 537003264 AND base.[cntr_value] > 0 + THEN (pc.[cntr_value] * 1.0) / (base.[cntr_value] * 1.0) * 100 + ELSE pc.[cntr_value] + END + AS float) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS base +ON + pc.[counter_name] = REPLACE(base.[counter_name],' base','') + AND pc.[object_name] = base.[object_name] + AND pc.[instance_name] = base.[instance_name] + AND base.[cntr_type] = 1073939712 +WHERE + pc.[cntr_type] <> 1073939712 +OPTION(RECOMPILE) +` + +const sqlAzurePoolSchedulers = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[scheduler_id] + ,[cpu_id] + ,[status] + ,[is_online] + ,[is_idle] + ,[preemptive_switches_count] + ,[context_switches_count] + ,[idle_switches_count] + ,[current_tasks_count] + ,[runnable_tasks_count] + ,[current_workers_count] + ,[active_workers_count] + ,[work_queue_count] + ,[pending_disk_io_count] + ,[load_factor] + ,[failed_to_create_worker] + ,[quantum_length_us] + ,[yield_count] + ,[total_cpu_usage_ms] + ,[total_cpu_idle_capped_ms] + ,[total_scheduler_delay_ms] + ,[ideal_workers_limit] +FROM + sys.dm_os_schedulers; +` diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go new file mode 100644 index 0000000000000..2149e0d23fd4c --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go @@ -0,0 +1,312 @@ +package sqlserver + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestAzureSQL_ElasticPool_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_stats")) + require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "sql_instance")) + 
require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "elastic_pool_name")) + require.True(t, acc.HasField("sqlserver_pool_resource_stats", "snapshot_time")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_data_io_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_session_percent")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_stats", "storage_limit_mb")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_allocated_storage_percent")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_ElasticPool_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_governance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cpu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_min_log_rate")) + 
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_pfs_iops")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_ElasticPool_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "size_on_disk_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", 
"size_on_disk_mb")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolOsWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, 
acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "elastic_pool_name")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "status")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "idle_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasField("sqlserver_schedulers", "failed_to_create_worker")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "quantum_length_us")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_idle_capped_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "ideal_workers_limit")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 4a965bec15afd..86418a2e65054 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -54,6 +54,7 @@ const defaultServer = "Server=.;app name=telegraf;log=1;" const ( typeAzureSQLDB = "AzureSQLDB" typeAzureSQLManagedInstance = "AzureSQLManagedInstance" + typeAzureSQLPool = "AzureSQLPool" typeSQLServer = "SQLServer" ) @@ -87,42 +88,34 @@ servers = [ ## "database_type" enables a specific set of queries depending on the database type. 
If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" +## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" -## Queries enabled by default for database_type = "AzureSQLDB" are - -## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers - -# database_type = "AzureSQLDB" +database_type = "SQLServer" -## A list of queries to include. If not specified, all the above listed queries are used. -# include_query = [] +## A list of queries to include. If not specified, all the below listed queries are used. +include_query = [] ## A list of queries to explicitly ignore. -# exclude_query = [] - -## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - -# database_type = "AzureSQLManagedInstance" - -# include_query = [] - -# exclude_query = [] +exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] ## Queries enabled by default for database_type = "SQLServer" are - ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates -database_type = "SQLServer" +## Queries enabled by default for database_type = "AzureSQLDB" are - +## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers -include_query = [] +## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default -exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +## Queries enabled by default for database_type = "AzureSQLPool" are - +## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers -## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +## Following are old config settings +## You may use them only if you are using the earlier flavor of queries, however it is recommended to use ## the new mechanism of identifying the database_type there by use it's 
corresponding queries ## Optional parameter, setting this to 2 will use a new version @@ -156,9 +149,10 @@ func initQueries(s *SQLServer) error { // New config option database_type // To prevent query definition conflicts - // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB - // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI - // Constant defintiions for type "SQLServer" start with sqlServer + // Constant definitions for type "AzureSQLDB" start with sqlAzureDB + // Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI + // Constant definitions for type "AzureSQLPool" start with sqlAzurePool + // Constant definitions for type "SQLServer" start with sqlServer if s.DatabaseType == typeAzureSQLDB { queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false} queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false} @@ -180,6 +174,14 @@ func initQueries(s *SQLServer) error { queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false} queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false} queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false} + } else if s.DatabaseType == typeAzureSQLPool { + queries["AzureSQLPoolResourceStats"] = Query{ScriptName: "AzureSQLPoolResourceStats", Script: sqlAzurePoolResourceStats, ResultByRow: false} + queries["AzureSQLPoolResourceGovernance"] = Query{ScriptName: "AzureSQLPoolResourceGovernance", Script: sqlAzurePoolResourceGovernance, ResultByRow: false} + queries["AzureSQLPoolDatabaseIO"] = Query{ScriptName: "AzureSQLPoolDatabaseIO", Script: sqlAzurePoolDatabaseIO, ResultByRow: false} + queries["AzureSQLPoolOsWaitStats"] = Query{ScriptName: "AzureSQLPoolOsWaitStats", Script: sqlAzurePoolOsWaitStats, ResultByRow: false} + queries["AzureSQLPoolMemoryClerks"] = Query{ScriptName: "AzureSQLPoolMemoryClerks", Script: sqlAzurePoolMemoryClerks, ResultByRow: false} + queries["AzureSQLPoolPerformanceCounters"] = Query{ScriptName: "AzureSQLPoolPerformanceCounters", Script: sqlAzurePoolPerformanceCounters, ResultByRow: false} + queries["AzureSQLPoolSchedulers"] = Query{ScriptName: "AzureSQLPoolSchedulers", Script: sqlAzurePoolSchedulers, ResultByRow: false} } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. 
queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} From 488568cafc6e74af82859b57e67c3767fed0cfa0 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 27 Oct 2021 07:11:43 -0600 Subject: [PATCH 005/133] fix: update readme to align with other docs (#10005) --- README.md | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index b76ad45c0d1a3..03d7428c12591 100644 --- a/README.md +++ b/README.md @@ -43,24 +43,29 @@ page or from each [GitHub Releases](https://github.com/influxdata/telegraf/relea InfluxData also provides a package repo that contains both DEB and RPM downloads. -For deb-based platforms run the following to add the repo key and setup a new -sources.list entry: +For deb-based platforms (e.g. Ubuntu and Debian) run the following to add the +repo key and setup a new sources.list entry: ```shell -curl -s https://repos.influxdata.com/influxdb.key | gpg --dearmor > /etc/apt/trusted.gpg.d/influxdb.gpg -export DISTRIB_ID=$(lsb_release -si); export DISTRIB_CODENAME=$(lsb_release -sc) -echo "deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" > /etc/apt/sources.list.d/influxdb.list +wget -qO- https://repos.influxdata.com/influxdb.key | sudo tee /etc/apt/trusted.gpg.d/influxdb.asc >/dev/null +source /etc/os-release +echo "deb https://repos.influxdata.com/${ID} ${VERSION_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +sudo apt-get update && sudo apt-get install telegraf ``` -For RPM-based platforms use the following repo file in `/etc/yum.repos.d/`: +For RPM-based platforms (e.g. 
RHEL, CentOS) use the following to create a repo +file and install telegraf: -```text +```shell +cat < Date: Wed, 27 Oct 2021 17:48:57 +0200 Subject: [PATCH 006/133] fix: Linter fixes for plugins/inputs/[k-l]* (#9999) --- .../inputs/kafka_consumer/kafka_consumer.go | 11 +++--- .../kafka_consumer/kafka_consumer_test.go | 12 ++++--- .../kafka_consumer_legacy_integration_test.go | 14 ++++---- .../kinesis_consumer/kinesis_consumer_test.go | 14 ++++---- .../inputs/knx_listener/knx_listener_test.go | 36 +++++++++---------- plugins/inputs/kube_inventory/kube_state.go | 13 +++---- plugins/inputs/kube_inventory/node.go | 13 ++++--- plugins/inputs/kube_inventory/pod.go | 12 +++---- plugins/inputs/lanz/lanz.go | 2 ++ plugins/inputs/lanz/lanz_test.go | 3 +- plugins/inputs/logparser/logparser_test.go | 21 ++++++----- plugins/inputs/logstash/logstash.go | 22 ++++++------ plugins/inputs/lustre2/lustre2_test.go | 6 ++-- 13 files changed, 93 insertions(+), 86 deletions(-) diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 70affdc2372b4..b20004a87a417 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -3,12 +3,12 @@ package kafka_consumer import ( "context" "fmt" - "log" "strings" "sync" "time" "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" @@ -232,7 +232,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { go func() { defer k.wg.Done() for ctx.Err() == nil { - handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag err := k.consumer.Consume(ctx, k.Topics, handler) @@ -276,12 +276,13 @@ type Message struct { session sarama.ConsumerGroupSession } -func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser, log telegraf.Logger) *ConsumerGroupHandler { handler := &ConsumerGroupHandler{ acc: acc.WithTracking(maxUndelivered), sem: make(chan empty, maxUndelivered), undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), parser: parser, + log: log, } return handler } @@ -299,6 +300,8 @@ type ConsumerGroupHandler struct { mu sync.Mutex undelivered map[telegraf.TrackingID]Message + + log telegraf.Logger } // Setup is called once when a new session is opened. It setups up the handler @@ -335,7 +338,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { msg, ok := h.undelivered[track.ID()] if !ok { - log.Printf("E! 
[inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + h.log.Errorf("Could not mark message delivered: %d", track.ID()) return } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index c73104278338e..68fd9e0627bed 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -6,12 +6,13 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeConsumerGroup struct { @@ -259,7 +260,7 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -274,11 +275,12 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - // This produces a flappy testcase probably due to a race between context cancelation and consumption. + // This produces a flappy testcase probably due to a race between context cancellation and consumption. // Furthermore, it is not clear what the outcome of this test should be... // err = cg.ConsumeClaim(session, &claim) //require.NoError(t, err) // So stick with the line below for now. 
+ //nolint:errcheck cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) @@ -288,7 +290,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -402,7 +404,7 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 976412a7196b5..473c5b9740847 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -6,11 +6,10 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) func TestReadsMetricsFromKafka(t *testing.T) { @@ -51,7 +50,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { var acc testutil.Accumulator // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + require.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { @@ -65,14 +64,14 @@ func TestReadsMetricsFromKafka(t *testing.T) { require.NoError(t, err) if len(acc.Metrics) == 1 { point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ "host": "server01", "direction": "in", "region": "us-west", }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } else { t.Errorf("No points found in accumulator, expected 1") } @@ -84,6 +83,7 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index 6d52f07835e6b..1e0d935e03cc6 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -2,15 +2,17 @@ package kinesis_consumer import ( "encoding/base64" + "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" consumer "github.com/harlow/kinesis-consumer" + "github.com/stretchr/testify/require" + 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "testing" ) func TestKinesisConsumer_onMessage(t *testing.T) { @@ -177,7 +179,7 @@ func TestKinesisConsumer_onMessage(t *testing.T) { ContentEncoding: "notsupported", } err := k.Init() - assert.NotNil(t, err) + require.NotNil(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -187,18 +189,18 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: tt.fields.records, } err := k.Init() - assert.Nil(t, err) + require.Nil(t, err) acc := testutil.Accumulator{} if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) } - assert.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) + require.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) for _, metric := range acc.Metrics { if logEventMessage, ok := metric.Fields["message"]; ok { - assert.Contains(t, logEventMessage.(string), tt.expected.messageContains) + require.Contains(t, logEventMessage.(string), tt.expected.messageContains) } else { t.Errorf("Expect logEvents to be present") } diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go index b0502fbbc8e95..adb07eb6d0113 100644 --- a/plugins/inputs/knx_listener/knx_listener_test.go +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -6,14 +6,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/vapourismo/knx-go/knx" "github.com/vapourismo/knx-go/knx/cemi" "github.com/vapourismo/knx-go/knx/dpt" + + "github.com/influxdata/telegraf/testutil" ) const epsilon = 1e-3 @@ -127,17 +125,17 @@ func TestRegularReceives_DPT(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, len(testcases)) for i, m := range acc.Metrics { - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, testcases[i].address, m.Tags["groupaddress"]) - assert.Len(t, m.Fields, 1) + require.Equal(t, "test", m.Measurement) + require.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + require.Len(t, m.Fields, 1) switch v := testcases[i].value.(type) { case bool, int64, uint64: - assert.Equal(t, v, m.Fields["value"]) + require.Equal(t, v, m.Fields["value"]) case float64: - assert.InDelta(t, v, m.Fields["value"], epsilon) + require.InDelta(t, v, m.Fields["value"], epsilon) } - assert.True(t, !tstop.Before(m.Time)) - assert.True(t, !tstart.After(m.Time)) + require.True(t, !tstop.Before(m.Time)) + require.True(t, !tstart.After(m.Time)) } } @@ -178,13 +176,13 @@ func TestRegularReceives_MultipleMessages(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, 2) - assert.Equal(t, "temperature", acc.Metrics[0].Measurement) - assert.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[0].Fields, 1) - assert.Equal(t, true, acc.Metrics[0].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[0].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + require.Len(t, acc.Metrics[0].Fields, 1) + require.Equal(t, true, acc.Metrics[0].Fields["value"]) - assert.Equal(t, "temperature", acc.Metrics[1].Measurement) - assert.Equal(t, "1/1/1", 
acc.Metrics[1].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[1].Fields, 1) - assert.Equal(t, false, acc.Metrics[1].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[1].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + require.Len(t, acc.Metrics[1].Fields, 1) + require.Equal(t, false, acc.Metrics[1].Fields["value"]) } diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 24db993dd39bb..94cb5faf9048b 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,7 +3,6 @@ package kube_inventory import ( "context" "fmt" - "log" "os" "strconv" "strings" @@ -37,6 +36,8 @@ type KubernetesInventory struct { SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig client *client @@ -169,15 +170,15 @@ func atoi(s string) int64 { return i } -func convertQuantity(s string, m float64) int64 { +func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + ki.Log.Debugf("failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse float: %s", err.Error()) + ki.Log.Debugf("failed to parse float: %s", err.Error()) return 0 } if m < 1 { @@ -187,11 +188,11 @@ func convertQuantity(s string, m float64) int64 { } func (ki *KubernetesInventory) createSelectorFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) if err != nil { return err } - ki.selectorFilter = filter + ki.selectorFilter = selectorFilter return nil } diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index 3c7c9cb38e160..b46b4e6209ffc 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -26,13 +26,12 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato } for resourceName, val := range n.Status.Capacity { - switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = convertQuantity(val.String(), 1) - fields["capacity_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.String(), 1) + fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": fields["capacity_pods"] = atoi(val.String()) } @@ -41,10 +40,10 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = convertQuantity(val.String(), 1) - fields["allocatable_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.String(), 1) + fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1) 
case "pods": fields["allocatable_pods"] = atoi(val.String()) } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index ab4e5dd287cbe..ed95dd63d970d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -35,11 +35,11 @@ func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) if !ok { cs = &corev1.ContainerStatus{} } - gatherPodContainer(ki, p, *cs, c, acc) + ki.gatherPodContainer(p, *cs, c, acc) } } -func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { +func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" @@ -103,17 +103,17 @@ func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.Contain for resourceName, val := range req { switch resourceName { case "cpu": - fields["resource_requests_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_requests_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } for resourceName, val := range lim { switch resourceName { case "cpu": - fields["resource_limits_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_limits_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 86bb93a8f754b..a77e99df61f6e 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -8,6 +8,7 @@ import ( "github.com/aristanetworks/goarista/lanz" pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -85,6 +86,7 @@ func (l *Lanz) Stop() { } func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { + //nolint:gosimple // for-select used on purpose for { select { case msg, ok := <-in: diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 684bfc8902bb8..f2a8b5815e36d 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -6,7 +6,8 @@ import ( "testing" pb "github.com/aristanetworks/goarista/lanz/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 3100c615cd4e4..a2f780afd21b9 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,7 +24,7 @@ func TestStartNoParsers(t *testing.T) { } acc := testutil.Accumulator{} - assert.Error(t, logparser.Start(&acc)) + require.Error(t, logparser.Start(&acc)) } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { @@ -41,7 +40,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { acc := testutil.Accumulator{} err := 
logparser.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestGrokParseLogFiles(t *testing.T) { @@ -112,7 +111,7 @@ func TestGrokParseLogFiles(t *testing.T) { func TestGrokParseLogFilesAppearLater(t *testing.T) { emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) - assert.NoError(t, err) + require.NoError(t, err) logparser := &LogParserPlugin{ Log: testutil.Logger{}, @@ -126,17 +125,17 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) - assert.NoError(t, err) + require.NoError(t, err) err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) - assert.NoError(t, err) + require.NoError(t, err) - assert.NoError(t, acc.GatherError(logparser.Gather)) + require.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) logparser.Stop() @@ -170,7 +169,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -202,7 +201,7 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 6fcaadabcd244..9f5a198587e4d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -179,8 +179,8 @@ func (logstash *Logstash) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { - request, err := http.NewRequest("GET", url, nil) +func (logstash *Logstash) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -206,7 +206,7 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -218,10 +218,10 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { } // gatherJVMStats gather the JVM metrics and add results to the accumulator -func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJSONData(url, jvmStats) + err := logstash.gatherJSONData(address, jvmStats) if err != nil { return err } @@ -244,10 +244,10 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu } // gatherJVMStats gather the Process metrics and add results to the accumulator -func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJSONData(url, processStats) + err := logstash.gatherJSONData(address, processStats) if err != nil { return err } @@ -403,10 +403,10 @@ func (logstash *Logstash) gatherQueueStats( } // gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6) -func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJSONData(url, pipelineStats) + err := logstash.gatherJSONData(address, pipelineStats) if err != nil { return err } @@ -447,10 +447,10 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } // gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) -func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := logstash.gatherJSONData(url, pipelinesStats) + err := logstash.gatherJSONData(address, pipelinesStats) if err != nil { return err } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 7fd3fd91f469e..3c5659e18f14f 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -7,11 +7,11 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Set config file variables to point to fake directory structure instead of /proc? 
@@ -358,7 +358,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) {
 	require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin))
-	assert.Equal(t, Lustre2{
+	require.Equal(t, Lustre2{
 		OstProcfiles: []string{
 			"/proc/fs/lustre/obdfilter/*/stats",
 			"/proc/fs/lustre/osd-ldiskfs/*/stats",

From 0088be7da1c97b151534b873ad48867845b975a5 Mon Sep 17 00:00:00 2001
From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com>
Date: Wed, 27 Oct 2021 10:49:20 -0500
Subject: [PATCH 007/133] fix: stop triggering share-artifacts on release/tags (#9996)

---
 .circleci/config.yml | 3 +++
 scripts/check-file-changes.sh | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 028198bbdb236..e5d535bf41115 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -535,6 +535,9 @@ workflows:
           branches:
             ignore:
               - master
+              - release.*
+          tags:
+            ignore: /.*/
     - 'release':
         requires:
           - 'test-go-windows'
diff --git a/scripts/check-file-changes.sh b/scripts/check-file-changes.sh
index fa141afc4a23e..72280fcc0a5e5 100755
--- a/scripts/check-file-changes.sh
+++ b/scripts/check-file-changes.sh
@@ -1,9 +1,10 @@
 #!/bin/bash
+# CIRCLE-CI SCRIPT: This file is used exclusively for CI
 # To prevent the tests/builds to run for only a doc change, this script checks what files have changed in a pull request.
 BRANCH="$(git rev-parse --abbrev-ref HEAD)"
 echo $BRANCH
-if [[ "$BRANCH" != "master" ]] && [[ "$BRANCH" != release* ]]; then # This should never skip for master and release branches
+if [[ ${CIRCLE_PULL_REQUEST##*/} != "" ]]; then # Only skip if there is an associated pull request with this job
 	# Ask git for all the differences between this branch and master
 	# Then use grep to look for changes in the .circleci/ directory, anything named *.go or *.mod or *.sum or *.sh or Makefile
 	# If no match is found, then circleci step halt will stop the CI job but mark it successful

From 9ff9166cb254de05838c5e0204edbc013bf8f5fb Mon Sep 17 00:00:00 2001
From: Josh Powers
Date: Wed, 27 Oct 2021 13:15:25 -0600
Subject: [PATCH 008/133] Update changelog

(cherry picked from commit f7a4d20bbb7f7eaa6278bbcee5c62eeeb53d8cc7)
---
 etc/telegraf.conf | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/etc/telegraf.conf b/etc/telegraf.conf
index ae5680b32d52f..49aa4c327c287 100644
--- a/etc/telegraf.conf
+++ b/etc/telegraf.conf
@@ -3834,6 +3834,15 @@
 # # ## List of interfaces to ignore when pulling metrics.
 # # interface_exclude = ["eth1"]
+#
+# ## Some drivers declare statistics with extra whitespace, different spacing,
+# ## and mix cases. This list, when enabled, can be used to clean the keys.
+# ## Here are the current possible normalizations:
+# ## * snakecase: converts fooBarBaz to foo_bar_baz
+# ## * trim: removes leading and trailing whitespace
+# ## * lower: changes all capitalized letters to lowercase
+# ## * underscore: replaces spaces with underscores
+# # normalize_keys = ["snakecase", "trim", "lower", "underscore"]

 # # Read metrics from one or more commands that can output to stdout

From 96c66d3a45e41594693e1a709f9f8a302cff9ba9 Mon Sep 17 00:00:00 2001
From: Josh Powers
Date: Wed, 27 Oct 2021 13:50:47 -0600
Subject: [PATCH 009/133] Update changelog

(cherry picked from commit 6c03be4e7382e3f8efdf1a5caccc5eb8698caa92)
---
 CHANGELOG.md | 48 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 44 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8760b914b7f95..d03253afbcff7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,43 @@
+## v1.20.3 [2021-10-27]
+
+#### Release Notes
+
+ - [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2
+
+#### Bugfixes
+
+ - [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3
+ - [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs
+ - [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps
+ - [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2
+ - [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation
+ - [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests
+ - [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library
+ - [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys
+ - [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD
+ - [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory
+ - [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin
+ - [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field
+ - [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kubernetes labels
+ - [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size
+ - [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook
+ - [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x replicaset
+ - [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0
+ - [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0
+ - [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0
+ -
[#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql`Fix inconsistent metric types in mysql + - [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible + - [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place + - [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 + - [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 + - [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage + - [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +#### New External Plugins + + - [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka + - [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka + ## v1.20.2 [2021-10-07] #### Bugfixes @@ -134,7 +174,7 @@ #### Bugfixes - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions - - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written + - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column @@ -243,7 +283,7 @@ #### New Input Plugins -- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble - [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak - [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda @@ -397,7 +437,7 @@ #### New Parsers - [XML Parser Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan - + #### New Serializers - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox @@ -405,7 +445,7 @@ - [GeoIP Processor Plugin ](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope - + ## v1.17.3 [2021-02-17] #### Bugfixes From 343e846480d575afd96868e6b537d624d05049fe Mon Sep 17 00:00:00 2001 From: Chris Ruscio Date: Thu, 28 Oct 2021 16:35:22 -0400 Subject: [PATCH 
010/133] feat: add max_processing_time config to Kafka Consumer input (#9988) --- plugins/inputs/kafka_consumer/README.md | 9 +++++ .../inputs/kafka_consumer/kafka_consumer.go | 33 ++++++++++++++----- .../kafka_consumer/kafka_consumer_test.go | 12 +++++++ 3 files changed, 46 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 741f24d04e75e..f4629ed4e11e3 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -93,6 +93,15 @@ and use the old zookeeper connection method. ## waiting until the next flush_interval. # max_undelivered_messages = 1000 + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index b20004a87a417..1aff773a5d8cf 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -10,6 +10,7 @@ import ( "github.com/Shopify/sarama" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/inputs" @@ -101,6 +102,15 @@ const sampleConfig = ` ## waiting until the next flush_interval. # max_undelivered_messages = 1000 + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -110,6 +120,7 @@ const sampleConfig = ` const ( defaultMaxUndeliveredMessages = 1000 + defaultMaxProcessingTime = config.Duration(100 * time.Millisecond) defaultConsumerGroup = "telegraf_metrics_consumers" reconnectDelay = 5 * time.Second ) @@ -118,14 +129,15 @@ type empty struct{} type semaphore chan empty type KafkaConsumer struct { - Brokers []string `toml:"brokers"` - ConsumerGroup string `toml:"consumer_group"` - MaxMessageLen int `toml:"max_message_len"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - Offset string `toml:"offset"` - BalanceStrategy string `toml:"balance_strategy"` - Topics []string `toml:"topics"` - TopicTag string `toml:"topic_tag"` + Brokers []string `toml:"brokers"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + MaxProcessingTime config.Duration `toml:"max_processing_time"` + Offset string `toml:"offset"` + BalanceStrategy string `toml:"balance_strategy"` + Topics []string `toml:"topics"` + TopicTag string `toml:"topic_tag"` kafka.ReadConfig @@ -172,6 +184,9 @@ func (k *KafkaConsumer) Init() error { if k.MaxUndeliveredMessages == 0 { k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages } + if time.Duration(k.MaxProcessingTime) == 0 { + k.MaxProcessingTime = defaultMaxProcessingTime + } if k.ConsumerGroup == "" { k.ConsumerGroup = defaultConsumerGroup } @@ -209,6 +224,8 @@ func (k *KafkaConsumer) Init() error { k.ConsumerCreator = &SaramaCreator{} } + config.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) + k.config = config return nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 68fd9e0627bed..7d31dad92549d 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" @@ -64,6 +65,7 @@ func TestInit(t *testing.T) { require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) require.Equal(t, plugin.config.ClientID, "Telegraf") require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 100*time.Millisecond) }, }, { @@ -165,6 +167,16 @@ func TestInit(t *testing.T) { require.True(t, plugin.config.Net.TLS.Enable) }, }, + { + name: "custom max_processing_time", + plugin: &KafkaConsumer{ + MaxProcessingTime: config.Duration(1000 * time.Millisecond), + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 1000*time.Millisecond) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 7d6672c53a0d2f2e8ac4c3aff91f934345230f45 Mon Sep 17 00:00:00 2001 From: bustedware Date: Thu, 28 Oct 2021 16:42:49 -0400 Subject: [PATCH 011/133] feat: add mongodb output plugin (#9923) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 - go.mod | 3 +- go.sum | 5 +- plugins/common/tls/config.go | 5 +- plugins/common/tls/config_test.go | 21 ++ plugins/outputs/all/all.go | 1 + 
plugins/outputs/mongodb/README.md | 43 +++ plugins/outputs/mongodb/dev/Dockerfile | 22 ++ plugins/outputs/mongodb/dev/mongodb.sh | 34 +++ plugins/outputs/mongodb/mongodb.go | 253 +++++++++++++++++ plugins/outputs/mongodb/mongodb_test.go | 352 ++++++++++++++++++++++++ testutil/pki/client.pem | 28 ++ testutil/pki/clientenc.pem | 31 +++ testutil/pki/clientenckey.pem | 18 ++ testutil/pki/server.pem | 28 ++ testutil/pki/tls-certs.sh | 26 +- testutil/tls.go | 16 ++ 17 files changed, 872 insertions(+), 15 deletions(-) create mode 100644 plugins/outputs/mongodb/README.md create mode 100644 plugins/outputs/mongodb/dev/Dockerfile create mode 100644 plugins/outputs/mongodb/dev/mongodb.sh create mode 100644 plugins/outputs/mongodb/mongodb.go create mode 100644 plugins/outputs/mongodb/mongodb_test.go create mode 100644 testutil/pki/client.pem create mode 100644 testutil/pki/clientenc.pem create mode 100644 testutil/pki/clientenckey.pem create mode 100644 testutil/pki/server.pem diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ba1ee5147d99e..995ad5f697ed1 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -32,7 +32,6 @@ following works: - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) -- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) diff --git a/go.mod b/go.mod index 573106c6bf3ad..ef426a7d1a8ed 100644 --- a/go.mod +++ b/go.mod @@ -273,7 +273,7 @@ require ( github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect - go.mongodb.org/mongo-driver v1.5.3 + go.mongodb.org/mongo-driver v1.7.3 go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/collector/model v0.35.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 @@ -334,7 +334,6 @@ require ( ) require ( - github.com/aws/aws-sdk-go v1.38.3 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect diff --git a/go.sum b/go.sum index b138256dd6834..ebbc319d44333 100644 --- a/go.sum +++ b/go.sum @@ -299,7 +299,6 @@ github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= 
github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= @@ -2134,8 +2133,8 @@ go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= -go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI= -go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 271d63e7cac2e..457f31e4162a1 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,9 +4,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "github.com/influxdata/telegraf/internal/choice" "os" "strings" + + "github.com/influxdata/telegraf/internal/choice" ) // ClientConfig represents the standard client TLS config. @@ -14,6 +15,7 @@ type ClientConfig struct { TLSCA string `toml:"tls_ca"` TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` InsecureSkipVerify bool `toml:"insecure_skip_verify"` ServerName string `toml:"tls_server_name"` @@ -27,6 +29,7 @@ type ClientConfig struct { type ServerConfig struct { TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` TLSCipherSuites []string `toml:"tls_cipher_suites"` TLSMinVersion string `toml:"tls_min_version"` diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index b118c48b5f912..5fee4a5e08214 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -33,6 +33,15 @@ func TestClientConfig(t *testing.T) { TLSKey: pki.ClientKeyPath(), }, }, + { + name: "success with tls key password set", + client: tls.ClientConfig{ + TLSCA: pki.CACertPath(), + TLSCert: pki.ClientCertPath(), + TLSKey: pki.ClientKeyPath(), + TLSKeyPwd: "", + }, + }, { name: "invalid ca", client: tls.ClientConfig{ @@ -137,6 +146,18 @@ func TestServerConfig(t *testing.T) { TLSMaxVersion: pki.TLSMaxVersion(), }, }, + { + name: "success with tls key password set", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSKeyPwd: "", + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.TLSMaxVersion(), + }, + }, { name: "missing tls cipher suites is okay", server: tls.ServerConfig{ diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 33b2f92dd01df..ff3f2251a9994 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -31,6 +31,7 
@@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/librato" _ "github.com/influxdata/telegraf/plugins/outputs/logzio" _ "github.com/influxdata/telegraf/plugins/outputs/loki" + _ "github.com/influxdata/telegraf/plugins/outputs/mongodb" _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" _ "github.com/influxdata/telegraf/plugins/outputs/nats" _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" diff --git a/plugins/outputs/mongodb/README.md b/plugins/outputs/mongodb/README.md new file mode 100644 index 0000000000000..0f9ca99730772 --- /dev/null +++ b/plugins/outputs/mongodb/README.md @@ -0,0 +1,43 @@ +# MongoDB Output Plugin + +This plugin sends metrics to MongoDB and automatically creates the collections as time series collections when they don't already exist. +**Please note:** Requires MongoDB 5.0+ for Time Series Collections + +### Configuration: + +```toml +# A plugin that can transmit logs to mongodb +[[outputs.mongodb]] + # connection string examples for mongodb + dsn = "mongodb://localhost:27017" + # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" + + # overrides serverSelectionTimeoutMS in dsn if set + # timeout = "30s" + + # default authentication, optional + # authentication = "NONE" + + # for SCRAM-SHA-256 authentication + # authentication = "SCRAM" + # username = "root" + # password = "***" + + # for x509 certificate authentication + # authentication = "X509" + # tls_ca = "ca.pem" + # tls_key = "client.pem" + # # tls_key_pwd = "changeme" # required for encrypted tls_key + # insecure_skip_verify = false + + # database to store measurements and time series collections + # database = "telegraf" + + # granularity can be seconds, minutes, or hours. + # configuring this value will be based on your input collection frequency. + # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection + # granularity = "seconds" + + # optionally set a TTL to automatically expire documents from the measurement collections. + # ttl = "360h" +``` \ No newline at end of file diff --git a/plugins/outputs/mongodb/dev/Dockerfile b/plugins/outputs/mongodb/dev/Dockerfile new file mode 100644 index 0000000000000..3745b83554462 --- /dev/null +++ b/plugins/outputs/mongodb/dev/Dockerfile @@ -0,0 +1,22 @@ +FROM docker.io/library/mongo:latest + +RUN apt-get update && \ + apt-get install -y openssh-client + +WORKDIR /var/log +RUN mkdir -p mongodb_noauth/ mongodb_scram/ mongodb_x509/ mongodb_x509_expire/ + +WORKDIR /opt +COPY ./testutil/pki/tls-certs.sh . +RUN mkdir -p data/noauth data/scram data/x509 data/x509_expire +RUN /opt/tls-certs.sh + +COPY ./plugins/outputs/mongodb/dev/mongodb.sh . 
+RUN chmod +x mongodb.sh
+
+EXPOSE 27017
+EXPOSE 27018
+EXPOSE 27019
+EXPOSE 27020
+
+CMD ./mongodb.sh
diff --git a/plugins/outputs/mongodb/dev/mongodb.sh b/plugins/outputs/mongodb/dev/mongodb.sh
new file mode 100644
index 0000000000000..c3f10deead023
--- /dev/null
+++ b/plugins/outputs/mongodb/dev/mongodb.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# no auth
+mongod --dbpath data/noauth --fork --logpath /var/log/mongodb_noauth/mongod.log --bind_ip 0.0.0.0 --port 27017
+
+# scram auth
+mongod --dbpath data/scram --fork --logpath /var/log/mongodb_scram/mongod.log --bind_ip 0.0.0.0 --port 27018
+mongo localhost:27018/admin --eval "db.createUser({user:\"root\", pwd:\"changeme\", roles:[{role:\"root\",db:\"admin\"}]})"
+mongo localhost:27018/admin --eval "db.shutdownServer()"
+mongod --dbpath data/scram --fork --logpath /var/log/mongodb_scram/mongod.log --auth --setParameter authenticationMechanisms=SCRAM-SHA-256 --bind_ip 0.0.0.0 --port 27018
+
+# get client certificate subject for creating x509 authenticating user
+dn=$(openssl x509 -in ./private/client.pem -noout -subject -nameopt RFC2253 | sed 's/subject=//g')
+
+# x509 auth
+mongod --dbpath data/x509 --fork --logpath /var/log/mongodb_x509/mongod.log --bind_ip 0.0.0.0 --port 27019
+mongo localhost:27019/admin --eval "db.getSiblingDB(\"\$external\").runCommand({createUser:\"$dn\",roles:[{role:\"root\",db:\"admin\"}]})"
+mongo localhost:27019/admin --eval "db.shutdownServer()"
+mongod --dbpath data/x509 --fork --logpath /var/log/mongodb_x509/mongod.log --auth --setParameter authenticationMechanisms=MONGODB-X509 --tlsMode preferTLS --tlsCAFile certs/cacert.pem --tlsCertificateKeyFile private/server.pem --bind_ip 0.0.0.0 --port 27019
+
+# x509 auth short expiry
+# mongodb will not start with an expired certificate. service must be started before certificate expires. tests should be run after certificate expiry
+mongod --dbpath data/x509_expire --fork --logpath /var/log/mongodb_x509_expire/mongod.log --bind_ip 0.0.0.0 --port 27020
+mongo localhost:27020/admin --eval "db.getSiblingDB(\"\$external\").runCommand({createUser:\"$dn\",roles:[{role:\"root\",db:\"admin\"}]})"
+mongo localhost:27020/admin --eval "db.shutdownServer()"
+mongod --dbpath data/x509_expire --fork --logpath /var/log/mongodb_x509_expire/mongod.log --auth --setParameter authenticationMechanisms=MONGODB-X509 --tlsMode preferTLS --tlsCAFile certs/cacert.pem --tlsCertificateKeyFile private/serverexp.pem --bind_ip 0.0.0.0 --port 27020
+
+# note about key size and mongodb
+# x509 keys must be 2048 bits or stronger in order for mongodb to start. otherwise you will receive an error similar to the one below
+# {"keyFile":"/opt/private/server.pem","error":"error:140AB18F:SSL routines:SSL_CTX_use_certificate:ee key too small"}
+
+# copy key files to /opt/export. docker volume should point /opt/export to outputs/mongodb/dev in order to run non short x509 tests
+cp /opt/certs/cacert.pem /opt/private/client.pem /opt/private/clientenc.pem /opt/export
+
+while true; do sleep 1; done # leave container running.
diff --git a/plugins/outputs/mongodb/mongodb.go b/plugins/outputs/mongodb/mongodb.go new file mode 100644 index 0000000000000..0540e7d10c9a4 --- /dev/null +++ b/plugins/outputs/mongodb/mongodb.go @@ -0,0 +1,253 @@ +package mongodb + +import ( + "context" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +func (s *MongoDB) getCollections(ctx context.Context) error { + s.collections = map[string]bson.M{} + collections, err := s.client.Database(s.MetricDatabase).ListCollections(ctx, bson.M{}) + if err != nil { + return fmt.Errorf("unable to execute ListCollections: %v", err) + } + for collections.Next(ctx) { + var collection bson.M + if err := collections.Decode(&collection); err != nil { + return fmt.Errorf("unable to decode ListCollections: %v", err) + } + name, ok := collection["name"].(string) + if !ok { + return fmt.Errorf("non-string name in %v", collection) + } + s.collections[name] = collection + } + return nil +} + +func (s *MongoDB) insertDocument(ctx context.Context, databaseCollection string, bdoc bson.D) error { + collection := s.client.Database(s.MetricDatabase).Collection(databaseCollection) + _, err := collection.InsertOne(ctx, &bdoc) + return err +} + +type MongoDB struct { + Dsn string `toml:"dsn"` + AuthenticationType string `toml:"authentication"` + MetricDatabase string `toml:"database"` + MetricGranularity string `toml:"granularity"` + Username string `toml:"username"` + Password string `toml:"password"` + ServerSelectTimeout config.Duration `toml:"timeout"` + TTL config.Duration `toml:"ttl"` + Log telegraf.Logger `toml:"-"` + client *mongo.Client + clientOptions *options.ClientOptions + collections map[string]bson.M + tls.ClientConfig +} + +func (s *MongoDB) Description() string { + return "Sends metrics to MongoDB" +} + +var sampleConfig = ` + # connection string examples for mongodb + dsn = "mongodb://localhost:27017" + # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" + + # overrides serverSelectionTimeoutMS in dsn if set + # timeout = "30s" + + # default authentication, optional + # authentication = "NONE" + + # for SCRAM-SHA-256 authentication + # authentication = "SCRAM" + # username = "root" + # password = "***" + + # for x509 certificate authentication + # authentication = "X509" + # tls_ca = "ca.pem" + # tls_key = "client.pem" + # # tls_key_pwd = "changeme" # required for encrypted tls_key + # insecure_skip_verify = false + + # database to store measurements and time series collections + # database = "telegraf" + + # granularity can be seconds, minutes, or hours. + # configuring this value will be based on your input collection frequency. + # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection + # granularity = "seconds" + + # optionally set a TTL to automatically expire documents from the measurement collections. 
+ # ttl = "360h" +` + +func (s *MongoDB) SampleConfig() string { + return sampleConfig +} + +func (s *MongoDB) Init() error { + if s.MetricDatabase == "" { + s.MetricDatabase = "telegraf" + } + switch s.MetricGranularity { + case "": + s.MetricGranularity = "seconds" + case "seconds", "minutes", "hours": + default: + return fmt.Errorf("invalid time series collection granularity. please specify \"seconds\", \"minutes\", or \"hours\"") + } + + // do some basic Dsn checks + if !strings.HasPrefix(s.Dsn, "mongodb://") && !strings.HasPrefix(s.Dsn, "mongodb+srv://") { + return fmt.Errorf("invalid connection string. expected mongodb://host:port/?{options} or mongodb+srv://host:port/?{options}") + } + if !strings.Contains(s.Dsn[strings.Index(s.Dsn, "://")+3:], "/") { //append '/' to Dsn if its missing + s.Dsn = s.Dsn + "/" + } + + serverAPIOptions := options.ServerAPI(options.ServerAPIVersion1) //use new mongodb versioned api + s.clientOptions = options.Client().SetServerAPIOptions(serverAPIOptions) + + switch s.AuthenticationType { + case "SCRAM": + if s.Username == "" { + return fmt.Errorf("SCRAM authentication must specify a username") + } + if s.Password == "" { + return fmt.Errorf("SCRAM authentication must specify a password") + } + credential := options.Credential{ + AuthMechanism: "SCRAM-SHA-256", + Username: s.Username, + Password: s.Password, + } + s.clientOptions.SetAuth(credential) + case "X509": + //format connection string to include tls/x509 options + newConnectionString, err := url.Parse(s.Dsn) + if err != nil { + return err + } + q := newConnectionString.Query() + q.Set("tls", "true") + if s.InsecureSkipVerify { + q.Set("tlsInsecure", strconv.FormatBool(s.InsecureSkipVerify)) + } + if s.TLSCA != "" { + q.Set("tlsCAFile", s.TLSCA) + } + q.Set("sslClientCertificateKeyFile", s.TLSKey) + if s.TLSKeyPwd != "" { + q.Set("sslClientCertificateKeyPassword", s.TLSKeyPwd) + } + newConnectionString.RawQuery = q.Encode() + s.Dsn = newConnectionString.String() + // always auth source $external + credential := options.Credential{ + AuthSource: "$external", + AuthMechanism: "MONGODB-X509", + } + s.clientOptions.SetAuth(credential) + } + + if s.ServerSelectTimeout != 0 { + s.clientOptions.SetServerSelectionTimeout(time.Duration(s.ServerSelectTimeout)) + } + + s.clientOptions.ApplyURI(s.Dsn) + return nil +} + +func (s *MongoDB) createTimeSeriesCollection(databaseCollection string) error { + _, collectionExists := s.collections[databaseCollection] + if !collectionExists { + ctx := context.Background() + tso := options.TimeSeries() + tso.SetTimeField("timestamp") + tso.SetMetaField("tags") + tso.SetGranularity(s.MetricGranularity) + cco := options.CreateCollection() + if s.TTL != 0 { + cco.SetExpireAfterSeconds(int64(time.Duration(s.TTL).Seconds())) + } + cco.SetTimeSeriesOptions(tso) + err := s.client.Database(s.MetricDatabase).CreateCollection(ctx, databaseCollection, cco) + if err != nil { + return fmt.Errorf("unable to create time series collection: %v", err) + } + s.collections[databaseCollection] = bson.M{} + } + return nil +} + +func (s *MongoDB) Connect() error { + ctx := context.Background() + client, err := mongo.Connect(ctx, s.clientOptions) + if err != nil { + return fmt.Errorf("unable to connect: %v", err) + } + s.client = client + if err := s.getCollections(ctx); err != nil { + return fmt.Errorf("unable to get collections from specified metric database: %v", err) + } + return nil +} + +func (s *MongoDB) Close() error { + ctx := context.Background() + return s.client.Disconnect(ctx) +} + 
+// all metric/measurement fields are parent level of document +// metadata field is named "tags" +// mongodb stores timestamp as UTC. conversion should be performed during reads in app or in aggregation pipeline +func marshalMetric(metric telegraf.Metric) bson.D { + var bdoc bson.D + for k, v := range metric.Fields() { + bdoc = append(bdoc, primitive.E{Key: k, Value: v}) + } + var tags bson.D + for k, v := range metric.Tags() { + tags = append(tags, primitive.E{Key: k, Value: v}) + } + bdoc = append(bdoc, primitive.E{Key: "tags", Value: tags}) + bdoc = append(bdoc, primitive.E{Key: "timestamp", Value: metric.Time()}) + return bdoc +} + +func (s *MongoDB) Write(metrics []telegraf.Metric) error { + ctx := context.Background() + for _, metric := range metrics { + if err := s.createTimeSeriesCollection(metric.Name()); err != nil { + return err + } + bdoc := marshalMetric(metric) + if err := s.insertDocument(ctx, metric.Name(), bdoc); err != nil { + return err + } + } + return nil +} + +func init() { + outputs.Add("mongodb", func() telegraf.Output { return &MongoDB{} }) +} diff --git a/plugins/outputs/mongodb/mongodb_test.go b/plugins/outputs/mongodb/mongodb_test.go new file mode 100644 index 0000000000000..70bb21746992e --- /dev/null +++ b/plugins/outputs/mongodb/mongodb_test.go @@ -0,0 +1,352 @@ +package mongodb + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConnectAndWriteIntegrationNoAuth(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + plugin := &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "NONE", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + } + + // validate config + require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Connect()) + require.NoError(t, plugin.Write(testutil.MockMetrics())) + require.NoError(t, plugin.Close()) +} + +func TestConnectAndWriteIntegrationSCRAMAuth(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + tests := []struct { + name string + plugin *MongoDB + connErrFunc func(t *testing.T, err error) + }{ + { + name: "success with scram authentication", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27018/admin", + AuthenticationType: "SCRAM", + Username: "root", + Password: "changeme", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + }, + connErrFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "fail with scram authentication bad password", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27018/admin", + AuthenticationType: "SCRAM", + Username: "root", + Password: "root", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + }, + connErrFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // validate config + err := tt.plugin.Init() + require.NoError(t, err) + + if err == nil { + // connect + err = tt.plugin.Connect() + tt.connErrFunc(t, err) + + if err == nil { + // insert mock metrics + err = tt.plugin.Write(testutil.MockMetrics()) + require.NoError(t, err) + + // cleanup + err = tt.plugin.Close() + require.NoError(t, err) + } + } + }) + } +} + +func TestConnectAndWriteIntegrationX509Auth(t 
*testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + tests := []struct { + name string + plugin *MongoDB + connErrFunc func(t *testing.T, err error) + }{ + { + name: "success with x509 authentication", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSCA: "dev/cacert.pem", + TLSKey: "dev/client.pem", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "success with x509 authentication using encrypted key file", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSCA: "dev/cacert.pem", + TLSKey: "dev/clientenc.pem", + TLSKeyPwd: "changeme", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "success with x509 authentication missing ca and using insceure tls", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSKey: "dev/client.pem", + InsecureSkipVerify: true, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "fail with x509 authentication missing ca", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSKey: "dev/client.pem", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: "fail with x509 authentication using encrypted key file", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSCA: "dev/cacert.pem", + TLSKey: "dev/clientenc.pem", + TLSKeyPwd: "badpassword", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: "fail with x509 authentication using invalid ca", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSCA: "dev/client.pem", + TLSKey: "dev/client.pem", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + { + name: 
"fail with x509 authentication using invalid key", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27019", + AuthenticationType: "X509", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + ServerSelectTimeout: config.Duration(time.Duration(5) * time.Second), + TTL: config.Duration(time.Duration(5) * time.Minute), + ClientConfig: tls.ClientConfig{ + TLSCA: "dev/cacert.pem", + TLSKey: "dev/cacert.pem", + InsecureSkipVerify: false, + }, + }, + connErrFunc: func(t *testing.T, err error) { + require.Error(t, err) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // validate config + err := tt.plugin.Init() + require.NoError(t, err) + + if err == nil { + // connect + err = tt.plugin.Connect() + tt.connErrFunc(t, err) + + if err == nil { + // insert mock metrics + err = tt.plugin.Write(testutil.MockMetrics()) + require.NoError(t, err) + + // cleanup + err = tt.plugin.Close() + require.NoError(t, err) + } + } + }) + } +} + +func TestConfiguration(t *testing.T) { + tests := []struct { + name string + plugin *MongoDB + errFunc func(t *testing.T, err error) + }{ + { + name: "fail with invalid connection string", + plugin: &MongoDB{ + Dsn: "asdf1234", + AuthenticationType: "NONE", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + TTL: config.Duration(time.Duration(5) * time.Minute), + }, + }, + { + name: "fail with invalid metric granularity", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "NONE", + MetricDatabase: "telegraf_test", + MetricGranularity: "somerandomgranularitythatdoesntwork", + }, + }, + { + name: "fail with scram authentication missing username field", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "SCRAM", + Password: "somerandompasswordthatwontwork", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + }, + }, + { + name: "fail with scram authentication missing password field", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "SCRAM", + Username: "somerandomusernamethatwontwork", + MetricDatabase: "telegraf_test", + MetricGranularity: "seconds", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // validate config + err := tt.plugin.Init() + require.Error(t, err) + }) + } + + tests = []struct { + name string + plugin *MongoDB + errFunc func(t *testing.T, err error) + }{ + { + name: "success init with missing metric database", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "NONE", + MetricGranularity: "seconds", + }, + }, + { + name: "success init missing metric granularity", + plugin: &MongoDB{ + Dsn: "mongodb://localhost:27017", + AuthenticationType: "NONE", + MetricDatabase: "telegraf_test", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // validate config + err := tt.plugin.Init() + require.NoError(t, err) + }) + } +} diff --git a/testutil/pki/client.pem b/testutil/pki/client.pem new file mode 100644 index 0000000000000..e4268b5ede186 --- /dev/null +++ b/testutil/pki/client.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAWKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBUZWxl +Z3JhZiBUZXN0IENBMB4XDTE4MDUwMzAxMDUyOVoXDTI4MDQzMDAxMDUyOVowHTEb +MBkGA1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GN +ADCBiQKBgQDX7Plvu0MJtA9TrusYtQnAogsdiYJZd9wfFIjH5FxE3SWJ4KAIE+yR +WRqcqX8XnpieQLaNsfXhDPWLkWngTDydk4NO/jlAQk0e6+9+NeiZ2ViIHmtXERb9 
+CyiiWUmo+YCd69lhzSEIMK9EPBSDHQTgQMtEfGak03G5rx3MCakE1QIDAQABo0sw +STAJBgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SH +BH8AAAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEAVry0 +L07oTN+FMLncY/Be9BzFB3b3mnbxbZr58OgI4WHuOeYBuvDI033FIIIzpwb8XYpG +HJkZlSbviqq19lAh/Cktl35BCNrA6Uc+dgW7QWhnYS2tZandVTo/8FFstJTNiiLw +uiz/Hr3mRXUIDi5OygJHY1IZr8hFTOOJY+0ws3E= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICXAIBAAKBgQDX7Plvu0MJtA9TrusYtQnAogsdiYJZd9wfFIjH5FxE3SWJ4KAI +E+yRWRqcqX8XnpieQLaNsfXhDPWLkWngTDydk4NO/jlAQk0e6+9+NeiZ2ViIHmtX +ERb9CyiiWUmo+YCd69lhzSEIMK9EPBSDHQTgQMtEfGak03G5rx3MCakE1QIDAQAB +AoGAOjRU4Lt3zKvO3d3u3ZAfet+zY1jn3DolCfO9EzUJcj6ymcIFIWhNgrikJcrC +yZkkxrPnAbcQ8oNNxTuDcMTcKZbnyUnlQj5NtVuty5Q+zgf3/Q2pRhaE+TwrpOJ+ +ETtVp9R/PrPN2NC5wPo289fPNWFYkd4DPbdWZp5AJHz1XYECQQD3kKpinJxMYp9F +Q1Qj1OkxGln0KPgdqRYjjW/rXI4/hUodfg+xXWHPFSGj3AgEjQIvuengbOAeH3qo +wF1uxVTlAkEA30hXM3EbboMCDQzNRNkkV9EiZ0MZXhj1aIGl+sQZOmOeFdcdjGkD +dsA42nmaYqXCD9KAvc+S/tGJaa0Qg0VhMQJAb2+TAqh0Qn3yK39PFIH2JcAy1ZDL +fq5p5L75rfwPm9AnuHbSIYhjSo+8gMG+ai3+2fTZrcfUajrJP8S3SfFRcQJBANQQ +POHatxcKzlPeqMaPBXlyY553mAxK4CnVmPLGdL+EBYzwtlu5EVUj09uMSxkOHXYx +k5yzHQVvtXbsrBZBOsECQBJLlkMjJmXrIIdLPmHQWL3bm9MMg1PqzupSEwz6cyrG +uIIm/X91pDyxCHaKYWp38FXBkYAgohI8ow5/sgRvU5w= +-----END RSA PRIVATE KEY----- diff --git a/testutil/pki/clientenc.pem b/testutil/pki/clientenc.pem new file mode 100644 index 0000000000000..63e6099678e81 --- /dev/null +++ b/testutil/pki/clientenc.pem @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAWKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBUZWxl +Z3JhZiBUZXN0IENBMB4XDTE4MDUwMzAxMDUyOVoXDTI4MDQzMDAxMDUyOVowHTEb +MBkGA1UEAwwSY2xpZW50LmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GN +ADCBiQKBgQDX7Plvu0MJtA9TrusYtQnAogsdiYJZd9wfFIjH5FxE3SWJ4KAIE+yR +WRqcqX8XnpieQLaNsfXhDPWLkWngTDydk4NO/jlAQk0e6+9+NeiZ2ViIHmtXERb9 +CyiiWUmo+YCd69lhzSEIMK9EPBSDHQTgQMtEfGak03G5rx3MCakE1QIDAQABo0sw +STAJBgNVHRMEAjAAMAsGA1UdDwQEAwIHgDAaBgNVHREEEzARgglsb2NhbGhvc3SH +BH8AAAEwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQELBQADgYEAVry0 +L07oTN+FMLncY/Be9BzFB3b3mnbxbZr58OgI4WHuOeYBuvDI033FIIIzpwb8XYpG +HJkZlSbviqq19lAh/Cktl35BCNrA6Uc+dgW7QWhnYS2tZandVTo/8FFstJTNiiLw +uiz/Hr3mRXUIDi5OygJHY1IZr8hFTOOJY+0ws3E= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,E07764654058094DE0846DF015F8CD79 + +PdLqVcSk+zB6F8Cbgx7PmyXFvIhcQHQcM4zsuVTSdvTdtrpDk82wLxPTVIU6D7p5 +cqodMKv7xLUV2BSqGfIbSlMHyT6rFskjpZWPUSS9hQ9YlWqsoNflTMT33pNz8eMA +mYj9JlFImRq8o3E9rV2bdaFnt+UwvabPnGWW3EC3PDZRXNNFddu62X0Iip24vy/g +L5hOqkSN9l+m72wvfw0RwdTT8RMCoug+RKD/g2lUJ9l1//UhWV5Urte/cQA7l+6W +ntWzI9hwh1NheO552bOEuroMk9sjWRsYYBRkCp1JJsy+lUxZILQfoC0YP6uroVZT +TWDeWqQ839LYEJHFIZGp5fu1N/Km2HfwctelHwmJmbEMveVKaOv7TdOCjfX0fg8E +fiEvyUCZ3C/vgtZE0U4FZEaOmlGHY6VyylJmMZ20MWz9tsLJNf4GXBdaiMeD7huW +90xdbkncidRtZ/wWBPeqetP/brMu/3+1CMk66kBqVAEnw9pIxL5E3jivxMHHK9Ql +5nFJ+9epgV8wJDrTuVxqLsat/GnqfYcUPcvNgGkghblnJUdQnbM/3mBZCuuVhoMk ++Ggy3ryRiv5pUsgsriOBvZ+mGgx8IlYX8v+wSQEWuA7c/+0ylAPmqyD1B9AK5l6D +KjCxmd8/oiTlhqXZe1Z023p6+12Y+DFjGAfr5S81OwIUV6Txp5IevYdtCAs1OaDT +3F3jeWwOqbfDsXluaTc7J4SxaL4QN/CUI4ag1s0ul2Yj6giTP5g1H85XoGxjk/zN +smmRYOrmUyjChoa10wPSq9BirZ4bETnvj7OgcENaScrPmzG+8Ht6+sk5cRj+sVkv +-----END RSA PRIVATE KEY----- diff --git a/testutil/pki/clientenckey.pem b/testutil/pki/clientenckey.pem new file mode 100644 index 0000000000000..2a5a42c3bd01a --- /dev/null +++ b/testutil/pki/clientenckey.pem @@ -0,0 +1,18 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: 
AES-128-CBC,E07764654058094DE0846DF015F8CD79 + +PdLqVcSk+zB6F8Cbgx7PmyXFvIhcQHQcM4zsuVTSdvTdtrpDk82wLxPTVIU6D7p5 +cqodMKv7xLUV2BSqGfIbSlMHyT6rFskjpZWPUSS9hQ9YlWqsoNflTMT33pNz8eMA +mYj9JlFImRq8o3E9rV2bdaFnt+UwvabPnGWW3EC3PDZRXNNFddu62X0Iip24vy/g +L5hOqkSN9l+m72wvfw0RwdTT8RMCoug+RKD/g2lUJ9l1//UhWV5Urte/cQA7l+6W +ntWzI9hwh1NheO552bOEuroMk9sjWRsYYBRkCp1JJsy+lUxZILQfoC0YP6uroVZT +TWDeWqQ839LYEJHFIZGp5fu1N/Km2HfwctelHwmJmbEMveVKaOv7TdOCjfX0fg8E +fiEvyUCZ3C/vgtZE0U4FZEaOmlGHY6VyylJmMZ20MWz9tsLJNf4GXBdaiMeD7huW +90xdbkncidRtZ/wWBPeqetP/brMu/3+1CMk66kBqVAEnw9pIxL5E3jivxMHHK9Ql +5nFJ+9epgV8wJDrTuVxqLsat/GnqfYcUPcvNgGkghblnJUdQnbM/3mBZCuuVhoMk ++Ggy3ryRiv5pUsgsriOBvZ+mGgx8IlYX8v+wSQEWuA7c/+0ylAPmqyD1B9AK5l6D +KjCxmd8/oiTlhqXZe1Z023p6+12Y+DFjGAfr5S81OwIUV6Txp5IevYdtCAs1OaDT +3F3jeWwOqbfDsXluaTc7J4SxaL4QN/CUI4ag1s0ul2Yj6giTP5g1H85XoGxjk/zN +smmRYOrmUyjChoa10wPSq9BirZ4bETnvj7OgcENaScrPmzG+8Ht6+sk5cRj+sVkv +-----END RSA PRIVATE KEY----- diff --git a/testutil/pki/server.pem b/testutil/pki/server.pem new file mode 100644 index 0000000000000..c958529dbd332 --- /dev/null +++ b/testutil/pki/server.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAWKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBUZWxl +Z3JhZiBUZXN0IENBMB4XDTE4MDUwMzAxMDUyOVoXDTI4MDQzMDAxMDUyOVowHTEb +MBkGA1UEAwwSc2VydmVyLmxvY2FsZG9tYWluMIGfMA0GCSqGSIb3DQEBAQUAA4GN +ADCBiQKBgQDTBmLJ0pBFUxnPkkx38sBnOKvs+OinVqxTnVcc1iCyQJQleB37uY6D +L55mSsPvnad/oDpyGpHt4RVtrhmyC6ptSrWLyk7mraeAo30Cooqr5tA9A+6yj0ij +ySLlYimTMQy8tbnVNWLwKbxgT9N4NlUzwyqxLWUMfRzLfmefqzk5bQIDAQABo0sw +STAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATALBgNVHQ8E +BAMCBaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQADgYEATNnM +ol0s29lJ+WkP+HUFtKaXxQ+kXLADqfhsk2G1/kZAVRHsYUDlJ+GkHnWIHlg/ggIP +JS+z44iwMPOtzJQI7MvAFYVKpYAEdIFTjXf6GafLjUfoXYi0vwHoVJHtQu3Kpm9L +Ugm02h0ycIadN8RdWAAFUf6XpVKUJa0YYLuyaXY= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDTBmLJ0pBFUxnPkkx38sBnOKvs+OinVqxTnVcc1iCyQJQleB37 +uY6DL55mSsPvnad/oDpyGpHt4RVtrhmyC6ptSrWLyk7mraeAo30Cooqr5tA9A+6y +j0ijySLlYimTMQy8tbnVNWLwKbxgT9N4NlUzwyqxLWUMfRzLfmefqzk5bQIDAQAB +AoGBALWQAgFJxM2QwV1hr59oYnitPudmBa6smRpb/q6V4Y3cmFpgrdN+hIqEtxGl +9E0+5PWfI4o3KCV2itxSdlNFTDyqTZkM+BT8PPKISzAewkdqnKjbWgAmluzOJH4O +hc1zBfIOuT5+cfx5JR5/j9BhWVC7BJ+EiREkd/Z8ZnAMeItVAkEA8bhcC+8luiFQ +6kytXx2XfbKKh4Q99+KEQHqSGeuHZOcnWfjX99jo67CIxpwBRENslpZOw78fBmi4 +4kf8j+dgLwJBAN99zyRxYzKc8TSsy/fF+3V/Ex75HYGGS/eOWcwPFXpGNA63hIa8 +fJ/2pDnLzCqLZ9vWdBF39NtkacJS7bo6XSMCQQCZgN2bipSn3k53bJhRJga1gXOt +2dJMoGIiXHR513QVJSJ9ZaUpNWu9eU9y6VF4m2TTQMLmVnIKbOi0csi2TlZrAkAi +7URsC5RXGpPPiZmutTAhIqTYWFI2JcjFfWenLkxK+aG1ExURAW/wh9kOdz0HARZQ +Eum8uSR5DO5CQjeIvQpFAkAgZJXAwRxuts/p1EoLuPCJTaDkIY2vc0AJzzr5nuAs +pyjnLYCYqSBUJ+3nDDBqNYpgxCJddzmjNxGuO7mef9Ue +-----END RSA PRIVATE KEY----- diff --git a/testutil/pki/tls-certs.sh b/testutil/pki/tls-certs.sh index 55075df4bd1b7..51671d7595aa0 100644 --- a/testutil/pki/tls-certs.sh +++ b/testutil/pki/tls-certs.sh @@ -4,6 +4,7 @@ mkdir certs certs_by_serial private && chmod 700 private && echo 01 > ./serial && touch ./index.txt && +echo 'unique_subject = no' > index.txt.attr cat >./openssl.conf < ./private/client.pem && +cat ./certs/clientcert.pem ./private/clientkeyenc.pem > ./private/clientenc.pem && +cat ./certs/servercert.pem ./private/serverkey.pem > ./private/server.pem && +cat ./certs/servercertexp.pem ./private/serverkey.pem > ./private/serverexp.pem diff --git a/testutil/tls.go b/testutil/tls.go index 686f327d06f49..6bc8d3a89de17 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -72,6 +72,18 @@ 
func (p *pki) ClientKeyPath() string { return path.Join(p.path, "clientkey.pem") } +func (p *pki) ClientCertAndKeyPath() string { + return path.Join(p.path, "client.pem") +} + +func (p *pki) ClientEncKeyPath() string { + return path.Join(p.path, "clientkeyenc.pem") +} + +func (p *pki) ClientCertAndEncKeyPath() string { + return path.Join(p.path, "clientenc.pem") +} + func (p *pki) ReadServerCert() string { return readCertificate(p.ServerCertPath()) } @@ -88,6 +100,10 @@ func (p *pki) ServerKeyPath() string { return path.Join(p.path, "serverkey.pem") } +func (p *pki) ServerCertAndKeyPath() string { + return path.Join(p.path, "server.pem") +} + func readCertificate(filename string) string { file, err := os.Open(filename) if err != nil { From 8552c1187a124dbcbe35b9bb754349bc5532e49a Mon Sep 17 00:00:00 2001 From: Heiko Schlittermann Date: Fri, 29 Oct 2021 16:05:28 +0200 Subject: [PATCH 012/133] feat: Add use_batch_format for HTTP output plugin (#8184) --- plugins/outputs/http/README.md | 8 ++++- plugins/outputs/http/http.go | 44 +++++++++++++++++------ plugins/outputs/http/http_test.go | 59 +++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index d90192b705a4f..9097792628d66 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -1,7 +1,8 @@ # HTTP Output Plugin This plugin sends metrics in a HTTP message encoded using one of the output -data formats. For data_formats that support batching, metrics are sent in batch format. +data formats. For data_formats that support batching, metrics are sent in +batch format by default. ### Configuration: @@ -49,6 +50,11 @@ data formats. For data_formats that support batching, metrics are sent in batch ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## Use batch serialization format (default) instead of line based format. + ## Batch format is more efficient and should be used unless line based + ## format is really needed. + # use_batch_format = true + ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. # content_encoding = "identity" diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index c94052ea92c1c..b866c60218005 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -8,7 +8,6 @@ import ( "io" "net/http" "strings" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -64,6 +63,11 @@ var sampleConfig = ` ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" + ## Use batch serialization format (default) instead of line based format. + ## Batch format is more efficient and should be used unless line based + ## format is really needed. + # use_batch_format = true + ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. 
# content_encoding = "identity" @@ -80,9 +84,9 @@ var sampleConfig = ` ` const ( - defaultClientTimeout = 5 * time.Second - defaultContentType = "text/plain; charset=utf-8" - defaultMethod = http.MethodPost + defaultContentType = "text/plain; charset=utf-8" + defaultMethod = http.MethodPost + defaultUseBatchFormat = true ) type HTTP struct { @@ -92,6 +96,7 @@ type HTTP struct { Password string `toml:"password"` Headers map[string]string `toml:"headers"` ContentEncoding string `toml:"content_encoding"` + UseBatchFormat bool `toml:"use_batch_format"` httpconfig.HTTPClientConfig Log telegraf.Logger `toml:"-"` @@ -136,12 +141,30 @@ func (h *HTTP) SampleConfig() string { } func (h *HTTP) Write(metrics []telegraf.Metric) error { - reqBody, err := h.serializer.SerializeBatch(metrics) - if err != nil { - return err + var reqBody []byte + + if h.UseBatchFormat { + var err error + reqBody, err = h.serializer.SerializeBatch(metrics) + if err != nil { + return err + } + + return h.write(reqBody) } - return h.write(reqBody) + for _, metric := range metrics { + var err error + reqBody, err = h.serializer.Serialize(metric) + if err != nil { + return err + } + + if err := h.write(reqBody); err != nil { + return err + } + } + return nil } func (h *HTTP) write(reqBody []byte) error { @@ -205,8 +228,9 @@ func (h *HTTP) write(reqBody []byte) error { func init() { outputs.Add("http", func() telegraf.Output { return &HTTP{ - Method: defaultMethod, - URL: defaultURL, + Method: defaultMethod, + URL: defaultURL, + UseBatchFormat: defaultUseBatchFormat, } }) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index d6803eed3211d..a5fc49b84c4f4 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -15,7 +15,9 @@ import ( "github.com/influxdata/telegraf/metric" httpconfig "github.com/influxdata/telegraf/plugins/common/http" oauth "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/influxdata/telegraf/plugins/serializers/json" "github.com/stretchr/testify/require" ) @@ -32,6 +34,15 @@ func getMetric() telegraf.Metric { return m } +func getMetrics(n int) []telegraf.Metric { + m := make([]telegraf.Metric, n) + for n > 0 { + n-- + m[n] = getMetric() + } + return m +} + func TestInvalidMethod(t *testing.T) { plugin := &HTTP{ URL: "", @@ -455,3 +466,51 @@ func TestDefaultUserAgent(t *testing.T) { require.NoError(t, err) }) } + +func TestBatchedUnbatched(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + client := &HTTP{ + URL: u.String(), + Method: defaultMethod, + } + + var s = map[string]serializers.Serializer{ + "influx": influx.NewSerializer(), + "json": func(s serializers.Serializer, err error) serializers.Serializer { + require.NoError(t, err) + return s + }(json.NewSerializer(time.Second, "")), + } + + for name, serializer := range s { + var requests int + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requests++ + w.WriteHeader(http.StatusOK) + }) + + t.Run(name, func(t *testing.T) { + for _, mode := range [...]bool{false, true} { + requests = 0 + client.UseBatchFormat = mode + client.SetSerializer(serializer) + + err = client.Connect() + require.NoError(t, err) + err = client.Write(getMetrics(3)) + require.NoError(t, 
err) + + if client.UseBatchFormat { + require.Equal(t, requests, 1, "batched") + } else { + require.Equal(t, requests, 3, "unbatched") + } + } + }) + } +} From 0ebd2f388d7bf8f3fd6dad3fa76f4342bc0c8ea0 Mon Sep 17 00:00:00 2001 From: reimda Date: Fri, 29 Oct 2021 13:43:38 -0600 Subject: [PATCH 013/133] chore: remove unused dockerfiles, add link in docs (#10013) --- docs/DOCKER.md | 3 +++ scripts/alpine.docker | 18 ------------------ scripts/buster.docker | 15 --------------- scripts/docker-entrypoint.sh | 8 -------- scripts/stretch.docker | 15 --------------- 5 files changed, 3 insertions(+), 56 deletions(-) create mode 100644 docs/DOCKER.md delete mode 100644 scripts/alpine.docker delete mode 100644 scripts/buster.docker delete mode 100755 scripts/docker-entrypoint.sh delete mode 100644 scripts/stretch.docker diff --git a/docs/DOCKER.md b/docs/DOCKER.md new file mode 100644 index 0000000000000..5d0484e10be5a --- /dev/null +++ b/docs/DOCKER.md @@ -0,0 +1,3 @@ +# Telegraf Docker Images + +Docker images for Telegraf are kept in the [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) repo. diff --git a/scripts/alpine.docker b/scripts/alpine.docker deleted file mode 100644 index 84cfcac2268a0..0000000000000 --- a/scripts/alpine.docker +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.17.2 as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN CGO_ENABLED=0 make go-install - -FROM alpine:3.12 -RUN echo 'hosts: files dns' >> /etc/nsswitch.conf -RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \ - update-ca-certificates -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/buster.docker b/scripts/buster.docker deleted file mode 100644 index 17b0cb581cc92..0000000000000 --- a/scripts/buster.docker +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.17.2-buster as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . /go/src/github.com/influxdata/telegraf -RUN make go-install - -FROM buildpack-deps:buster-curl -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] diff --git a/scripts/docker-entrypoint.sh b/scripts/docker-entrypoint.sh deleted file mode 100755 index 6e7580b21a92f..0000000000000 --- a/scripts/docker-entrypoint.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -set -e - -if [ "${1:0:1}" = '-' ]; then - set -- telegraf "$@" -fi - -exec "$@" diff --git a/scripts/stretch.docker b/scripts/stretch.docker deleted file mode 100644 index 39c6e6c1a49d3..0000000000000 --- a/scripts/stretch.docker +++ /dev/null @@ -1,15 +0,0 @@ -FROM golang:1.14.9-stretch as builder -WORKDIR /go/src/github.com/influxdata/telegraf - -COPY . 
/go/src/github.com/influxdata/telegraf -RUN make go-install - -FROM buildpack-deps:stretch-curl -COPY --from=builder /go/bin/* /usr/bin/ -COPY etc/telegraf.conf /etc/telegraf/telegraf.conf - -EXPOSE 8125/udp 8092/udp 8094 - -COPY scripts/docker-entrypoint.sh /entrypoint.sh -ENTRYPOINT ["/entrypoint.sh"] -CMD ["telegraf"] From 43017559fa77149a432f1e842ae9eab9db9b7441 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Fri, 29 Oct 2021 15:46:09 -0600 Subject: [PATCH 014/133] fix: remove release.sh script (#10030) --- scripts/release.sh | 214 --------------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 scripts/release.sh diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100644 index 22cac0a09cf53..0000000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/bin/sh -# -# usage: release.sh BUILD_NUM -# -# Requirements: -# - curl -# - jq -# - sha256sum -# - awscli -# - gpg -# -# CIRCLE_TOKEN set to a CircleCI API token that can list the artifacts. -# -# AWS cli setup to be able to write to the BUCKET. -# -# GPG setup with a signing key. - -BUILD_NUM="${1:?usage: release.sh BUILD_NUM}" -BUCKET="${2:-dl.influxdata.com/telegraf/releases}" - -: ${CIRCLE_TOKEN:?"Must set CIRCLE_TOKEN"} - -tmpdir="$(mktemp -d -t telegraf.XXXXXXXXXX)" - -on_exit() { - rm -rf "$tmpdir" -} -trap on_exit EXIT - -echo "${tmpdir}" -cd "${tmpdir}" || exit 1 - -curl -s -S -L -H Circle-Token:${CIRCLE_TOKEN} \ - "https://circleci.com/api/v2/project/gh/influxdata/telegraf/${BUILD_NUM}/artifacts" \ - -o artifacts || exit 1 - -cat artifacts | jq -r '.items[] | "\(.url) \(.path|ltrimstr("build/dist/"))"' > manifest - -while read url path; -do - echo $url - curl -s -S -L -o "$path" "$url" && - sha256sum "$path" > "$path.DIGESTS" && - gpg --armor --detach-sign "$path.DIGESTS" && - gpg --armor --detach-sign "$path" || exit 1 -done < manifest - -echo -cat *.DIGESTS -echo - -arch() { - case ${1} in - *i386.*) - echo i386;; - *armel.*) - echo armel;; - *armv6hl.*) - echo armv6hl;; - *armhf.*) - echo armhf;; - *arm64.* | *aarch64.*) - echo arm64;; - *amd64.* | *x86_64.*) - echo amd64;; - *s390x.*) - echo s390x;; - *ppc64le.*) - echo ppc64le;; - *mipsel.*) - echo mipsel;; - *mips.*) - echo mips;; - *) - echo unknown - esac -} - -platform() { - case ${1} in - *".rpm") - echo Centos;; - *".deb") - echo Debian;; - *"linux"*) - echo Linux;; - *"freebsd"*) - echo FreeBSD;; - *"darwin"*) - echo Mac OS X;; - *"windows"*) - echo Windows;; - *) - echo unknown;; - esac -} - -echo "Arch | Platform | Package | SHA256" -echo "---| --- | --- | ---" -while read url path; -do - echo "$(arch ${path}) | $(platform ${path}) | [\`${path}\`](https://dl.influxdata.com/telegraf/releases/${path}) | \`$(sha256sum ${path} | cut -f1 -d' ')\`" -done < manifest -echo "" - -package="$(grep *_darwin_amd64.dmg manifest | cut -f2 -d' ')" -cat -< Date: Mon, 1 Nov 2021 19:53:23 +0100 Subject: [PATCH 015/133] fix: correct timezone in intel rdt plugin (#10026) --- plugins/inputs/intel_rdt/intel_rdt.go | 38 +++-- plugins/inputs/intel_rdt/intel_rdt_test.go | 17 +- plugins/inputs/intel_rdt/publisher.go | 100 +++++------ plugins/inputs/intel_rdt/publisher_test.go | 182 ++++++++++----------- 4 files changed, 170 insertions(+), 167 deletions(-) diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 486a13c98c535..d354bb855aacf 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -66,6 +66,12 @@ type processMeasurement 
struct { measurement string } +type splitCSVLine struct { + timeValue string + metricsValues []string + coreOrPIDsValues []string +} + // All gathering is done in the Start function func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { return nil @@ -230,8 +236,8 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s } for _, availableProcess := range availableProcesses { if choice.Contains(availableProcess.Name, providedProcesses) { - PID := availableProcess.PID - mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", PID) + "," + pid := availableProcess.PID + mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", pid) + "," } } for key := range mapProcessPIDs { @@ -258,7 +264,7 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss r.wg.Add(1) defer r.wg.Done() - cmd := exec.Command(r.PqosPath, append(args)...) + cmd := exec.Command(r.PqosPath, args...) if r.UseSudo { // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` @@ -327,13 +333,13 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati if len(r.Processes) != 0 { newMetric := processMeasurement{} - PIDs, err := findPIDsInMeasurement(out) + pids, err := findPIDsInMeasurement(out) if err != nil { r.errorChan <- err break } for processName, PIDsProcess := range processesPIDsAssociation { - if PIDs == PIDsProcess { + if pids == PIDsProcess { newMetric.name = processName newMetric.measurement = out } @@ -482,29 +488,29 @@ func validateAndParseCores(coreStr string) ([]int, error) { func findPIDsInMeasurement(measurements string) (string, error) { // to distinguish PIDs from Cores (PIDs should be in quotes) var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`) - PIDsMatch := insideQuoteRegex.FindStringSubmatch(measurements) - if len(PIDsMatch) < 2 { + pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements) + if len(pidsMatch) < 2 { return "", fmt.Errorf("cannot find PIDs in measurement line") } - PIDs := PIDsMatch[1] - return PIDs, nil + pids := pidsMatch[1] + return pids, nil } -func splitCSVLineIntoValues(line string) (timeValue string, metricsValues, coreOrPIDsValues []string, err error) { +func splitCSVLineIntoValues(line string) (splitCSVLine, error) { values, err := splitMeasurementLine(line) if err != nil { - return "", nil, nil, err + return splitCSVLine{}, err } - timeValue = values[0] + timeValue := values[0] // Because pqos csv format is broken when many cores are involved in PID or // group of PIDs, there is need to work around it. 
E.g.: // Time,PID,Core,IPC,LLC Misses,LLC[KB],MBL[MB/s],MBR[MB/s],MBT[MB/s] // 2020-08-12 13:34:36,"45417,29170,",37,44,0.00,0,0.0,0.0,0.0,0.0 - metricsValues = values[len(values)-numberOfMetrics:] - coreOrPIDsValues = values[1 : len(values)-numberOfMetrics] + metricsValues := values[len(values)-numberOfMetrics:] + coreOrPIDsValues := values[1 : len(values)-numberOfMetrics] - return timeValue, metricsValues, coreOrPIDsValues, nil + return splitCSVLine{timeValue, metricsValues, coreOrPIDsValues}, nil } func validateInterval(interval int32) error { @@ -523,7 +529,7 @@ func splitMeasurementLine(line string) ([]string, error) { } func parseTime(value string) (time.Time, error) { - timestamp, err := time.Parse(timestampFormat, value) + timestamp, err := time.ParseInLocation(timestampFormat, value, time.Local) if err != nil { return time.Time{}, err } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 1eecbc5018125..18dd2e93aa1c1 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -52,18 +52,18 @@ func TestSplitCSVLineIntoValues(t *testing.T) { expectedMetricsValue := []string{"0.00", "0", "0.0", "0.0", "0.0", "0.0"} expectedCoreOrPidsValue := []string{"\"45417", "29170\"", "37", "44"} - timeValue, metricsValue, coreOrPidsValue, err := splitCSVLineIntoValues(line) + splitCSV, err := splitCSVLineIntoValues(line) assert.Nil(t, err) - assert.Equal(t, expectedTimeValue, timeValue) - assert.Equal(t, expectedMetricsValue, metricsValue) - assert.Equal(t, expectedCoreOrPidsValue, coreOrPidsValue) + assert.Equal(t, expectedTimeValue, splitCSV.timeValue) + assert.Equal(t, expectedMetricsValue, splitCSV.metricsValues) + assert.Equal(t, expectedCoreOrPidsValue, splitCSV.coreOrPIDsValues) wrongLine := "2020-08-12 13:34:36,37,44,0.00,0,0.0" - timeValue, metricsValue, coreOrPidsValue, err = splitCSVLineIntoValues(wrongLine) + splitCSV, err = splitCSVLineIntoValues(wrongLine) assert.NotNil(t, err) - assert.Equal(t, "", timeValue) - assert.Nil(t, nil, metricsValue) - assert.Nil(t, nil, coreOrPidsValue) + assert.Equal(t, "", splitCSV.timeValue) + assert.Nil(t, nil, splitCSV.metricsValues) + assert.Nil(t, nil, splitCSV.coreOrPIDsValues) } func TestFindPIDsInMeasurement(t *testing.T) { @@ -107,7 +107,6 @@ func TestCreateArgsCores(t *testing.T) { assert.EqualValues(t, expected, result) cores = []string{"1,2,3", "4,5,6"} - expected = "--mon-core=" expectedPrefix := "--mon-core=" expectedSubstring := "all:[1,2,3];mbt:[1,2,3];" expectedSubstring2 := "all:[4,5,6];mbt:[4,5,6];" diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index a567e1aacb1fa..4fdb91dc7b128 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -5,12 +5,26 @@ package intel_rdt import ( "context" + "errors" "strings" "time" "github.com/influxdata/telegraf" ) +type parsedCoresMeasurement struct { + cores string + values []float64 + time time.Time +} + +type parsedProcessMeasurement struct { + process string + cores string + values []float64 + time time.Time +} + // Publisher for publish new RDT metrics to telegraf accumulator type Publisher struct { acc telegraf.Accumulator @@ -50,48 +64,48 @@ func (p *Publisher) publish(ctx context.Context) { } func (p *Publisher) publishCores(measurement string) { - coresString, values, timestamp, err := parseCoresMeasurement(measurement) + parsedCoresMeasurement, err := parseCoresMeasurement(measurement) if err != nil { 
p.errChan <- err } - p.addToAccumulatorCores(coresString, values, timestamp) + p.addToAccumulatorCores(parsedCoresMeasurement) } func (p *Publisher) publishProcess(measurement processMeasurement) { - process, coresString, values, timestamp, err := parseProcessesMeasurement(measurement) + parsedProcessMeasurement, err := parseProcessesMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorProcesses(process, coresString, values, timestamp) + p.addToAccumulatorProcesses(parsedProcessMeasurement) } -func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { +func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error) { var values []float64 - timeValue, metricsValues, cores, err := splitCSVLineIntoValues(measurements) + splitCSV, err := splitCSVLineIntoValues(measurements) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } // change string slice to one string and separate it by coma - coresString := strings.Join(cores, ",") + coresString := strings.Join(splitCSV.coreOrPIDsValues, ",") // trim unwanted quotes coresString = strings.Trim(coresString, "\"") - for _, metric := range metricsValues { + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } values = append(values, parsedValue) } - return coresString, values, timestamp, nil + return parsedCoresMeasurement{coresString, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -102,41 +116,47 @@ func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, tags := map[string]string{} fields := make(map[string]interface{}) - tags["cores"] = cores + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } } -func parseProcessesMeasurement(measurement processMeasurement) (string, string, []float64, time.Time, error) { - var values []float64 - timeValue, metricsValues, coreOrPidsValues, pids, err := parseProcessMeasurement(measurement.measurement) +func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMeasurement, error) { + splitCSV, err := splitCSVLineIntoValues(measurement.measurement) + if err != nil { + return parsedProcessMeasurement{}, err + } + pids, err := findPIDsInMeasurement(measurement.measurement) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err + } + lenOfPIDs := len(strings.Split(pids, ",")) + if lenOfPIDs > len(splitCSV.coreOrPIDsValues) { + return parsedProcessMeasurement{}, errors.New("detected more pids (quoted) than actual number of pids in csv line") } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } actualProcess := measurement.name - 
lenOfPids := len(strings.Split(pids, ",")) - cores := coreOrPidsValues[lenOfPids:] - coresString := strings.Trim(strings.Join(cores, ","), `"`) + cores := strings.Trim(strings.Join(splitCSV.coreOrPIDsValues[lenOfPIDs:], ","), `"`) - for _, metric := range metricsValues { + var values []float64 + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } values = append(values, parsedValue) } - return actualProcess, coresString, values, timestamp, nil + return parsedProcessMeasurement{actualProcess, cores, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -147,23 +167,11 @@ func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metr tags := map[string]string{} fields := make(map[string]interface{}) - tags["process"] = process - tags["cores"] = cores + tags["process"] = measurement.process + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) - } -} - -func parseProcessMeasurement(measurements string) (string, []string, []string, string, error) { - timeValue, metricsValues, coreOrPidsValues, err := splitCSVLineIntoValues(measurements) - if err != nil { - return "", nil, nil, "", err - } - pids, err := findPIDsInMeasurement(measurements) - if err != nil { - return "", nil, nil, "", err + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } - return timeValue, metricsValues, coreOrPidsValues, pids, nil } diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 7db71e9ac5afa..2529a2235a1b9 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -37,29 +37,29 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.Nil(t, err) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) t.Run("not valid measurement string", func(t *testing.T) { measurement := 
"not, valid, measurement" - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid values string", func(t *testing.T) { measurement := fmt.Sprintf("%s,%s,%s,%s,%f,%f,%f,%f", @@ -72,12 +72,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid timestamp format", func(t *testing.T) { invalidTimestamp := "2020-08-12-21 13:34:" @@ -91,12 +91,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) } @@ -119,44 +119,36 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) newMeasurement := processMeasurement{ name: processName, measurement: measurement, } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + result, err := parseProcessesMeasurement(newMeasurement) assert.Nil(t, err) - assert.Equal(t, processName, actualProcess) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, processName, result.process) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) - t.Run("not valid measurement string", func(t *testing.T) { - processName := "process_name" - measurement := "invalid,measurement,format" - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := 
parseProcessesMeasurement(newMeasurement) - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid timestamp format", func(t *testing.T) { - invalidTimestamp := "2020-20-20-31" - measurement := fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", + invalidTimestamp := "2020-20-20-31" + negativeTests := []struct { + name string + measurement string + }{{ + name: "not valid measurement string", + measurement: "invalid,measurement,format", + }, { + name: "not valid timestamp format", + measurement: fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", invalidTimestamp, pids, cores, @@ -165,44 +157,42 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["LLC"], metricsValues["MBL"], metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid values string", func(t *testing.T) { - measurement := fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", - timestamp, - pids, - cores, - "1##", - "da", - metricsValues["LLC"], - metricsValues["MBL"], - metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + metricsValues["MBT"]), + }, + { + name: "not valid values string", + measurement: fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", + timestamp, + pids, + cores, + "1##", + "da", + metricsValues["LLC"], + metricsValues["MBL"], + metricsValues["MBR"], + metricsValues["MBT"]), + }, + { + name: "not valid csv line with quotes", + measurement: "0000-08-02 0:00:00,,\",,,,,,,,,,,,,,,,,,,,,,,,\",,", + }, + } - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) + for _, test := range negativeTests { + t.Run(test.name, func(t *testing.T) { + newMeasurement := processMeasurement{ + name: processName, + measurement: test.measurement, + } + result, err := parseProcessesMeasurement(newMeasurement) + + assert.NotNil(t, err) + assert.Equal(t, "", result.process) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) + }) + } } func TestAddToAccumulatorCores(t *testing.T) { @@ -212,9 +202,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetrics { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -226,9 +216,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 
34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetricsShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -244,9 +234,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcesses { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -259,9 +249,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcessesShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) From 934db67c2b820da97cbc504c5f2d6815596a0236 Mon Sep 17 00:00:00 2001 From: Scott Anderson Date: Mon, 1 Nov 2021 13:45:21 -0600 Subject: [PATCH 016/133] fix: update influxdb input schema documentation (#10029) --- plugins/inputs/influxdb/README.md | 264 ++++++++++++++++++++++++------ 1 file changed, 216 insertions(+), 48 deletions(-) diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 9a2db484601fd..8ba686aab1bd1 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -41,54 +41,222 @@ InfluxDB-formatted endpoints. See below for more information. **Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: -- influxdb - - n_shards: The total number of shards in the specified database. -- influxdb_ae _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. -- influxdb_cluster _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. -- influxdb_cq: The metrics related to continuous queries (CQs). -- influxdb_database: The database metrics are being collected from. -- influxdb_hh _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. -- influxdb_hh_database _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. -- influxdb_hh_processor _(Enterprise Only)_ : Statistics stored for a single queue (shard). -- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. -- influxdb_measurement: The measurement that metrics are collected from. -- influxdb_memstats: Statistics about the memory allocator in the specified database. - - heap_inuse: The number of bytes in in-use spans. - - heap_released: The number of bytes of physical memory returned to the OS. - - mspan_inuse: The number of bytes in in-use mspans. 
- - total_alloc: The cumulative bytes allocated for heap objects. - - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. - - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.) - - frees: The cumulative number of freed (live) heap objects. - - heap_idle: The number of bytes of idle heap objects. - - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds. - - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. - - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. - - mcache_sys: The bytes of memory obtained from the OS for mcache structures. - - next_gc: The target heap size of the next garbage collection cycle. - - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle. - - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys. - - alloc: The currently allocated number of bytes of heap objects. - - stack_inuse: The number of bytes in in-use stacks. - - stack_sys: The total number of bytes of memory obtained from the stack in use. - - buck_hash_sys: The bytes of memory in profiling bucket hash tables. - - gc_sys: The bytes of memory in garbage collection metadata. - - num_gc: The number of completed garbage collection cycles. - - heap_alloc: The size, in bytes, of all heap objects. - - heap_objects: The number of allocated heap objects. - - mspan_sys: The bytes of memory obtained from the OS for mspan. - - mcache_inuse: The bytes of allocated mcache structures. - - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch). -- influxdb_queryExecutor: Query Executor metrics of the InfluxDB engine. -- influxdb_rpc _(Enterprise Only)_ : Statistics are related to the use of RPC calls within InfluxDB Enterprise clusters. -- influxdb_runtime: The shard metrics are collected from. -- influxdb_shard: The shard metrics are collected from. -- influxdb_subscriber: The InfluxDB subscription that metrics are collected from. -- influxdb_tsm1_cache: The TSM cache that metrics are collected from. -- influxdb_tsm1_engine: The TSM storage engine that metrics are collected from. -- influxdb_tsm1_filestore: The TSM file store that metrics are collected from. -- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from. -- influxdb_write: The total writes to the specified database. +- **influxdb_ae** _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. + - **bytesRx**: Number of bytes received by the data node. + - **errors**: Total number of anti-entropy jobs that have resulted in errors. + - **jobs**: Total number of jobs executed by the data node. + - **jobsActive**: Number of active (currently executing) jobs. +- **influxdb_cluster** _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. + - **copyShardReq**: Number of internal requests made to copy a shard from one data node to another. + - **createIteratorReq**: Number of read requests from other data nodes in the cluster. 
+ - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression. + - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node. + - **iteratorCostReq**: Number of internal requests for iterator cost. + - **removeShardReq**: Number of internal requests to delete a shard from this data node. Exclusively incremented by use of the influxd-ctl remove shard command. + - **writeShardFail**: Total number of internal write requests from a remote node that failed. + - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success. + - **writeShardReq**: Number of internal write requests from a remote data node, regardless of success. +- **influxdb_cq**: Metrics related to continuous queries (CQs). + - **queryFail**: Total number of continuous queries that executed but failed. + - **queryOk**: Total number of continuous queries that executed successfully. +- **influxdb_database**: The database that metrics are collected from. + - **numMeasurements**: Current number of measurements in the specified database. + - **numSeries**: Current series cardinality of the specified database. +- **influxdb_hh** _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. + - **writeShardReq**: Number of initial write requests handled by the hinted handoff engine for a remote node. + - **writeShardReqPoints**: Number of write requests for each point in the initial request to the hinted handoff engine for a remote node. +- **influxdb_hh_database** _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node. + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard). + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files.
+ - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node. + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_httpd**: Metrics related to the InfluxDB HTTP server. + - **authFail**: Number of HTTP requests that were aborted due to authentication being required, but not supplied or incorrect. + - **clientError**: Number of HTTP responses due to client errors, with a 4XX HTTP status code. + - **fluxQueryReq**: Number of Flux query requests served. + - **fluxQueryReqDurationNs**: Duration (wall-time), in nanoseconds, spent executing Flux query requests. + - **pingReq**: Number of times InfluxDB HTTP server served the /ping HTTP endpoint. + - **pointsWrittenDropped**: Number of points dropped by the storage engine. + - **pointsWrittenFail**: Number of points accepted by the HTTP /write endpoint, but unable to be persisted. + - **pointsWrittenOK**: Number of points successfully accepted and persisted by the HTTP /write endpoint. + - **promReadReq**: Number of read requests to the Prometheus /read endpoint. + - **promWriteReq**: Number of write requests to the Prometheus /write endpoint. + - **queryReq**: Number of query requests. + - **queryReqDurationNs**: Total query request duration, in nanoseconds (ns). + - **queryRespBytes**: Total number of bytes returned in query responses. + - **recoveredPanics**: Total number of panics recovered by the HTTP handler. + - **req**: Total number of HTTP requests served. + - **reqActive**: Number of currently active requests. + - **reqDurationNs**: Duration (wall time), in nanoseconds, spent inside HTTP requests. + - **serverError**: Number of HTTP responses due to server errors. + - **statusReq**: Number of status requests served using the HTTP /status endpoint. + - **valuesWrittenOK**: Number of values (fields) successfully accepted and persisted by the HTTP /write endpoint. + - **writeReq**: Number of write requests served using the HTTP /write endpoint. + - **writeReqActive**: Number of currently active write requests. + - **writeReqBytes**: Total number of bytes of line protocol data received by write requests, using the HTTP /write endpoint. + - **writeReqDurationNs**: Duration, in nanoseconds, of write requests served using the /write HTTP endpoint. +- **influxdb_memstats**: Statistics about the memory allocator in the specified database. + - **Alloc**: Number of bytes allocated to heap objects. + - **BuckHashSys**: Number of bytes of memory in profiling bucket hash tables. + - **Frees**: Cumulative count of heap objects freed. + - **GCCPUFraction**: Fraction of InfluxDB's available CPU time used by the garbage collector (GC) since InfluxDB started. + - **GCSys**: Number of bytes of memory in garbage collection metadata. + - **HeapAlloc**: Number of bytes of allocated heap objects. + - **HeapIdle**: Number of bytes in idle (unused) spans.
+ - **HeapInuse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. + - **LastGC**: Time the last garbage collection finished. + - **Lookups**: Number of pointer lookups performed by the runtime. + - **MCacheInuse**: Number of bytes of allocated mcache structures. + - **MCacheSys**: Number of bytes of memory obtained from the OS for mcache structures. + - **MSpanInuse**: Number of bytes of allocated mspan structures. + - **MSpanSys**: Number of bytes of memory obtained from the OS for mspan structures. + - **Mallocs**: Cumulative count of heap objects allocated. + - **NextGC**: Target heap size of the next GC cycle. + - **NumForcedGC**: Number of GC cycles that were forced by the application calling the GC function. + - **NumGC**: Number of completed GC cycles. + - **OtherSys**: Number of bytes of memory in miscellaneous off-heap runtime allocations. + - **PauseTotalNs**: Cumulative nanoseconds in GC stop-the-world pauses since the program started. + - **StackInuse**: Number of bytes in stack spans. + - **StackSys**: Number of bytes of stack memory obtained from the OS. + - **Sys**: Total bytes of memory obtained from the OS. + - **TotalAlloc**: Cumulative bytes allocated for heap objects. +- **influxdb_queryExecutor**: Metrics related to usage of the Query Executor of the InfluxDB engine. + - **queriesActive**: Number of active queries currently being handled. + - **queriesExecuted**: Number of queries executed (started). + - **queriesFinished**: Number of queries that have finished executing. + - **queryDurationNs**: Total duration, in nanoseconds, of executed queries. + - **recoveredPanics**: Number of panics recovered by the Query Executor. +- **influxdb_rpc** _(Enterprise Only)_ : Statistics related to the use of RPC calls within InfluxDB Enterprise clusters. + - **idleStreams**: Number of idle multiplexed streams across all live TCP connections. + - **liveConnections**: Current number of live TCP connections to other nodes. + - **liveStreams**: Current number of live multiplexed streams across all live TCP connections. + - **rpcCalls**: Total number of RPC calls made to remote nodes. + - **rpcFailures**: Total number of RPC failures, which are RPCs that did not recover. + - **rpcReadBytes**: Total number of RPC bytes read. + - **rpcRetries**: Total number of RPC calls that retried at least once. + - **rpcWriteBytes**: Total number of RPC bytes written. + - **singleUse**: Total number of single-use connections opened using Dial. + - **singleUseOpen**: Number of single-use connections currently open. + - **totalConnections**: Total number of TCP connections that have been established. + - **totalStreams**: Total number of streams established. +- **influxdb_runtime**: Subset of memstat record statistics for the Go memory allocator. + - **Alloc**: Currently allocated number of bytes of heap objects. + - **Frees**: Cumulative number of freed (live) heap objects. + - **HeapAlloc**: Size, in bytes, of all heap objects. + - **HeapIdle**: Number of bytes of idle heap objects. + - **HeapInUse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. 
+ - **Lookups**: Number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. + - **Mallocs**: Total number of heap objects allocated. The number of live objects is Mallocs - Frees. + - **NumGC**: Number of completed GC (garbage collection) cycles. + - **NumGoroutine**: Total number of Go routines. + - **PauseTotalNs**: Total duration, in nanoseconds, of total GC (garbage collection) pauses. + - **Sys**: Total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. + - **TotalAlloc**: Total number of bytes allocated for heap objects. This statistic does not decrease when objects are freed. +- **influxdb_shard**: Metrics related to InfluxDB shards. + - **diskBytes**: Size, in bytes, of the shard, including the size of the data directory and the WAL directory. + - **fieldsCreate**: Number of fields created. + - **indexType**: Type of index (inmem or tsi1). + - **n_shards**: Total number of shards in the specified database. + - **seriesCreate**: Number of series created. + - **writeBytes**: Number of bytes written to the shard. + - **writePointsDropped**: Number of requests to write points dropped from a write. + - **writePointsErr**: Number of requests to write points that failed to be written due to errors. + - **writePointsOk**: Number of points written successfully. + - **writeReq**: Total number of write requests. + - **writeReqErr**: Total number of write requests that failed due to errors. + - **writeReqOk**: Total number of successful write requests. +- **influxdb_subscriber**: InfluxDB subscription metrics. + - **createFailures**: Number of subscriptions that failed to be created. + - **pointsWritten**: Total number of points that were successfully written to subscribers. + - **writeFailures**: Total number of batches that failed to be written to subscribers. +- **influxdb_tsm1_cache**: TSM cache metrics. + - **cacheAgeMs**: Duration, in milliseconds, since the cache was last snapshotted at sample time. + - **cachedBytes**: Total number of bytes that have been written into snapshots. + - **diskBytes**: Size, in bytes, of on-disk snapshots. + - **memBytes**: Size, in bytes, of in-memory cache. + - **snapshotCount**: Current level (number) of active snapshots. + - **WALCompactionTimeMs**: Duration, in milliseconds, that the commit lock is held while compacting snapshots. + - **writeDropped**: Total number of writes dropped due to timeouts. + - **writeErr**: Total number of writes that failed. + - **writeOk**: Total number of successful writes. +- **influxdb_tsm1_engine**: TSM storage engine metrics. + - **cacheCompactionDuration** Duration (wall time), in nanoseconds, spent in cache compactions. + - **cacheCompactionErr** Number of cache compactions that have failed due to errors. + - **cacheCompactions** Total number of cache compactions that have ever run. + - **cacheCompactionsActive** Number of cache compactions that are currently running. + - **tsmFullCompactionDuration** Duration (wall time), in nanoseconds, spent in full compactions. + - **tsmFullCompactionErr** Total number of TSM full compactions that have failed due to errors. + - **tsmFullCompactionQueue** Current number of pending TSM full compactions. + - **tsmFullCompactions** Total number of TSM full compactions that have ever run. + - **tsmFullCompactionsActive** Number of TSM full compactions currently running.
+ - **tsmLevel1CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 1 compactions. + - **tsmLevel1CompactionErr** Total number of TSM level 1 compactions that have failed due to errors. + - **tsmLevel1CompactionQueue** Current number of pending TSM level 1 compactions. + - **tsmLevel1Compactions** Total number of TSM level 1 compactions that have ever run. + - **tsmLevel1CompactionsActive** Number of TSM level 1 compactions that are currently running. + - **tsmLevel2CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 2 compactions. + - **tsmLevel2CompactionErr** Number of TSM level 2 compactions that have failed due to errors. + - **tsmLevel2CompactionQueue** Current number of pending TSM level 2 compactions. + - **tsmLevel2Compactions** Total number of TSM level 2 compactions that have ever run. + - **tsmLevel2CompactionsActive** Number of TSM level 2 compactions that are currently running. + - **tsmLevel3CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 3 compactions. + - **tsmLevel3CompactionErr** Number of TSM level 3 compactions that have failed due to errors. + - **tsmLevel3CompactionQueue** Current number of pending TSM level 3 compactions. + - **tsmLevel3Compactions** Total number of TSM level 3 compactions that have ever run. + - **tsmLevel3CompactionsActive** Number of TSM level 3 compactions that are currently running. + - **tsmOptimizeCompactionDuration** Duration (wall time), in nanoseconds, spent during TSM optimize compactions. + - **tsmOptimizeCompactionErr** Total number of TSM optimize compactions that have failed due to errors. + - **tsmOptimizeCompactionQueue** Current number of pending TSM optimize compactions. + - **tsmOptimizeCompactions** Total number of TSM optimize compactions that have ever run. + - **tsmOptimizeCompactionsActive** Number of TSM optimize compactions that are currently running. +- **influxdb_tsm1_filestore**: The TSM file store metrics. + - **diskBytes**: Size, in bytes, of disk usage by the TSM file store. + - **numFiles**: Total number of files in the TSM file store. +- **influxdb_tsm1_wal**: The TSM Write Ahead Log (WAL) metrics. + - **currentSegmentDiskBytes**: Current size, in bytes, of the segment disk. + - **oldSegmentDiskBytes**: Size, in bytes, of the segment disk. + - **writeErr**: Number of writes that failed due to errors. + - **writeOK**: Number of writes that succeeded. +- **influxdb_write**: Metrics related to InfluxDB writes. + - **pointReq**: Total number of points requested to be written. + - **pointReqHH** _(Enterprise only)_: Total number of points received for write by this node and then enqueued into hinted handoff for the destination node. + - **pointReqLocal** _(Enterprise only)_: Total number of point requests that have been attempted to be written into a shard on the same (local) node. + - **pointReqRemote** _(Enterprise only)_: Total number of points received for write by this node but needed to be forwarded into a shard on a remote node. + - **pointsWrittenOK**: Number of points written to the HTTP /write endpoint and persisted successfully. + - **req**: Total number of batches requested to be written. + - **subWriteDrop**: Total number of batches that failed to be sent to the subscription dispatcher. + - **subWriteOk**: Total number of batches successfully sent to the subscription dispatcher. + - **valuesWrittenOK**: Number of values (fields) written to the HTTP /write endpoint and persisted successfully. 
+ - **writeDrop**: Total number of write requests for points that have been dropped due to timestamps not matching any existing retention policies. + - **writeError**: Total number of batches of points that were not successfully written, due to a failure to write to a local or remote shard. + - **writeOk**: Total number of batches of points written at the requested consistency level. + - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node, but did not meet the requested consistency level. + - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration. ### Example Output: From e6b107b0621fb3b3ada6670b35f208ab68b76ccd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 2 Nov 2021 15:42:22 +0100 Subject: [PATCH 017/133] fix: Linter fixes for plugins/inputs/[n-o]* (#10011) --- plugins/inputs/nats/nats.go | 9 ++-- plugins/inputs/neptune_apex/neptune_apex.go | 4 +- plugins/inputs/net_response/net_response.go | 25 +++++---- plugins/inputs/nginx/nginx_test.go | 6 +-- .../nginx_plus_api/nginx_plus_api_metrics.go | 10 ++-- .../nginx_upstream_check.go | 12 ++--- plugins/inputs/nsd/nsd.go | 16 +++--- plugins/inputs/nsd/nsd_test.go | 15 +++--- .../inputs/nsq_consumer/nsq_consumer_test.go | 21 ++++---- plugins/inputs/nstat/nstat.go | 12 ++--- plugins/inputs/ntpq/ntpq.go | 2 +- plugins/inputs/ntpq/ntpq_test.go | 30 +++++------ plugins/inputs/opcua/opcua_client.go | 9 ++-- plugins/inputs/opcua/opcua_client_test.go | 23 ++++---- plugins/inputs/openldap/openldap.go | 11 ++-- plugins/inputs/openldap/openldap_test.go | 28 +++++----- plugins/inputs/openntpd/openntpd_test.go | 53 +++++++++---------- plugins/inputs/opensmtpd/opensmtpd_test.go | 11 ++-- .../opentelemetry/opentelemetry_test.go | 21 ++++---- .../inputs/openweathermap/openweathermap.go | 16 +++--- 20 files changed, 169 insertions(+), 165 deletions(-) diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 7144355096b4e..c9e99824d4de5 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -11,10 +11,11 @@ import ( "path" "time" + gnatsd "github.com/nats-io/nats-server/v2/server" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { @@ -41,16 +42,16 @@ func (n *Nats) Description() string { } func (n *Nats) Gather(acc telegraf.Accumulator) error { - url, err := url.Parse(n.Server) + address, err := url.Parse(n.Server) if err != nil { return err } - url.Path = path.Join(url.Path, "varz") + address.Path = path.Join(address.Path, "varz") if n.client == nil { n.client = n.createHTTPClient() } - resp, err := n.client.Get(url.String()) + resp, err := n.client.Get(address.String()) if err != nil { return err } diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index c2bb05384d7c8..a8934bd01ee94 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -245,7 +245,7 @@ func findProbe(probe string, probes []probe) int { // returns a time.Time struct. 
func parseTime(val string, tz float64) (time.Time, error) { // Magic time constant from https://golang.org/pkg/time/#Parse - const TimeLayout = "01/02/2006 15:04:05 -0700" + const timeLayout = "01/02/2006 15:04:05 -0700" // Timezone offset needs to be explicit sign := '+' @@ -256,7 +256,7 @@ func parseTime(val string, tz float64) (time.Time, error) { // Build a time string with the timezone in a format Go can parse. tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) ts := fmt.Sprintf("%s %s", val, tzs) - t, err := time.Parse(TimeLayout, ts) + t, err := time.Parse(timeLayout, ts) if err != nil { return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) } diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index a7fcec4353c81..043a3c44760ed 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -17,10 +17,10 @@ type ResultType uint64 const ( Success ResultType = 0 - Timeout = 1 - ConnectionFailed = 2 - ReadFailed = 3 - StringMismatch = 4 + Timeout ResultType = 1 + ConnectionFailed ResultType = 2 + ReadFailed ResultType = 3 + StringMismatch ResultType = 4 ) // NetResponse struct @@ -120,8 +120,8 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er setResult(ReadFailed, fields, tags, n.Expect) } else { // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(data) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -186,8 +186,8 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er } // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(buf)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(string(buf)) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -232,22 +232,25 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} var returnTags map[string]string + // Gather data - if n.Protocol == "tcp" { + switch n.Protocol { + case "tcp": returnTags, fields, err = n.TCPGather() if err != nil { return err } tags["protocol"] = "tcp" - } else if n.Protocol == "udp" { + case "udp": returnTags, fields, err = n.UDPGather() if err != nil { return err } tags["protocol"] = "udp" - } else { + default: return errors.New("bad protocol") } + // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index db30304dcc15a..5a947e7e202e0 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const nginxSampleResponse = ` @@ -33,7 +33,7 @@ func TestNginxTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 5cd7e76aec439..81f747d86d825 100644 
--- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -49,11 +49,11 @@ func addError(acc telegraf.Accumulator, err error) { } func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) - resp, err := n.client.Get(url) + address := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) + resp, err := n.client.Get(address) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", address, err) } defer resp.Body.Close() @@ -64,7 +64,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { // features are either optional, or only available in some versions return nil, errNotFound default: - return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", address, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] @@ -77,7 +77,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { return body, nil default: - return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + return nil, fmt.Errorf("%s returned unexpected content type %s", address, contentType) } } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 42e0cab62d53e..8ad8cc91e8a9e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -121,7 +121,7 @@ func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error { +func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{}) error { var method string if check.Method != "" { method = check.Method @@ -129,7 +129,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e method = "GET" } - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest(method, address, nil) if err != nil { return err } @@ -153,7 +153,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -187,10 +187,10 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error return nil } -func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { +func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJSONData(url, checkData) + err := check.gatherJSONData(address, checkData) if err != nil { return err } @@ -201,7 +201,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegr "type": server.Type, "name": server.Name, "port": strconv.Itoa(int(server.Port)), - "url": url, + "url": address, } fields := map[string]interface{}{ diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index f75f700eaa2f9..6c8998129cf90 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -61,20 +61,20 @@ func (s *NSD) SampleConfig() string { } // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server string, configFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if server != "" { + host, port, err := net.SplitHostPort(server) if err == nil { - Server = host + "@" + port + server = host + "@" + port } - cmdArgs = append([]string{"-s", Server}, cmdArgs...) + cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) } cmd := exec.Command(cmdName, cmdArgs...) 
@@ -119,7 +119,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { fieldValue, err := strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v", + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) continue } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index d64cad7dcea63..74f4a14cf96fa 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,16 +3,13 @@ package nsd import ( "bytes" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,13 +23,13 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("nsd")) - assert.True(t, acc.HasMeasurement("nsd_servers")) + require.True(t, acc.HasMeasurement("nsd")) + require.True(t, acc.HasMeasurement("nsd_servers")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 99, acc.NFields()) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 99, acc.NFields()) acc.AssertContainsFields(t, "nsd", parsedFullOutput) acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index d5086862bbf7e..4c6d944746440 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -11,10 +11,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" "github.com/nsqio/go-nsq" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) // This test is modeled after the kafka consumer integration test @@ -36,7 +37,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) { } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") - newMockNSQD(script, addr.String()) + newMockNSQD(t, script, addr.String()) consumer := &NSQConsumer{ Log: testutil.Logger{}, @@ -76,6 +77,8 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { ticker := time.NewTicker(5 * time.Millisecond) defer ticker.Stop() counter := 0 + + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: @@ -89,16 +92,15 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { } } -func newMockNSQD(script []instruction, addr string) *mockNSQD { +func newMockNSQD(t *testing.T, script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) - } + require.NoError(t, err, "listen (%s) failed", n.tcpAddr.String()) + n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) @@ -139,6 +141,7 @@ func (n *mockNSQD) handle(conn net.Conn) { buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { + //nolint:revive // log.Fatalf called intentionally log.Fatalf("ERROR: failed to read 
protocol version - %s", err) } @@ -171,14 +174,14 @@ func (n *mockNSQD) handle(conn net.Conn) { l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } case bytes.Equal(params[0], []byte("RDY")): diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 4408b8f728579..b5ada855479c9 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -138,10 +138,10 @@ func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { if bytes.Equal(fields[i+1], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(fields[i])] = int64(0) - continue } + + entries[string(fields[i])] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) @@ -176,10 +176,10 @@ func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { if bytes.Equal(metrics[j], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(append(prefix, headers[j]...))] = int64(0) - continue } + + entries[string(append(prefix, headers[j]...))] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(metrics[j]), 10, 64) diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index a952783a344a6..6b924fc52298a 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -50,7 +50,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Due to problems with a parsing, we have to use regexp expression in order // to remove string that starts from '(' and ends with space // see: https://github.com/influxdata/telegraf/issues/2386 - reg, err := regexp.Compile("\\s+\\([\\S]*") + reg, err := regexp.Compile(`\s+\([\S]*`) if err != nil { return err } diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index b0db77e45784f..54d4e10e717ac 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -20,7 +20,7 @@ func TestSingleNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -49,7 +49,7 @@ func TestBadIntNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -77,7 +77,7 @@ func TestBadFloatNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(2), @@ -105,7 +105,7 @@ func TestDaysNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(172800), @@ -134,7 +134,7 @@ func TestHoursNTPQ(t 
*testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(7200), @@ -163,7 +163,7 @@ func TestMinutesNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(120), @@ -192,7 +192,7 @@ func TestBadWhenNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(256), @@ -222,7 +222,7 @@ func TestParserNTPQ(t *testing.T) { n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(64), @@ -285,7 +285,7 @@ func TestMultiNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "delay": float64(54.033), @@ -329,7 +329,7 @@ func TestBadHeaderNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -357,7 +357,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -378,13 +378,13 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { func TestFailedNTPQ(t *testing.T) { tt := tester{ ret: []byte(singleNTPQ), - err: fmt.Errorf("Test failure"), + err: fmt.Errorf("test failure"), } n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) } // It is possible for the output of ntqp to be missing the refid column. This diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index d59adc453ba8b..97bfa3709c113 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -10,6 +10,7 @@ import ( "github.com/gopcua/opcua" "github.com/gopcua/opcua/ua" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" @@ -242,14 +243,14 @@ func (o *OpcUA) validateEndpoint() error { //search security policy type switch o.SecurityPolicy { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": - break + // Valid security policy type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) } //search security mode type switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": - break + // Valid security mode type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) } @@ -384,7 +385,7 @@ func (o *OpcUA) validateOPCTags() error { //search identifier type switch node.tag.IdentifierType { case "s", "i", "g", "b": - break + // Valid identifier type - do nothing. 
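Go `switch` cases never fall through implicitly, so the bare `break` statements removed in the OPC UA validation code above were no-ops that linters flag as redundant control flow. A minimal standalone sketch of the resulting style (the policy names mirror the hunk, everything else is illustrative):

```go
package main

import "fmt"

// validatePolicy shows the shape after the lint fix: each valid case ends
// with a comment instead of a redundant "break", and only the default
// branch returns an error.
func validatePolicy(policy string) error {
	switch policy {
	case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto":
		// Valid security policy type - do nothing.
	default:
		return fmt.Errorf("invalid security type '%s'", policy)
	}
	return nil
}

func main() {
	fmt.Println(validatePolicy("auto"))    // <nil>
	fmt.Println(validatePolicy("Invalid")) // invalid security type 'Invalid'
}
```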
default: return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) } @@ -468,7 +469,7 @@ func (o *OpcUA) setupOptions() error { if o.Certificate == "" && o.PrivateKey == "" { if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, 365*24*time.Hour) if err != nil { return err } diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 4c7805578b114..27bfc1ecf4342 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -6,11 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type OPCTags struct { @@ -137,30 +136,30 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] func TestTagsSliceToMap(t *testing.T) { m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) - assert.NoError(t, err) - assert.Len(t, m, 2) - assert.Equal(t, m["foo"], "bar") - assert.Equal(t, m["baz"], "bat") + require.NoError(t, err) + require.Len(t, m, 2) + require.Equal(t, m["foo"], "bar") + require.Equal(t, m["baz"], "bat") } func TestTagsSliceToMap_twoStrings(t *testing.T) { var err error _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) - assert.Error(t, err) + require.Error(t, err) _, err = tagsSliceToMap([][]string{{"foo"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_dupeKey(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_empty(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", ""}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty value"), err) _, err = tagsSliceToMap([][]string{{"", "bar"}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty name"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty name"), err) } func TestValidateOPCTags(t *testing.T) { diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index f3f7b47cf597c..7a3f766718c52 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,10 +5,11 @@ import ( "strconv" "strings" + "gopkg.in/ldap.v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/ldap.v3" ) type Openldap struct { @@ -110,13 +111,15 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - if o.TLS == "ldaps" { + + switch o.TLS { + case "ldaps": l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig) if err != nil { acc.AddError(err) return nil } - } else if o.TLS == "starttls" { + case "starttls": l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port)) if err != nil { acc.AddError(err) @@ -127,7 +130,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - } else { + default: acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } diff --git 
a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index b3e171b22e9db..ac9e810f0b49e 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,10 +4,10 @@ import ( "strconv" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/ldap.v3" + + "github.com/influxdata/telegraf/testutil" ) func TestOpenldapMockResult(t *testing.T) { @@ -45,9 +45,9 @@ func TestOpenldapNoConnectionIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { @@ -108,9 +108,9 @@ func TestOpenldapInvalidSSLIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapBindIntegration(t *testing.T) { @@ -132,11 +132,11 @@ func TestOpenldapBindIntegration(t *testing.T) { } func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { - assert.Empty(t, acc.Errors, "accumulator had no errors") - assert.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") - assert.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") - assert.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") - assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") + require.Empty(t, acc.Errors, "accumulator had no errors") + require.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") + require.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") + require.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") + require.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } func TestOpenldapReverseMetricsIntegration(t *testing.T) { @@ -155,5 +155,5 @@ func TestOpenldapReverseMetricsIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") + require.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") } diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index f26419a71101e..ffca02b31a908 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,16 +3,13 @@ package openntpd import ( "bytes" "testing" - "time" - 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,11 +23,11 @@ func TestParseSimpleOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -57,11 +54,11 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -89,11 +86,11 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -117,11 +114,11 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -159,11 +156,11 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -187,11 +184,11 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(20)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(20)) - assert.Equal(t, acc.NFields(), 113) + require.Equal(t, acc.NFields(), 113) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index fb3afa82e0171..3b625be51cef2 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -4,9 +4,10 @@ import ( "bytes" "testing" + 
"github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { @@ -22,11 +23,11 @@ func TestFilterSomeStats(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("opensmtpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("opensmtpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 36) + require.Equal(t, acc.NFields(), 36) acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput) } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 2de35bb06af50..8df1273bef8c4 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -5,10 +5,6 @@ import ( "net" "testing" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/metric" @@ -18,6 +14,10 @@ import ( "go.opentelemetry.io/otel/sdk/metric/selector/simple" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" ) func TestOpenTelemetry(t *testing.T) { @@ -72,12 +72,11 @@ func TestOpenTelemetry(t *testing.T) { // Check - assert.Empty(t, accumulator.Errors) + require.Empty(t, accumulator.Errors) - if assert.Len(t, accumulator.Metrics, 1) { - got := accumulator.Metrics[0] - assert.Equal(t, "measurement-counter", got.Measurement) - assert.Equal(t, telegraf.Counter, got.Type) - assert.Equal(t, "library-name", got.Tags["otel.library.name"]) - } + require.Len(t, accumulator.Metrics, 1) + got := accumulator.Metrics[0] + require.Equal(t, "measurement-counter", got.Measurement) + require.Equal(t, telegraf.Counter, got.Type) + require.Equal(t, "library-name", got.Tags["otel.library.name"]) } diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index fcc22343b435e..c4f2f4f032d7e 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -23,10 +23,10 @@ const ( // The limit of locations is 20. 
owmRequestSeveralCityID int = 20 - defaultBaseURL = "https://api.openweathermap.org/" - defaultResponseTimeout time.Duration = time.Second * 5 - defaultUnits string = "metric" - defaultLang string = "en" + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout = time.Second * 5 + defaultUnits = "metric" + defaultLang = "en" ) type OpenWeatherMap struct { @@ -38,8 +38,8 @@ type OpenWeatherMap struct { ResponseTimeout config.Duration `toml:"response_timeout"` Units string `toml:"units"` - client *http.Client - baseURL *url.URL + client *http.Client + baseParsedURL *url.URL } var sampleConfig = ` @@ -309,7 +309,7 @@ func init() { func (n *OpenWeatherMap) Init() error { var err error - n.baseURL, err = url.Parse(n.BaseURL) + n.baseParsedURL, err = url.Parse(n.BaseURL) if err != nil { return err } @@ -353,5 +353,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseURL.ResolveReference(relative).String() + return n.baseParsedURL.ResolveReference(relative).String() } From c1d4ce4dd548dd67afdd2dd1401920479198103b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 2 Nov 2021 15:49:26 +0100 Subject: [PATCH 018/133] fix: Linter fixes for plugins/inputs/m* (#10006) --- plugins/inputs/mailchimp/chimp_api.go | 30 +-- plugins/inputs/mailchimp/mailchimp.go | 13 +- plugins/inputs/mailchimp/mailchimp_test.go | 45 ++-- plugins/inputs/marklogic/marklogic.go | 8 +- plugins/inputs/mcrouter/mcrouter.go | 21 +- plugins/inputs/mcrouter/mcrouter_test.go | 20 +- plugins/inputs/mdstat/mdstat_test.go | 11 +- plugins/inputs/memcached/memcached_test.go | 6 +- plugins/inputs/mesos/mesos.go | 55 +++-- plugins/inputs/mesos/mesos_test.go | 11 +- plugins/inputs/minecraft/client.go | 10 +- plugins/inputs/mongodb/mongodb_data_test.go | 33 +-- plugins/inputs/mongodb/mongodb_server_test.go | 6 +- plugins/inputs/mongodb/mongostat.go | 8 +- plugins/inputs/mongodb/mongostat_test.go | 40 ++-- plugins/inputs/monit/monit.go | 218 +++++++++--------- plugins/inputs/monit/monit_test.go | 6 +- plugins/inputs/mqtt_consumer/README.md | 2 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 29 +-- plugins/inputs/multifile/multifile_test.go | 10 +- plugins/inputs/mysql/mysql.go | 163 +++++++------ plugins/inputs/mysql/mysql_test.go | 30 +-- 22 files changed, 400 insertions(+), 375 deletions(-) diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 2f6cecdb9e0da..71e7bcea6d535 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,12 +5,13 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "net/url" "regexp" "sync" "time" + + "github.com/influxdata/telegraf" ) const ( @@ -22,11 +23,12 @@ var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper - Debug bool + debug bool sync.Mutex url *url.URL + log telegraf.Logger } type ReportsParams struct { @@ -53,12 +55,12 @@ func (p *ReportsParams) String() string { return v.Encode() } -func NewChimpAPI(apiKey string) *ChimpAPI { +func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI { u := &url.URL{} u.Scheme = "https" u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) - return &ChimpAPI{url: u} + return &ChimpAPI{url: u, log: log} } type APIError struct { @@ -90,7 +92,7 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { a.url.Path = 
reportsEndpoint var response ReportsResponse - rawjson, err := runChimp(a, params) + rawjson, err := a.runChimp(params) if err != nil { return response, err } @@ -109,7 +111,7 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report - rawjson, err := runChimp(a, ReportsParams{}) + rawjson, err := a.runChimp(ReportsParams{}) if err != nil { return response, err } @@ -122,21 +124,21 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { return response, nil } -func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { +func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) { client := &http.Client{ - Transport: api.Transport, + Transport: a.Transport, Timeout: 4 * time.Second, } var b bytes.Buffer - req, err := http.NewRequest("GET", api.url.String(), &b) + req, err := http.NewRequest("GET", a.url.String(), &b) if err != nil { return nil, err } req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") - if api.Debug { - log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String()) + if a.debug { + a.log.Debugf("request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,15 +150,15 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) - return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body) } body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - if api.Debug { - log.Printf("D! 
[inputs.mailchimp] response Body: %q", string(body)) + if a.debug { + a.log.Debugf("response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index fe6892bf48743..b898cb6ba1768 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -14,6 +14,8 @@ type MailChimp struct { APIKey string `toml:"api_key"` DaysOld int `toml:"days_old"` CampaignID string `toml:"campaign_id"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -35,12 +37,13 @@ func (m *MailChimp) Description() string { return "Gathers metrics from the /3.0/reports MailChimp API" } -func (m *MailChimp) Gather(acc telegraf.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.APIKey) - } - m.api.Debug = false +func (m *MailChimp) Init() error { + m.api = NewChimpAPI(m.APIKey, m.Log) + return nil +} + +func (m *MailChimp) Gather(acc telegraf.Accumulator) error { if m.CampaignID == "" { since := "" if m.DaysOld > 0 { diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 1366d8859df5d..1df6c52cf6256 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMailChimpGatherReports(t *testing.T) { @@ -28,7 +28,8 @@ func TestMailChimpGatherReports(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -43,22 +44,22 @@ func TestMailChimpGatherReports(t *testing.T) { tags["campaign_title"] = "Freddie's Jokes Vol. 
1" fields := map[string]interface{}{ - "emails_sent": int(200), - "abuse_reports": int(0), - "unsubscribed": int(2), - "hard_bounces": int(0), - "soft_bounces": int(2), - "syntax_errors": int(0), - "forwards_count": int(0), - "forwards_opens": int(0), - "opens_total": int(186), - "unique_opens": int(100), - "clicks_total": int(42), - "unique_clicks": int(400), - "unique_subscriber_clicks": int(42), - "facebook_recipient_likes": int(5), - "facebook_unique_likes": int(8), - "facebook_likes": int(42), + "emails_sent": 200, + "abuse_reports": 0, + "unsubscribed": 2, + "hard_bounces": 0, + "soft_bounces": 2, + "syntax_errors": 0, + "forwards_count": 0, + "forwards_opens": 0, + "opens_total": 186, + "unique_opens": 100, + "clicks_total": 42, + "unique_clicks": 400, + "unique_subscriber_clicks": 42, + "facebook_recipient_likes": 5, + "facebook_unique_likes": 8, + "facebook_likes": 42, "open_rate": float64(42), "click_rate": float64(42), "industry_open_rate": float64(0.17076777144396), @@ -92,7 +93,8 @@ func TestMailChimpGatherReport(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -157,7 +159,8 @@ func TestMailChimpGatherError(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index d2ef139bfc7a3..30f9ee6403074 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -163,9 +163,9 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { return nil } -func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, address string) error { ml := &MlHost{} - if err := c.gatherJSONData(url, ml); err != nil { + if err := c.gatherJSONData(address, ml); err != nil { return err } @@ -225,8 +225,8 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { return client, nil } -func (c *Marklogic) gatherJSONData(url string, v interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (c *Marklogic) gatherJSONData(address string, v interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index af197c3072089..07599ca2cc0b0 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -146,32 +146,33 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } // ParseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) ParseAddress(address string) (string, string, error) { - var protocol string +func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol string, err error) { var host string var port string - u, parseError := url.Parse(address) + parsedAddress = address + + u, parseError := url.Parse(parsedAddress) if parseError != nil { - return "", "", fmt.Errorf("Invalid server address") + return "", "", fmt.Errorf("invalid server address") } if u.Scheme != "tcp" && u.Scheme != "unix" { - return "", "", fmt.Errorf("Invalid server protocol") + return "", "", fmt.Errorf("invalid server protocol") } protocol = u.Scheme if protocol == "unix" { if u.Path == "" { - return "", "", fmt.Errorf("Invalid unix socket path") + return "", "", fmt.Errorf("invalid unix socket path") } - address = 
u.Path + parsedAddress = u.Path } else { if u.Host == "" { - return "", "", fmt.Errorf("Invalid host") + return "", "", fmt.Errorf("invalid host") } host = u.Hostname() @@ -185,10 +186,10 @@ func (m *Mcrouter) ParseAddress(address string) (string, string, error) { port = defaultServerURL.Port() } - address = host + ":" + port + parsedAddress = host + ":" + port } - return address, protocol, nil + return parsedAddress, protocol, nil } func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index a9b525d46b79c..f02f2b53d4b85 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestAddressParsing(t *testing.T) { @@ -30,17 +30,17 @@ func TestAddressParsing(t *testing.T) { for _, args := range acceptTests { address, protocol, err := m.ParseAddress(args[0]) - assert.Nil(t, err, args[0]) - assert.True(t, address == args[1], args[0]) - assert.True(t, protocol == args[2], args[0]) + require.Nil(t, err, args[0]) + require.Equal(t, args[1], address, args[0]) + require.Equal(t, args[2], protocol, args[0]) } for _, addr := range rejectTests { address, protocol, err := m.ParseAddress(addr) - assert.NotNil(t, err, addr) - assert.Empty(t, address, addr) - assert.Empty(t, protocol, addr) + require.NotNil(t, err, addr) + require.Empty(t, address, addr) + require.Empty(t, protocol, addr) } } @@ -129,11 +129,11 @@ func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) + require.True(t, acc.HasInt64Field("mcrouter", metric), metric) } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("mcrouter", metric), metric) + require.True(t, acc.HasFloatField("mcrouter", metric), metric) } } diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index 070b7ddd234f5..27397f715ad0d 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -7,8 +7,9 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestFullMdstatProcFile(t *testing.T) { @@ -19,7 +20,7 @@ func TestFullMdstatProcFile(t *testing.T) { } acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(10620027200), @@ -46,7 +47,7 @@ func TestFailedDiskMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(5860144128), @@ -73,7 +74,7 @@ func TestEmptyMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) } func TestInvalidMdStatProcFile1(t *testing.T) { @@ -86,7 +87,7 @@ func TestInvalidMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } const mdStatFileFull = ` diff --git a/plugins/inputs/memcached/memcached_test.go 
b/plugins/inputs/memcached/memcached_test.go index 1d0807625b31b..1ebfe65bad6fb 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("memcached", metric), metric) + require.True(t, acc.HasInt64Field("memcached", metric), metric) } } diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 68203c9d480cb..991f8a9fd7003 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "io" - "log" "net" "net/http" "net/url" @@ -23,7 +22,7 @@ type Role string const ( MASTER Role = "master" - SLAVE = "slave" + SLAVE Role = "slave" ) type Mesos struct { @@ -100,7 +99,7 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } -func parseURL(s string, role Role) (*url.URL, error) { +func (m *Mesos) parseURL(s string, role Role) (*url.URL, error) { if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") { host, port, err := net.SplitHostPort(s) // no port specified @@ -115,7 +114,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) + m.Log.Warnf("using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -139,7 +138,7 @@ func (m *Mesos) initialize() error { m.masterURLs = make([]*url.URL, 0, len(m.Masters)) for _, master := range m.Masters { - u, err := parseURL(master, MASTER) + u, err := m.parseURL(master, MASTER) if err != nil { return err } @@ -150,7 +149,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = make([]*url.URL, 0, len(m.Slaves)) for _, slave := range m.Slaves { - u, err := parseURL(slave, SLAVE) + u, err := m.parseURL(slave, SLAVE) if err != nil { return err } @@ -241,11 +240,11 @@ func metricsDiff(role Role, w []string) []string { } // masterBlocks serves as kind of metrics registry grouping them in sets -func getMetrics(role Role, group string) []string { - m := make(map[string][]string) +func (m *Mesos) getMetrics(role Role, group string) []string { + metrics := make(map[string][]string) if role == MASTER { - m["resources"] = []string{ + metrics["resources"] = []string{ "master/cpus_percent", "master/cpus_used", "master/cpus_total", @@ -272,12 +271,12 @@ func getMetrics(role Role, group string) []string { "master/mem_revocable_used", } - m["master"] = []string{ + metrics["master"] = []string{ "master/elected", "master/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -286,7 +285,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["agents"] = []string{ + metrics["agents"] = []string{ "master/slave_registrations", "master/slave_removals", "master/slave_reregistrations", @@ -303,7 +302,7 @@ func getMetrics(role Role, group string) []string { 
"master/slaves_unreachable", } - m["frameworks"] = []string{ + metrics["frameworks"] = []string{ "master/frameworks_active", "master/frameworks_connected", "master/frameworks_disconnected", @@ -314,10 +313,10 @@ func getMetrics(role Role, group string) []string { // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. // These empty groups are included to prevent the "unknown metrics group" info log below. // filterMetrics() filters these metrics by looking for names with the corresponding prefix. - m["framework_offers"] = []string{} - m["allocator"] = []string{} + metrics["framework_offers"] = []string{} + metrics["allocator"] = []string{} - m["tasks"] = []string{ + metrics["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", "master/tasks_finished", @@ -333,7 +332,7 @@ func getMetrics(role Role, group string) []string { "master/tasks_unreachable", } - m["messages"] = []string{ + metrics["messages"] = []string{ "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/invalid_status_update_acknowledgements", @@ -377,14 +376,14 @@ func getMetrics(role Role, group string) []string { "master/valid_operation_status_update_acknowledgements", } - m["evqueue"] = []string{ + metrics["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", "master/operator_event_stream_subscribers", } - m["registrar"] = []string{ + metrics["registrar"] = []string{ "registrar/state_fetch_ms", "registrar/state_store_ms", "registrar/state_store_ms/max", @@ -402,7 +401,7 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/count", } } else if role == SLAVE { - m["resources"] = []string{ + metrics["resources"] = []string{ "slave/cpus_percent", "slave/cpus_used", "slave/cpus_total", @@ -429,12 +428,12 @@ func getMetrics(role Role, group string) []string { "slave/mem_revocable_used", } - m["agent"] = []string{ + metrics["agent"] = []string{ "slave/registered", "slave/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -443,7 +442,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["executors"] = []string{ + metrics["executors"] = []string{ "containerizer/mesos/container_destroy_errors", "slave/container_launch_errors", "slave/executors_preempted", @@ -456,7 +455,7 @@ func getMetrics(role Role, group string) []string { "slave/recovery_errors", } - m["tasks"] = []string{ + metrics["tasks"] = []string{ "slave/tasks_failed", "slave/tasks_finished", "slave/tasks_killed", @@ -466,7 +465,7 @@ func getMetrics(role Role, group string) []string { "slave/tasks_starting", } - m["messages"] = []string{ + metrics["messages"] = []string{ "slave/invalid_framework_messages", "slave/invalid_status_updates", "slave/valid_framework_messages", @@ -474,10 +473,10 @@ func getMetrics(role Role, group string) []string { } } - ret, ok := m[group] + ret, ok := metrics[group] if !ok { - log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group) + m.Log.Infof("unknown role %q metrics group: %s", role, group) return []string{} } @@ -512,7 +511,7 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { // All other metrics have predictable names. We can use getMetrics() to retrieve them. 
default: - for _, v := range getMetrics(role, k) { + for _, v := range m.getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { delete(*metrics, v) } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 4b6d5ab74d371..2605ddd4678c2 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -10,8 +10,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} @@ -340,7 +341,7 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } @@ -357,7 +358,7 @@ func TestMasterFilter(t *testing.T) { // Assert unexpected metrics are not present. for _, v := range b { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } @@ -402,13 +403,13 @@ func TestSlaveFilter(t *testing.T) { m.filterMetrics(SLAVE, &slaveMetrics) for _, v := range b { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 641a8ae75db9f..4aa712d4b04f4 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -45,17 +45,17 @@ func (c *connector) Connect() (Connection, error) { return nil, err } - rcon, err := rcon.NewClient(c.hostname, p) + client, err := rcon.NewClient(c.hostname, p) if err != nil { return nil, err } - _, err = rcon.Authorize(c.password) + _, err = client.Authorize(c.password) if err != nil { return nil, err } - return &connection{rcon: rcon}, nil + return &connection{client: client}, nil } func newClient(connector Connector) *client { @@ -111,11 +111,11 @@ func (c *client) Scores(player string) ([]Score, error) { } type connection struct { - rcon *rcon.Client + client *rcon.Client } func (c *connection) Execute(command string) (string, error) { - packet, err := c.rcon.Execute(command) + packet, err := c.client.Execute(command) if err != nil { return "", err } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 378268916054d..f7f891ec775bf 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -65,7 +66,7 @@ func TestAddNonReplStats(t *testing.T) { d.flush(&acc) for key := range defaultStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -86,7 +87,7 @@ func TestAddReplStats(t *testing.T) { d.flush(&acc) for key := range 
mmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -120,14 +121,14 @@ func TestAddWiredTigerStats(t *testing.T) { d.flush(&acc) for key := range wiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key), key) } for key := range wiredTigerExtStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } - assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) + require.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -147,7 +148,7 @@ func TestAddShardStats(t *testing.T) { d.flush(&acc) for key := range defaultShardStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -170,7 +171,7 @@ func TestAddLatencyStats(t *testing.T) { d.flush(&acc) for key := range defaultLatencyStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -192,7 +193,7 @@ func TestAddAssertsStats(t *testing.T) { d.flush(&acc) for key := range defaultAssertsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -227,7 +228,7 @@ func TestAddCommandsStats(t *testing.T) { d.flush(&acc) for key := range defaultCommandsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -263,7 +264,7 @@ func TestAddTCMallocStats(t *testing.T) { d.flush(&acc) for key := range defaultTCMallocStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -283,7 +284,7 @@ func TestAddStorageStats(t *testing.T) { d.flush(&acc) for key := range defaultStorageStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -313,15 +314,15 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { for key := range shardHostStats { - assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } - assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) + require.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) hostsFound = append(hostsFound, host) } sort.Strings(hostsFound) sort.Strings(expectedHosts) - assert.Equal(t, hostsFound, expectedHosts) + require.Equal(t, hostsFound, expectedHosts) } func TestStateTag(t *testing.T) { @@ -527,7 +528,7 @@ func TestAddTopStats(t *testing.T) { for range topStatLines { for key := range topDataStats { - assert.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_top_stats", key)) } } } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index c8fd9f7c15284..d2313e4088f82 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -6,9 +6,9 @@ package mongodb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGetDefaultTags(t *testing.T) { @@ -37,7 +37,7 @@ func TestAddDefaultStats(t 
*testing.T) { require.NoError(t, err) for key := range defaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ea69c8d424f7c..2490ca2c1777c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -903,7 +903,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) (int64, int64) { +func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) { d := newVal - oldVal if d < 0 { d = newVal @@ -1311,10 +1311,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // I'm the master returnVal.ReplLag = 0 break - } else { - // I'm secondary - me = member } + + // I'm secondary + me = member } else if member.State == 1 { // Master found master = member diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 9f6ef04892ac9..908b82de1b911 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -2,10 +2,8 @@ package mongodb import ( "testing" - //"time" - //"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLatencyStats(t *testing.T) { @@ -55,12 +53,12 @@ func TestLatencyStats(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiffZero(t *testing.T) { @@ -124,12 +122,12 @@ func TestLatencyStatsDiffZero(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiff(t *testing.T) { @@ -193,10 +191,10 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(59177981552)) - assert.Equal(t, sl.ReadLatency, int64(2255946760057)) - assert.Equal(t, sl.WriteLatency, int64(494479456987)) - assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, sl.CommandLatency, int64(59177981552)) + require.Equal(t, sl.ReadLatency, int64(2255946760057)) + require.Equal(t, sl.WriteLatency, int64(494479456987)) + require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) } diff --git a/plugins/inputs/monit/monit.go 
b/plugins/inputs/monit/monit.go index 1cb1a4ba57da9..051e0b36982fe 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -6,23 +6,24 @@ import ( "net/http" "time" + "golang.org/x/net/html/charset" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "golang.org/x/net/html/charset" ) const ( - fileSystem string = "0" - directory = "1" - file = "2" - process = "3" - remoteHost = "4" - system = "5" - fifo = "6" - program = "7" - network = "8" + fileSystem = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" ) var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} @@ -244,108 +245,109 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - if resp.StatusCode == 200 { - var status Status - decoder := xml.NewDecoder(resp.Body) - decoder.CharsetReader = charset.NewReaderLabel - if err := decoder.Decode(&status); err != nil { - return fmt.Errorf("error parsing input: %v", err) - } + if resp.StatusCode != 200 { + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) + } - tags := map[string]string{ - "version": status.Server.Version, - "source": status.Server.LocalHostname, - "platform_name": status.Platform.Name, - } + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } + + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } - for _, service := range status.Services { - fields := make(map[string]interface{}) - tags["status"] = serviceStatus(service) - fields["status_code"] = service.Status - tags["pending_action"] = pendingAction(service) - fields["pending_action_code"] = service.PendingAction - tags["monitoring_status"] = monitoringStatus(service) - fields["monitoring_status_code"] = service.MonitoringStatus - tags["monitoring_mode"] = monitoringMode(service) - fields["monitoring_mode_code"] = service.MonitorMode - tags["service"] = service.Name - if service.Type == fileSystem { - fields["mode"] = service.Mode - fields["block_percent"] = service.Block.Percent - fields["block_usage"] = service.Block.Usage - fields["block_total"] = service.Block.Total - fields["inode_percent"] = service.Inode.Percent - fields["inode_usage"] = service.Inode.Usage - fields["inode_total"] = service.Inode.Total - acc.AddFields("monit_filesystem", fields, tags) - } else if service.Type == directory { - fields["mode"] = service.Mode - acc.AddFields("monit_directory", fields, tags) - } else if service.Type == file { - fields["size"] = service.Size - fields["mode"] = service.Mode - acc.AddFields("monit_file", fields, tags) - } else if service.Type == process { - fields["cpu_percent"] = service.CPU.Percent - fields["cpu_percent_total"] = service.CPU.PercentTotal - fields["mem_kb"] = service.Memory.Kilobyte - fields["mem_kb_total"] = service.Memory.KilobyteTotal - fields["mem_percent"] = service.Memory.Percent - fields["mem_percent_total"] = service.Memory.PercentTotal - fields["pid"] = service.Pid - fields["parent_pid"] = service.ParentPid - fields["threads"] = service.Threads - 
fields["children"] = service.Children - acc.AddFields("monit_process", fields, tags) - } else if service.Type == remoteHost { - fields["remote_hostname"] = service.Port.Hostname - fields["port_number"] = service.Port.PortNumber - fields["request"] = service.Port.Request - fields["response_time"] = service.Port.ResponseTime - fields["protocol"] = service.Port.Protocol - fields["type"] = service.Port.Type - acc.AddFields("monit_remote_host", fields, tags) - } else if service.Type == system { - fields["cpu_system"] = service.System.CPU.System - fields["cpu_user"] = service.System.CPU.User - fields["cpu_wait"] = service.System.CPU.Wait - fields["cpu_load_avg_1m"] = service.System.Load.Avg01 - fields["cpu_load_avg_5m"] = service.System.Load.Avg05 - fields["cpu_load_avg_15m"] = service.System.Load.Avg15 - fields["mem_kb"] = service.System.Memory.Kilobyte - fields["mem_percent"] = service.System.Memory.Percent - fields["swap_kb"] = service.System.Swap.Kilobyte - fields["swap_percent"] = service.System.Swap.Percent - acc.AddFields("monit_system", fields, tags) - } else if service.Type == fifo { - fields["mode"] = service.Mode - acc.AddFields("monit_fifo", fields, tags) - } else if service.Type == program { - fields["program_started"] = service.Program.Started * 10000000 - fields["program_status"] = service.Program.Status - acc.AddFields("monit_program", fields, tags) - } else if service.Type == network { - fields["link_state"] = service.Link.State - fields["link_speed"] = service.Link.Speed - fields["link_mode"] = linkMode(service) - fields["download_packets_now"] = service.Link.Download.Packets.Now - fields["download_packets_total"] = service.Link.Download.Packets.Total - fields["download_bytes_now"] = service.Link.Download.Bytes.Now - fields["download_bytes_total"] = service.Link.Download.Bytes.Total - fields["download_errors_now"] = service.Link.Download.Errors.Now - fields["download_errors_total"] = service.Link.Download.Errors.Total - fields["upload_packets_now"] = service.Link.Upload.Packets.Now - fields["upload_packets_total"] = service.Link.Upload.Packets.Total - fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now - fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total - fields["upload_errors_now"] = service.Link.Upload.Errors.Now - fields["upload_errors_total"] = service.Link.Upload.Errors.Total - acc.AddFields("monit_network", fields, tags) - } + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = 
service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) } - } else { - return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) } + return nil } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index b3bbed79f68e1..ef47575e80b4c 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type transportMock 
struct { @@ -632,7 +632,7 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { require.NoError(t, r.Init()) err := r.Gather(&acc) - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestInvalidXMLAndInvalidTypes(t *testing.T) { diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index a9e8236ee0cf5..3fd128eb85e10 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -8,7 +8,7 @@ and creates metrics using one of the supported [input data formats][]. ```toml [[inputs.mqtt_consumer]] ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 815f27a727abf..3e88cecbbce45 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -9,6 +9,7 @@ import ( "time" mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -64,15 +65,15 @@ type MQTTConsumer struct { Log telegraf.Logger - clientFactory ClientFactory - client Client - opts *mqtt.ClientOptions - acc telegraf.TrackingAccumulator - state ConnectionState - sem semaphore - messages map[telegraf.TrackingID]bool - messagesMutex sync.Mutex - topicTag string + clientFactory ClientFactory + client Client + opts *mqtt.ClientOptions + acc telegraf.TrackingAccumulator + state ConnectionState + sem semaphore + messages map[telegraf.TrackingID]bool + messagesMutex sync.Mutex + chosenTopicTag string ctx context.Context cancel context.CancelFunc @@ -80,7 +81,7 @@ type MQTTConsumer struct { var sampleConfig = ` ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. 
## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] @@ -174,9 +175,9 @@ func (m *MQTTConsumer) Init() error { return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } - m.topicTag = "topic" + m.chosenTopicTag = "topic" if m.TopicTag != nil { - m.topicTag = *m.TopicTag + m.chosenTopicTag = *m.TopicTag } opts, err := m.createOpts() @@ -284,10 +285,10 @@ func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Mess return err } - if m.topicTag != "" { + if m.chosenTopicTag != "" { topic := msg.Topic() for _, metric := range metrics { - metric.AddTag(m.topicTag, topic) + metric.AddTag(m.chosenTopicTag, topic) } } diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go index b12f29f35c2cd..214cebd136f9c 100644 --- a/plugins/inputs/multifile/multifile_test.go +++ b/plugins/inputs/multifile/multifile_test.go @@ -5,9 +5,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestFileTypes(t *testing.T) { @@ -32,8 +32,8 @@ func TestFileTypes(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + require.Equal(t, map[string]interface{}{ "examplebool": true, "examplestring": "hello world", "exampleint": int64(123456), @@ -60,7 +60,7 @@ func FailEarly(failEarly bool, t *testing.T) error { err := m.Gather(&acc) if err == nil { - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "exampleint": int64(123456), }, acc.Metrics[0].Fields) } diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 6e81b3df2f757..28313b25534aa 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -905,6 +906,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return err } defer rows.Close() + var ( command string state string @@ -948,6 +950,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. 
if err != nil { return err } + defer connRows.Close() for connRows.Next() { var user string @@ -1812,90 +1815,100 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula } for _, database := range dbList { - rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + err := m.gatherSchemaForDB(db, database, servtag, acc) if err != nil { return err } - defer rows.Close() - var ( - tableSchema string - tableName string - tableType string - engine string - version float64 - rowFormat string - tableRows float64 - dataLength float64 - indexLength float64 - dataFree float64 - createOptions string + } + return nil +} + +func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, acc telegraf.Accumulator) error { + rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + if err != nil { + return err + } + defer rows.Close() + + var ( + tableSchema string + tableName string + tableType string + engine string + version float64 + rowFormat string + tableRows float64 + dataLength float64 + indexLength float64 + dataFree float64 + createOptions string + ) + + for rows.Next() { + err = rows.Scan( + &tableSchema, + &tableName, + &tableType, + &engine, + &version, + &rowFormat, + &tableRows, + &dataLength, + &indexLength, + &dataFree, + &createOptions, ) - for rows.Next() { - err = rows.Scan( - &tableSchema, - &tableName, - &tableType, - &engine, - &version, - &rowFormat, - &tableRows, - &dataLength, - &indexLength, - &dataFree, - &createOptions, - ) - if err != nil { - return err - } - tags := map[string]string{"server": servtag} - tags["schema"] = tableSchema - tags["table"] = tableName - - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_rows"), - map[string]interface{}{"value": tableRows}, tags) - - dlTags := copyTags(tags) - dlTags["component"] = "data_length" - acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), - map[string]interface{}{"value": dataLength}, dlTags) - - ilTags := copyTags(tags) - ilTags["component"] = "index_length" - acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), - map[string]interface{}{"value": indexLength}, ilTags) - - dfTags := copyTags(tags) - dfTags["component"] = "data_free" - acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), - map[string]interface{}{"value": dataFree}, dfTags) - } else { - acc.AddFields("mysql_table_schema", - map[string]interface{}{"rows": tableRows}, tags) + if err != nil { + return err + } + tags := map[string]string{"server": servtag} + tags["schema"] = tableSchema + tags["table"] = tableName - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_length": dataLength}, tags) + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) + + dlTags := copyTags(tags) + dlTags["component"] = "data_length" + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) + + ilTags := copyTags(tags) + ilTags["component"] = "index_length" + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) + + dfTags := copyTags(tags) + dfTags["component"] = "data_free" + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) + } else { + acc.AddFields("mysql_table_schema", + map[string]interface{}{"rows": tableRows}, tags) - 
acc.AddFields("mysql_table_schema", - map[string]interface{}{"index_length": indexLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_length": dataLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_free": dataFree}, tags) - } + acc.AddFields("mysql_table_schema", + map[string]interface{}{"index_length": indexLength}, tags) - versionTags := copyTags(tags) - versionTags["type"] = tableType - versionTags["engine"] = engine - versionTags["row_format"] = rowFormat - versionTags["create_options"] = createOptions + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_free": dataFree}, tags) + } - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_version"), - map[string]interface{}{"value": version}, versionTags) - } else { - acc.AddFields("mysql_table_schema_version", - map[string]interface{}{"table_version": version}, versionTags) - } + versionTags := copyTags(tags) + versionTags["type"] = tableType + versionTags["engine"] = engine + versionTags["row_format"] = rowFormat + versionTags["create_options"] = createOptions + + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) + } else { + acc.AddFields("mysql_table_schema_version", + map[string]interface{}{"table_version": version}, versionTags) } } return nil diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 0cdcd4b1cd345..410f80213252f 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -5,9 +5,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMysqlDefaultsToLocalIntegration(t *testing.T) { @@ -23,7 +23,7 @@ func TestMysqlDefaultsToLocalIntegration(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) } func TestMysqlMultipleInstancesIntegration(t *testing.T) { @@ -43,9 +43,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { var acc, acc2 testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) // acc should have global variables - assert.True(t, acc.HasMeasurement("mysql_variables")) + require.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ Servers: []string{testServer}, @@ -53,9 +53,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { } err = m2.Gather(&acc2) require.NoError(t, err) - assert.True(t, acc2.HasMeasurement("mysql")) + require.True(t, acc2.HasMeasurement("mysql")) // acc2 should not have global variables - assert.False(t, acc2.HasMeasurement("mysql_variables")) + require.False(t, acc2.HasMeasurement("mysql_variables")) } func TestMysqlMultipleInits(t *testing.T) { @@ -65,16 +65,16 @@ func TestMysqlMultipleInits(t *testing.T) { m2 := &Mysql{} m.InitMysql() - assert.True(t, m.initDone) - assert.False(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.False(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) m2.InitMysql() - assert.True(t, m.initDone) - assert.True(t, m2.initDone) - 
assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.True(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) } func TestMysqlGetDSNTag(t *testing.T) { From a2cf4fb98f0ecb3cf3da11540cd66e35aef7375b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jean-S=C3=A9bastien=20Dupuy?= Date: Tue, 2 Nov 2021 15:50:38 +0100 Subject: [PATCH 019/133] docs: add elastic pool in supported versions in sqlserver (#10044) --- plugins/inputs/sqlserver/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index c92f0db9af2f3..c818b4b6d01b3 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -10,6 +10,7 @@ lightweight and use Dynamic Management Views supplied by SQL Server. - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance +- Azure SQL Elastic Pool ### Additional Setup From 79dadd3da88e705ccc62ecee1414177360a0e06c Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 2 Nov 2021 15:03:24 -0500 Subject: [PATCH 020/133] chore: add Super Linter Github Action (#10014) --- .github/workflows/golangci-lint.yml | 35 ----------------- .github/workflows/linter.yml | 59 +++++++++++++++++++++++++++++ .markdownlint.yml | 3 ++ Makefile | 12 +++++- README.md | 18 +++++---- docs/developers/REVIEWS.md | 17 ++++++++- 6 files changed, 99 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/golangci-lint.yml create mode 100644 .github/workflows/linter.yml create mode 100644 .markdownlint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index d4eac0d328059..0000000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: golangci-lint -on: - push: - branches: - - master - pull_request: - branches: - - master - schedule: - # Trigger every day at 16:00 UTC - - cron: '0 16 * * *' -jobs: - golangci-pr: - if: github.ref != 'refs/heads/master' - name: lint-pr-changes - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 - with: - version: v1.42.1 - only-new-issues: true - golangci-master: - if: github.ref == 'refs/heads/master' - name: lint-master-all - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: golangci-lint - uses: golangci/golangci-lint-action@v2 - with: - version: v1.42.1 - only-new-issues: true - args: --issues-exit-code=0 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000000000..21cdd54d7176e --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,59 @@ +--- +################################# +################################# +## Super Linter GitHub Actions ## +################################# +################################# +name: Lint Code Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + push: + branches-ignore: [master, main] + # Remove the line above to run when pushing to master + pull_request: + 
branches: [master, main] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: github/super-linter@v4 + env: + VALIDATE_ALL_CODEBASE: false + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LINTER_RULES_PATH: '.' + MARKDOWN_CONFIG_FILE: .markdownlint.yml + VALIDATE_MARKDOWN: true + VALIDATE_GO: true diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000000..1344b312f825e --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,3 @@ +{ + "MD013": false +} diff --git a/Makefile b/Makefile index 09a6babaee73f..7b91fa1edcfec 100644 --- a/Makefile +++ b/Makefile @@ -140,9 +140,12 @@ vet: .PHONY: lint-install lint-install: - + @echo "Installing golangci-lint" go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 + @echo "Installing markdownlint" + npm install -g markdownlint-cli + .PHONY: lint lint: ifeq (, $(shell which golangci-lint)) @@ -152,6 +155,13 @@ endif golangci-lint run +ifeq (, $(shell which markdownlint-cli)) + $(info markdownlint-cli can't be found, please run: make lint-install) + exit 1 +endif + + markdownlint-cli + .PHONY: lint-branch lint-branch: ifeq (, $(shell which golangci-lint)) diff --git a/README.md b/README.md index 03d7428c12591..94762148d04ea 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,7 @@ [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) +[![GitHub Super-Linter](https://github.com/influxdata/telegraf/workflows/Lint%20Code%20Base/badge.svg)](https://github.com/marketplace/actions/super-linter) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Based on a plugin system to enable developers in the community to easily add support for additional @@ -74,11 +75,14 @@ Telegraf requires Go version 1.17 or newer, the Makefile requires GNU make. 1. [Install Go](https://golang.org/doc/install) >=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: - ``` + + ```shell git clone https://github.com/influxdata/telegraf.git ``` + 3. 
Run `make` from the source directory - ``` + + ```shell cd telegraf make ``` @@ -106,31 +110,31 @@ See usage with: telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file ```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb output plugins defined ```shell telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputting metrics to stdout: +### Run a single telegraf collection, outputting metrics to stdout ```shell telegraf --config telegraf.conf --test ``` -#### Run telegraf with all plugins defined in config file: +### Run telegraf with all plugins defined in config file ```shell telegraf --config telegraf.conf ``` -#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: +### Run telegraf, enabling the cpu & memory input, and influxdb output plugins ```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index 0f036d225b7ba..49107c03f9da9 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -9,7 +9,9 @@ All pull requests should follow the style and best practices in the document. ## Process + The review process is roughly structured as follows: + 1. Submit a pull request. Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code on as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter-issues. 1. Get feedback from a first reviewer and a `ready for final review` tag. @@ -21,6 +23,7 @@ It might take some time until your PR gets merged, depending on the release cycl your pull-request (bugfix, enhancement of existing code, new plugin, etc). Remember, it might be necessary to rebase your code before merge to resolve conflicts. Please read the review comments carefully, fix the related part of the code and/or respond in case there is anything unclear. If there is no activity in a pull-request or the contributor does not respond, we apply the following scheme: + 1. We send a first reminder after at least 2 weeks of inactivity. 1. After at least another two weeks of inactivity we send a second reminder and are setting the `waiting for response` tag. 1. Another two weeks later we will ask the community for help setting the `help wanted` reminder. @@ -34,10 +37,13 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - SampleConfig must match the readme, but not include the plugin name. - structs should include toml tags for fields that are expected to be editable from the config. eg `toml:"command"` (snake_case) - plugins that want to log should declare the Telegraf logger, not use the log package. eg: + ```Go Log telegraf.Logger `toml:"-"` ``` + (in tests, you can do `myPlugin.Log = testutil.Logger{}`) + - Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. - `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start. 
- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. @@ -67,6 +73,9 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously. - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes. +## Linting + +Each pull request will have the appriopriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-pinter](https://github.com/github/super-linter). If it is failing you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme. ## Testing @@ -82,6 +91,7 @@ used for assertions within the tests when possible, with preference towards github.com/stretchr/testify/require. Primarily use the require package to avoid cascading errors: + ```go assert.Equal(t, lhs, rhs) # avoid require.Equal(t, lhs, rhs) # good @@ -96,6 +106,7 @@ Ensure the [[SampleConfig]] and match with the current standards. READMEs should: + - be spaces, not tabs - be indented consistently, matching other READMEs - have two `#` for comments @@ -121,7 +132,8 @@ Metrics use `snake_case` naming style. Generally enumeration data should be encoded as a tag. In some cases it may be desirable to also include the data as an integer field: -``` + +```shell net_response,result=success result_code=0i ``` @@ -129,7 +141,8 @@ net_response,result=success result_code=0i Use tags for each range with the `le` tag, and `+Inf` for the values out of range. This format is inspired by the Prometheus project: -``` + +```shell cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000 cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000 cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000 From 5b1c9f3c4c7d83ca3df19a319ed110fbb3938b6d Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 3 Nov 2021 12:41:07 -0600 Subject: [PATCH 021/133] fix: update readme.md to point at latest docs URL --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 94762148d04ea..d1f5908a9021d 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ## Documentation -[Latest Release Documentation](https://docs.influxdata.com/telegraf) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) For documentation on the latest development code see the [documentation index](/docs). 
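The testing guidance added to REVIEWS.md above is what motivates the assert-to-require conversions scattered through this series (the monit, multifile and mysql test diffs). Below is a minimal sketch of a plugin test written to that guideline; the `mock` package, the `MockPlugin` type and its fields are hypothetical and only illustrate the pattern of toml-tagged config fields, the declared Telegraf logger, and `require`-based assertions.

```go
// Hypothetical sketch only: the mock package and MockPlugin type are not part
// of Telegraf; they illustrate the review guidelines from REVIEWS.md.
package mock

import (
	"testing"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
	"github.com/stretchr/testify/require"
)

// MockPlugin follows the plugin conventions: editable settings carry toml
// tags and the Telegraf logger is declared instead of the standard log package.
type MockPlugin struct {
	Command string          `toml:"command"`
	Log     telegraf.Logger `toml:"-"`
}

// Init only checks configuration; it must not reach out to external services.
func (m *MockPlugin) Init() error { return nil }

// Gather emits a single metric into the accumulator.
func (m *MockPlugin) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("mock", map[string]interface{}{"value": int64(42)}, map[string]string{"source": "test"})
	return nil
}

// TestGather asserts with require rather than assert, so a failure aborts the
// test immediately instead of cascading into follow-up errors.
func TestGather(t *testing.T) {
	plugin := &MockPlugin{Log: testutil.Logger{}}
	require.NoError(t, plugin.Init())

	var acc testutil.Accumulator
	require.NoError(t, plugin.Gather(&acc))

	require.True(t, acc.HasMeasurement("mock"))
	require.Equal(t, map[string]string{"source": "test"}, acc.Metrics[0].Tags)
	require.Equal(t, map[string]interface{}{"value": int64(42)}, acc.Metrics[0].Fields)
}
```

Because every check uses require, the first failed precondition stops the test before later assertions can dereference missing metrics, which is exactly the cascading-error problem the review document calls out.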
From 00325f20c00e828608043f505687b829139cfcca Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 3 Nov 2021 15:57:44 -0500 Subject: [PATCH 022/133] chore: only check new issues with Go linter (#10054) --- .github/workflows/golangci-lint.yml | 35 +++++++++++++++++++++++++++++ .github/workflows/linter.yml | 1 - README.md | 1 - 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/golangci-lint.yml diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000000..d4eac0d328059 --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,35 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: + branches: + - master + schedule: + # Trigger every day at 16:00 UTC + - cron: '0 16 * * *' +jobs: + golangci-pr: + if: github.ref != 'refs/heads/master' + name: lint-pr-changes + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.42.1 + only-new-issues: true + golangci-master: + if: github.ref == 'refs/heads/master' + name: lint-master-all + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.42.1 + only-new-issues: true + args: --issues-exit-code=0 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 21cdd54d7176e..8ba9ae2944823 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -56,4 +56,3 @@ jobs: LINTER_RULES_PATH: '.' MARKDOWN_CONFIG_FILE: .markdownlint.yml VALIDATE_MARKDOWN: true - VALIDATE_GO: true diff --git a/README.md b/README.md index d1f5908a9021d..122b20839db6b 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,6 @@ [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) -[![GitHub Super-Linter](https://github.com/influxdata/telegraf/workflows/Lint%20Code%20Base/badge.svg)](https://github.com/marketplace/actions/super-linter) Telegraf is an agent for collecting, processing, aggregating, and writing metrics. 
Based on a plugin system to enable developers in the community to easily add support for additional From b4cafff535ced6d999e5e851e5fa6a94f0b122f4 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 3 Nov 2021 16:11:47 -0600 Subject: [PATCH 023/133] fix: remove telegraflinter from in-tree (#10053) --- .golangci.yml | 4 --- go.mod | 2 +- telegraflinter/README.md | 31 ------------------ telegraflinter/telegraflinter.go | 54 -------------------------------- 4 files changed, 1 insertion(+), 90 deletions(-) delete mode 100644 telegraflinter/README.md delete mode 100644 telegraflinter/telegraflinter.go diff --git a/.golangci.yml b/.golangci.yml index 470fc116bfb37..a4d14ddd80362 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -21,10 +21,6 @@ linters: - varcheck linters-settings: - # custom: - # telegraflinter: - # path: telegraflinter.so - # description: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review" revive: rules: - name: argument-limit diff --git a/go.mod b/go.mod index ef426a7d1a8ed..7b54ae3c6e47a 100644 --- a/go.mod +++ b/go.mod @@ -291,7 +291,7 @@ require ( golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.5 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 diff --git a/telegraflinter/README.md b/telegraflinter/README.md deleted file mode 100644 index b049cf6446bc6..0000000000000 --- a/telegraflinter/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Private linter for Telegraf - -The purpose of this linter is to enforce the review criteria for the Telegraf project, outlined here: https://github.com/influxdata/telegraf/wiki/Review. This is currently not compatible with the linter running in the CI and can only be ran locally. - -## Running it locally - -To use the Telegraf linter, you need a binary of golangci-lint that was compiled with CGO enabled. Currently no release is provided with it enabled, therefore you will need to clone the source code and compile it yourself. You can run the following commands to achieve this: - -1. `git clone https://github.com/sspaink/golangci-lint.git` -2. `cd golangci-lint` -3. `git checkout tags/v1.39.0 -b 1390` -4. `CGO_ENABLED=true go build -o golangci-lint-cgo ./cmd/golangci-lint` - -You will now have the binary you need to run the Telegraf linter. The Telegraf linter will now need to be compiled as a plugin to get a *.so file. [Currently plugins are only supported on Linux, FreeBSD, and macOS](https://golang.org/pkg/plugin/). From the root of the Telegraf project, you can run the following commands to compile the linter and run it: - -1. `CGO_ENABLED=true go build -buildmode=plugin telegraflinter/telegraflinter.go` -2. In the .golanci-lint file: - * uncomment the `custom` section under the `linters-settings` section - * uncomment `telegraflinter` under the `enable` section -3. `golanci-lint-cgo run` - -*Note:* If you made a change to the telegraf linter and want to run it again, be sure to clear the [cache directory](https://golang.org/pkg/os/#UserCacheDir). On unix systems you can run `rm -rf ~/.cache/golangci-lint` otherwise it will seem like nothing changed. 
- -## Requirement - -This linter lives in the Telegraf repository and is compiled to become a Go plugin, any packages used in the linter *MUST* match the version in the golanci-lint otherwise there will be issues. For example the import `golang.org/x/tools v0.1.0` needs to match what golangci-lint is using. - -## Useful references - -* https://golangci-lint.run/contributing/new-linters/#how-to-add-a-private-linter-to-golangci-lint -* https://github.com/golangci/example-plugin-linter diff --git a/telegraflinter/telegraflinter.go b/telegraflinter/telegraflinter.go deleted file mode 100644 index b295327f8eed5..0000000000000 --- a/telegraflinter/telegraflinter.go +++ /dev/null @@ -1,54 +0,0 @@ -// This must be package main -package main - -import ( - "go/ast" - "strings" - - "golang.org/x/tools/go/analysis" -) - -type analyzerPlugin struct{} - -// This must be implemented -func (*analyzerPlugin) GetAnalyzers() []*analysis.Analyzer { - return []*analysis.Analyzer{ - TelegrafAnalyzer, - } -} - -// This must be defined and named 'AnalyzerPlugin' -var AnalyzerPlugin analyzerPlugin - -var TelegrafAnalyzer = &analysis.Analyzer{ - Name: "telegraflinter", - Doc: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review", - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - for _, file := range pass.Files { - ast.Inspect(file, func(n ast.Node) bool { - checkLogImport(n, pass) - return true - }) - } - return nil, nil -} - -func checkLogImport(n ast.Node, pass *analysis.Pass) { - if !strings.HasPrefix(pass.Pkg.Path(), "github.com/influxdata/telegraf/plugins/") { - return - } - if importSpec, ok := n.(*ast.ImportSpec); ok { - if importSpec.Path != nil && strings.HasPrefix(importSpec.Path.Value, "\"log\"") { - pass.Report(analysis.Diagnostic{ - Pos: importSpec.Pos(), - End: 0, - Category: "log", - Message: "Don't use log package in plugin, use the Telegraf logger.", - SuggestedFixes: nil, - }) - } - } -} From 60400662ea9aeafe0122e92226d025e897c2cbd1 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Thu, 4 Nov 2021 11:40:11 -0500 Subject: [PATCH 024/133] chore: don't trigger share-artifacts if no go files changed (#10060) --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index e5d535bf41115..3fa611f8b26fb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -399,6 +399,8 @@ jobs: share-artifacts: executor: aws-cli/default steps: + - checkout + - check-changed-files-or-halt - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} From fb5b541b1a3c97f0b9f280b86e62b1a86f0c8e64 Mon Sep 17 00:00:00 2001 From: Sven Rebhan <36194019+srebhan@users.noreply.github.com> Date: Thu, 4 Nov 2021 17:45:52 +0100 Subject: [PATCH 025/133] feat: Extend regexp processor do allow renaming of measurements, tags and fields (#9561) --- plugins/processors/regex/README.md | 36 +- plugins/processors/regex/regex.go | 216 +++++++- plugins/processors/regex/regex_test.go | 658 ++++++++++++++++++++++--- 3 files changed, 827 insertions(+), 83 deletions(-) diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md index a6cef82a09142..578ed13d067c6 100644 --- a/plugins/processors/regex/README.md +++ b/plugins/processors/regex/README.md @@ -4,6 +4,8 @@ The `regex` plugin transforms tag and field values with regex pattern. 
If `resul For tags transforms, if `append` is set to `true`, it will append the transformation to the existing tag value, instead of overwriting it. +For metrics transforms, `key` denotes the element that should be transformed. Furthermore, `result_key` allows control over the behavior applied in case the resulting `tag` or `field` name already exists. + ### Configuration: ```toml @@ -38,6 +40,38 @@ For tags transforms, if `append` is set to `true`, it will append the transforma pattern = ".*category=(\\w+).*" replacement = "${1}" result_key = "search_category" + + # Rename metric fields + [[processors.regex.field_rename]] + ## Regular expression to match on a field name + pattern = "^search_(\\w+)d$" + ## Matches of the pattern will be replaced with this string. Use ${1} + ## notation to use the text of the first submatch. + replacement = "${1}" + ## If the new field name already exists, you can either "overwrite" the + ## existing one with the value of the renamed field OR you can "keep" + ## both the existing and source field. + # result_key = "keep" + + # Rename metric tags + # [[processors.regex.tag_rename]] + # ## Regular expression to match on a tag name + # pattern = "^search_(\\w+)d$" + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. + # replacement = "${1}" + # ## If the new tag name already exists, you can either "overwrite" the + # ## existing one with the value of the renamed tag OR you can "keep" + # ## both the existing and source tag. + # # result_key = "keep" + + # Rename metrics + # [[processors.regex.metric_rename]] + # ## Regular expression to match on an metric name + # pattern = "^search_(\\w+)d$" + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. + # replacement = "${1}" ``` ### Tags: @@ -46,5 +80,5 @@ No tags are applied by this processor. 
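### Rename Example:

A minimal sketch of the new rename converters, reusing the pattern from the plugin's own tests; the metric below is a simplified version of the access-log example used elsewhere in this document:

```toml
[[processors.regex.tag_rename]]
  ## Rename tag keys such as "resp_code" to "code"; tag values are untouched.
  pattern = "^resp_(\\w+)$"
  replacement = "${1}"
```

With that rule the first line below becomes the second. Since `result_key` defaults to `keep`, a pre-existing `code` tag would be preserved and the colliding `resp_code` tag left in place instead of being overwritten.

```
nginx_requests,verb=GET,resp_code=2xx request="/api/search/?category=plugins&q=regex&sort=asc" 1519652321000000000
nginx_requests,verb=GET,code=2xx request="/api/search/?category=plugins&q=regex&sort=asc" 1519652321000000000
```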
### Example Output: ``` -nginx_requests,verb=GET,resp_code=2xx request="/api/search/?category=plugins&q=regex&sort=asc",method="/search/",search_category="plugins",referrer="-",ident="-",http_version=1.1,agent="UserAgent",client_ip="127.0.0.1",auth="-",resp_bytes=270i 1519652321000000000 +nginx_requests,verb=GET,resp_code=2xx request="/api/search/?category=plugins&q=regex&sort=asc",method="/search/",category="plugins",referrer="-",ident="-",http_version=1.1,agent="UserAgent",client_ip="127.0.0.1",auth="-",resp_bytes=270i 1519652321000000000 ``` diff --git a/plugins/processors/regex/regex.go b/plugins/processors/regex/regex.go index 47b53546f4ffe..4cf0c985646fe 100644 --- a/plugins/processors/regex/regex.go +++ b/plugins/processors/regex/regex.go @@ -1,24 +1,30 @@ package regex import ( + "fmt" "regexp" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/processors" ) type Regex struct { - Tags []converter - Fields []converter - regexCache map[string]*regexp.Regexp + Tags []converter `toml:"tags"` + Fields []converter `toml:"fields"` + TagRename []converter `toml:"tag_rename"` + FieldRename []converter `toml:"field_rename"` + MetricRename []converter `toml:"metric_rename"` + Log telegraf.Logger `toml:"-"` + regexCache map[string]*regexp.Regexp } type converter struct { - Key string - Pattern string - Replacement string - ResultKey string - Append bool + Key string `toml:"key"` + Pattern string `toml:"pattern"` + Replacement string `toml:"replacement"` + ResultKey string `toml:"result_key"` + Append bool `toml:"append"` } const sampleConfig = ` @@ -50,12 +56,105 @@ const sampleConfig = ` # pattern = ".*category=(\\w+).*" # replacement = "${1}" # result_key = "search_category" + + ## Rename metric fields + # [[processors.regex.field_rename]] + # ## Regular expression to match on a field name + # pattern = "^search_(\\w+)d$" + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. + # replacement = "${1}" + # ## If the new field name already exists, you can either "overwrite" the + # ## existing one with the value of the renamed field OR you can "keep" + # ## both the existing and source field. + # # result_key = "keep" + + ## Rename metric tags + # [[processors.regex.tag_rename]] + # ## Regular expression to match on a tag name + # pattern = "^search_(\\w+)d$" + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. + # replacement = "${1}" + # ## If the new tag name already exists, you can either "overwrite" the + # ## existing one with the value of the renamed tag OR you can "keep" + # ## both the existing and source tag. + # # result_key = "keep" + + ## Rename metrics + # [[processors.regex.metric_rename]] + # ## Regular expression to match on an metric name + # pattern = "^search_(\\w+)d$" + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. 
+ # replacement = "${1}" ` -func NewRegex() *Regex { - return &Regex{ - regexCache: make(map[string]*regexp.Regexp), +func (r *Regex) Init() error { + r.regexCache = make(map[string]*regexp.Regexp) + + // Compile the regular expressions + for _, c := range r.Tags { + if _, compiled := r.regexCache[c.Pattern]; !compiled { + r.regexCache[c.Pattern] = regexp.MustCompile(c.Pattern) + } + } + for _, c := range r.Fields { + if _, compiled := r.regexCache[c.Pattern]; !compiled { + r.regexCache[c.Pattern] = regexp.MustCompile(c.Pattern) + } + } + + resultOptions := []string{"overwrite", "keep"} + for _, c := range r.TagRename { + if c.Key != "" { + r.Log.Info("'tag_rename' section contains a key which is ignored during processing") + } + + if c.ResultKey == "" { + c.ResultKey = "keep" + } + if err := choice.Check(c.ResultKey, resultOptions); err != nil { + return fmt.Errorf("invalid metrics result_key: %v", err) + } + + if _, compiled := r.regexCache[c.Pattern]; !compiled { + r.regexCache[c.Pattern] = regexp.MustCompile(c.Pattern) + } + } + + for _, c := range r.FieldRename { + if c.Key != "" { + r.Log.Info("'field_rename' section contains a key which is ignored during processing") + } + + if c.ResultKey == "" { + c.ResultKey = "keep" + } + if err := choice.Check(c.ResultKey, resultOptions); err != nil { + return fmt.Errorf("invalid metrics result_key: %v", err) + } + + if _, compiled := r.regexCache[c.Pattern]; !compiled { + r.regexCache[c.Pattern] = regexp.MustCompile(c.Pattern) + } + } + + for _, c := range r.MetricRename { + if c.Key != "" { + r.Log.Info("'metric_rename' section contains a key which is ignored during processing") + } + + if c.ResultKey != "" { + r.Log.Info("'metric_rename' section contains a 'result_key' ignored during processing as metrics will ALWAYS the name") + } + + if _, compiled := r.regexCache[c.Pattern]; !compiled { + r.regexCache[c.Pattern] = regexp.MustCompile(c.Pattern) + } } + + return nil } func (r *Regex) SampleConfig() string { @@ -63,7 +162,7 @@ func (r *Regex) SampleConfig() string { } func (r *Regex) Description() string { - return "Transforms tag and field values with regex pattern" + return "Transforms tag and field values as well as measurement, tag and field names with regex pattern" } func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric { @@ -83,27 +182,96 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, converter := range r.Fields { if value, ok := metric.GetField(converter.Key); ok { - switch value := value.(type) { - case string: - if key, newValue := r.convert(converter, value); newValue != "" { + if v, ok := value.(string); ok { + if key, newValue := r.convert(converter, v); newValue != "" { metric.AddField(key, newValue) } } } } + + for _, converter := range r.TagRename { + regex := r.regexCache[converter.Pattern] + replacements := make(map[string]string) + for _, tag := range metric.TagList() { + name := tag.Key + if regex.MatchString(name) { + newName := regex.ReplaceAllString(name, converter.Replacement) + + if !metric.HasTag(newName) { + // There is no colliding tag, we can just change the name. + tag.Key = newName + continue + } + + if converter.ResultKey == "overwrite" { + // We got a colliding tag, remember the replacement and do it later + replacements[name] = newName + } + } + } + // We needed to postpone the replacement as we cannot modify the tag-list + // while iterating it as this will result in invalid memory dereference panic. 
+ for oldName, newName := range replacements { + value, ok := metric.GetTag(oldName) + if !ok { + // Just in case the tag got removed in the meantime + continue + } + metric.AddTag(newName, value) + metric.RemoveTag(oldName) + } + } + + for _, converter := range r.FieldRename { + regex := r.regexCache[converter.Pattern] + replacements := make(map[string]string) + for _, field := range metric.FieldList() { + name := field.Key + if regex.MatchString(name) { + newName := regex.ReplaceAllString(name, converter.Replacement) + + if !metric.HasField(newName) { + // There is no colliding field, we can just change the name. + field.Key = newName + continue + } + + if converter.ResultKey == "overwrite" { + // We got a colliding field, remember the replacement and do it later + replacements[name] = newName + } + } + } + // We needed to postpone the replacement as we cannot modify the field-list + // while iterating it as this will result in invalid memory dereference panic. + for oldName, newName := range replacements { + value, ok := metric.GetField(oldName) + if !ok { + // Just in case the field got removed in the meantime + continue + } + metric.AddField(newName, value) + metric.RemoveField(oldName) + } + } + + for _, converter := range r.MetricRename { + regex := r.regexCache[converter.Pattern] + value := metric.Name() + if regex.MatchString(value) { + newValue := regex.ReplaceAllString(value, converter.Replacement) + metric.SetName(newValue) + } + } } return in } -func (r *Regex) convert(c converter, src string) (string, string) { - regex, compiled := r.regexCache[c.Pattern] - if !compiled { - regex = regexp.MustCompile(c.Pattern) - r.regexCache[c.Pattern] = regex - } +func (r *Regex) convert(c converter, src string) (key string, value string) { + regex := r.regexCache[c.Pattern] - value := "" if c.ResultKey == "" || regex.MatchString(src) { value = regex.ReplaceAllString(src, c.Replacement) } @@ -116,7 +284,5 @@ func (r *Regex) convert(c converter, src string) (string, string) { } func init() { - processors.Add("regex", func() telegraf.Processor { - return NewRegex() - }) + processors.Add("regex", func() telegraf.Processor { return &Regex{} }) } diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index 2f8890bba7e9e..8baa0d79a2686 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -5,12 +5,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" ) func newM1() telegraf.Metric { - m1 := metric.New("access_log", + return testutil.MustMetric( + "access_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -20,11 +22,11 @@ func newM1() telegraf.Metric { }, time.Now(), ) - return m1 } func newM2() telegraf.Metric { - m2 := metric.New("access_log", + return testutil.MustMetric( + "access_log", map[string]string{ "verb": "GET", "resp_code": "200", @@ -36,7 +38,6 @@ func newM2() telegraf.Metric { }, time.Now(), ) - return m2 } func TestFieldConversions(t *testing.T) { @@ -72,10 +73,11 @@ func TestFieldConversions(t *testing.T) { } for _, test := range tests { - regex := NewRegex() - regex.Fields = []converter{ - test.converter, + regex := Regex{ + Fields: []converter{test.converter}, + Log: testutil.Logger{}, } + require.NoError(t, regex.Init()) processed := regex.Apply(newM1()) @@ -84,9 +86,9 @@ func TestFieldConversions(t 
*testing.T) { "resp_code": "200", } - assert.Equal(t, test.expectedFields, processed[0].Fields(), test.message) - assert.Equal(t, expectedTags, processed[0].Tags(), "Should not change tags") - assert.Equal(t, "access_log", processed[0].Name(), "Should not change name") + require.Equal(t, test.expectedFields, processed[0].Fields(), test.message) + require.Equal(t, expectedTags, processed[0].Tags(), "Should not change tags") + require.Equal(t, "access_log", processed[0].Name(), "Should not change name") } } @@ -139,10 +141,11 @@ func TestTagConversions(t *testing.T) { } for _, test := range tests { - regex := NewRegex() - regex.Tags = []converter{ - test.converter, + regex := Regex{ + Tags: []converter{test.converter}, + Log: testutil.Logger{}, } + require.NoError(t, regex.Init()) processed := regex.Apply(newM1()) @@ -150,43 +153,580 @@ func TestTagConversions(t *testing.T) { "request": "/users/42/", } - assert.Equal(t, expectedFields, processed[0].Fields(), test.message, "Should not change fields") - assert.Equal(t, test.expectedTags, processed[0].Tags(), test.message) - assert.Equal(t, "access_log", processed[0].Name(), "Should not change name") + require.Equal(t, expectedFields, processed[0].Fields(), test.message, "Should not change fields") + require.Equal(t, test.expectedTags, processed[0].Tags(), test.message) + require.Equal(t, "access_log", processed[0].Name(), "Should not change name") } } -func TestMultipleConversions(t *testing.T) { - regex := NewRegex() - regex.Tags = []converter{ +func TestMetricNameConversions(t *testing.T) { + inputTemplate := []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + } + + tests := []struct { + name string + converter converter + expected []telegraf.Metric + }{ { - Key: "resp_code", - Pattern: "^(\\d)\\d\\d$", - Replacement: "${1}xx", - ResultKey: "resp_code_group", + name: "Should change metric name", + converter: converter{ + Pattern: "^(\\w+)_log$", + Replacement: "${1}", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + 
}, + }, + } + + for _, test := range tests { + // Copy the inputs as they will be modified by the processor + input := make([]telegraf.Metric, len(inputTemplate)) + for i, m := range inputTemplate { + input[i] = m.Copy() + } + + t.Run(test.name, func(t *testing.T) { + regex := Regex{ + MetricRename: []converter{test.converter}, + Log: testutil.Logger{}, + } + require.NoError(t, regex.Init()) + + actual := regex.Apply(input...) + testutil.RequireMetricsEqual(t, test.expected, actual) + }) + } +} + +func TestFieldRenameConversions(t *testing.T) { + inputTemplate := []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + } + + tests := []struct { + name string + converter converter + expected []telegraf.Metric + }{ + { + name: "Should change field name", + converter: converter{ + Pattern: "^(?:ignore|error)_(\\w+)$", + Replacement: "result_${1}", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "result_number": int64(200), + "result_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "result_number": int64(404), + "result_flag": true, + "result_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, }, { - Key: "resp_code_group", - Pattern: "2xx", - Replacement: "OK", - ResultKey: "resp_code_text", + name: "Should keep existing field name", + converter: converter{ + Pattern: "^(?:ignore|error)_(\\w+)$", + Replacement: "request", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, }, + 
{ + name: "Should overwrite existing field name", + converter: converter{ + Pattern: "^ignore_bool$", + Replacement: "request", + ResultKey: "overwrite", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "ignore_number": int64(200), + "request": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, + }, + } + + for _, test := range tests { + // Copy the inputs as they will be modified by the processor + input := make([]telegraf.Metric, len(inputTemplate)) + for i, m := range inputTemplate { + input[i] = m.Copy() + } + + t.Run(test.name, func(t *testing.T) { + regex := Regex{ + FieldRename: []converter{test.converter}, + Log: testutil.Logger{}, + } + require.NoError(t, regex.Init()) + + actual := regex.Apply(input...) + testutil.RequireMetricsEqual(t, test.expected, actual) + }) } - regex.Fields = []converter{ +} + +func TestTagRenameConversions(t *testing.T) { + inputTemplate := []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + } + + tests := []struct { + name string + converter converter + expected []telegraf.Metric + }{ { - Key: "request", - Pattern: "^/api(?P/[\\w/]+)\\S*", - Replacement: "${method}", - ResultKey: "method", + name: "Should change tag name", + converter: converter{ + Pattern: "^resp_(\\w+)$", + Replacement: "${1}", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, }, { - Key: "request", - Pattern: ".*category=(\\w+).*", - 
Replacement: "${1}", - ResultKey: "search_category", + name: "Should keep existing tag name", + converter: converter{ + Pattern: "^resp_(\\w+)$", + Replacement: "verb", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "GET", + "resp_code": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, + }, + { + name: "Should overwrite existing tag name", + converter: converter{ + Pattern: "^resp_(\\w+)$", + Replacement: "verb", + ResultKey: "overwrite", + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "200", + }, + map[string]interface{}{ + "request": "/users/42/", + }, + time.Unix(1627646243, 0), + ), + testutil.MustMetric( + "access_log", + map[string]string{ + "verb": "200", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(200), + "ignore_bool": true, + }, + time.Unix(1627646253, 0), + ), + testutil.MustMetric( + "error_log", + map[string]string{ + "verb": "404", + }, + map[string]interface{}{ + "request": "/api/search/?category=plugins&q=regex&sort=asc", + "ignore_number": int64(404), + "ignore_flag": true, + "error_message": "request too silly", + }, + time.Unix(1627646263, 0), + ), + }, }, } + for _, test := range tests { + // Copy the inputs as they will be modified by the processor + input := make([]telegraf.Metric, len(inputTemplate)) + for i, m := range inputTemplate { + input[i] = m.Copy() + } + + t.Run(test.name, func(t *testing.T) { + regex := Regex{ + TagRename: []converter{test.converter}, + Log: testutil.Logger{}, + } + require.NoError(t, regex.Init()) + + actual := regex.Apply(input...) 
+ testutil.RequireMetricsEqual(t, test.expected, actual) + }) + } +} + +func TestMultipleConversions(t *testing.T) { + regex := Regex{ + Tags: []converter{ + { + Key: "resp_code", + Pattern: "^(\\d)\\d\\d$", + Replacement: "${1}xx", + ResultKey: "resp_code_group", + }, + { + Key: "resp_code_group", + Pattern: "2xx", + Replacement: "OK", + ResultKey: "resp_code_text", + }, + }, + Fields: []converter{ + { + Key: "request", + Pattern: "^/api(?P/[\\w/]+)\\S*", + Replacement: "${method}", + ResultKey: "method", + }, + { + Key: "request", + Pattern: ".*category=(\\w+).*", + Replacement: "${1}", + ResultKey: "search_category", + }, + }, + Log: testutil.Logger{}, + } + require.NoError(t, regex.Init()) + processed := regex.Apply(newM2()) expectedFields := map[string]interface{}{ @@ -203,8 +743,8 @@ func TestMultipleConversions(t *testing.T) { "resp_code_text": "OK", } - assert.Equal(t, expectedFields, processed[0].Fields()) - assert.Equal(t, expectedTags, processed[0].Tags()) + require.Equal(t, expectedFields, processed[0].Fields()) + require.Equal(t, expectedTags, processed[0].Tags()) } func TestNoMatches(t *testing.T) { @@ -250,34 +790,38 @@ func TestNoMatches(t *testing.T) { } for _, test := range tests { - regex := NewRegex() - regex.Fields = []converter{ - test.converter, + regex := Regex{ + Fields: []converter{test.converter}, + Log: testutil.Logger{}, } + require.NoError(t, regex.Init()) processed := regex.Apply(newM1()) - assert.Equal(t, test.expectedFields, processed[0].Fields(), test.message) + require.Equal(t, test.expectedFields, processed[0].Fields(), test.message) } } func BenchmarkConversions(b *testing.B) { - regex := NewRegex() - regex.Tags = []converter{ - { - Key: "resp_code", - Pattern: "^(\\d)\\d\\d$", - Replacement: "${1}xx", - ResultKey: "resp_code_group", + regex := Regex{ + Tags: []converter{ + { + Key: "resp_code", + Pattern: "^(\\d)\\d\\d$", + Replacement: "${1}xx", + ResultKey: "resp_code_group", + }, }, - } - regex.Fields = []converter{ - { - Key: "request", - Pattern: "^/users/\\d+/$", - Replacement: "/users/{id}/", + Fields: []converter{ + { + Key: "request", + Pattern: "^/users/\\d+/$", + Replacement: "/users/{id}/", + }, }, + Log: testutil.Logger{}, } + require.NoError(b, regex.Init()) for n := 0; n < b.N; n++ { processed := regex.Apply(newM1()) From 5ac9f418caa8ba83f162ecf9852eb7e23cc3d083 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Fri, 5 Nov 2021 14:55:16 +0100 Subject: [PATCH 026/133] fix: Markdown linter fixes for LICENSE_OF_DEPENDENCIES.md (#10065) --- docs/LICENSE_OF_DEPENDENCIES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 995ad5f697ed1..e03a506bddd17 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -283,5 +283,7 @@ following works: - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) -## telegraf used and modified code from these projects + +## Telegraf used and modified code from these projects + - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) From 9871b676a535938d2782f46b577a32cdee748a49 Mon Sep 17 00:00:00 2001 From: Jacob Marble Date: Mon, 8 Nov 
2021 13:41:50 -0800 Subject: [PATCH 027/133] chore: update OpenTelemetry plugins (#10010) --- go.mod | 28 ++++----- go.sum | 57 ++++++++++--------- plugins/inputs/opentelemetry/grpc_services.go | 19 ++++--- .../opentelemetry/opentelemetry_test.go | 4 +- .../outputs/opentelemetry/opentelemetry.go | 9 +-- .../opentelemetry/opentelemetry_test.go | 4 +- 6 files changed, 65 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 7b54ae3c6e47a..ddea0e554c44b 100644 --- a/go.mod +++ b/go.mod @@ -147,9 +147,9 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 - github.com/influxdata/influxdb-observability/common v0.2.7 - github.com/influxdata/influxdb-observability/influx2otel v0.2.7 - github.com/influxdata/influxdb-observability/otel2influx v0.2.7 + github.com/influxdata/influxdb-observability/common v0.2.8 + github.com/influxdata/influxdb-observability/influx2otel v0.2.8 + github.com/influxdata/influxdb-observability/otel2influx v0.2.8 github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 @@ -275,10 +275,10 @@ require ( go.etcd.io/etcd/api/v3 v3.5.0 // indirect go.mongodb.org/mongo-driver v1.7.3 go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 - go.opentelemetry.io/otel/metric v0.23.0 - go.opentelemetry.io/otel/sdk/metric v0.23.0 + go.opentelemetry.io/collector/model v0.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 + go.opentelemetry.io/otel/metric v0.24.0 + go.opentelemetry.io/otel/sdk/metric v0.24.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -298,7 +298,7 @@ require ( google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 - google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.41.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 @@ -345,12 +345,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect github.com/rogpeppe/go-internal v1.6.2 // indirect - go.opentelemetry.io/otel v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect - go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect - go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect - go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/otel v1.0.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.1 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.24.0 // indirect + go.opentelemetry.io/otel/trace v1.0.1 // indirect go.opentelemetry.io/proto/otlp v0.9.0 // indirect ) diff --git a/go.sum b/go.sum index ebbc319d44333..68faf74a86749 100644 --- a/go.sum +++ b/go.sum @@ -486,6 +486,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go 
v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -722,6 +723,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -1226,12 +1228,12 @@ github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7m github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= -github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= -github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= -github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMoPChvIgbEOvghwc5NxngOgXThp9MFKs7efNLtQ= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= +github.com/influxdata/influxdb-observability/common v0.2.8 h1:QDvX7rNQkt1mHr2v8sw/OEupa32CxZHlO5f/tsyPCLw= +github.com/influxdata/influxdb-observability/common v0.2.8/go.mod h1:N2wfkPgJvi9CPK6MbNFkD70naEUxAMGCqFyxZXCJQDs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8 h1:XlVo4WLIFByOADn+88hPmR2SGJkdLppyIbw1BG2obp8= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8/go.mod h1:t9LeYL1mBiVRZBt5TfIj+4MBkJ/1POBxUlKSxEA+uj8= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8 h1:vTamg9mKUXHaXPtydrR1ejpqj/OKAGc56MiedXjlsnA= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8/go.mod h1:xKTR9GLOtkSekysDKhAFNrPYpeiFV31Sy6zDqF54axA= github.com/influxdata/influxdb1-client 
v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -2148,27 +2150,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= -go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= -go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/collector/model v0.37.0 h1:K1G6bgzBZ5kKSjZ1+EY9MhCOYsac4Q1K85fBUgpTVH8= +go.opentelemetry.io/collector/model v0.37.0/go.mod h1:ESh1oWDNdS4fTg9sTFoYuiuvs8QuaX8yNGTPix3JZc8= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= -go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= -go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= -go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= -go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= -go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= -go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= -go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= -go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= -go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= -go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= -go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= +go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 h1:NN6n2agAkT6j2o+1RPTFANclOnZ/3Z1ruRGL06NYACk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0/go.mod h1:kgWmavsno59/h5l9A9KXhvqrYxBhiQvJHPNhJkMP46s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 h1:QyIh7cAMItlzm8xQn9c6QxNEMUbYgXPx19irR/pmgdI= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0/go.mod h1:BpCT1zDnUgcUc3VqFVkxH/nkx6cM8XlCPsQsxaOzUNM= +go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA= +go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= +go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU= +go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4= +go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0 h1:innKi8LQebwPI+WEuEKEWMjhWC5mXQG1/WpSm5mffSY= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y= +go.opentelemetry.io/otel/sdk/metric v0.24.0 h1:LLHrZikGdEHoHihwIPvfFRJX+T+NdrU2zgEqf7tQ7Oo= +go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk= +go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= @@ -2846,8 +2848,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index 1c805e2a23ff2..437c723db3e28 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/influxdb-observability/common" "github.com/influxdata/influxdb-observability/otel2influx" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" ) type traceService struct { @@ -15,6 +14,8 @@ type traceService struct { writer *writeToAccumulator } +var _ otlpgrpc.TracesServer = (*traceService)(nil) + func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { converter := otel2influx.NewOtelTracesToLineProtocol(logger) return &traceService{ @@ -23,8 +24,8 @@ func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceSer } } -func (s *traceService) Export(ctx 
context.Context, req pdata.Traces) (otlpgrpc.TracesResponse, error) { - err := s.converter.WriteTraces(ctx, req, s.writer) +func (s *traceService) Export(ctx context.Context, req otlpgrpc.TracesRequest) (otlpgrpc.TracesResponse, error) { + err := s.converter.WriteTraces(ctx, req.Traces(), s.writer) return otlpgrpc.NewTracesResponse(), err } @@ -33,6 +34,8 @@ type metricsService struct { writer *writeToAccumulator } +var _ otlpgrpc.MetricsServer = (*metricsService)(nil) + var metricsSchemata = map[string]common.MetricsSchema{ "prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1, "prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2, @@ -54,8 +57,8 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema }, nil } -func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { - err := s.converter.WriteMetrics(ctx, req, s.writer) +func (s *metricsService) Export(ctx context.Context, req otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + err := s.converter.WriteMetrics(ctx, req.Metrics(), s.writer) return otlpgrpc.NewMetricsResponse(), err } @@ -64,6 +67,8 @@ type logsService struct { writer *writeToAccumulator } +var _ otlpgrpc.LogsServer = (*logsService)(nil) + func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { converter := otel2influx.NewOtelLogsToLineProtocol(logger) return &logsService{ @@ -72,7 +77,7 @@ func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsServi } } -func (s *logsService) Export(ctx context.Context, req pdata.Logs) (otlpgrpc.LogsResponse, error) { - err := s.converter.WriteLogs(ctx, req, s.writer) +func (s *logsService) Export(ctx context.Context, req otlpgrpc.LogsRequest) (otlpgrpc.LogsResponse, error) { + err := s.converter.WriteLogs(ctx, req.Logs(), s.writer) return otlpgrpc.NewLogsResponse(), err } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 8df1273bef8c4..4704d779dfd49 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -42,7 +42,7 @@ func TestOpenTelemetry(t *testing.T) { t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) pusher := controller.New( - processor.New( + processor.NewFactory( simple.NewWithExactDistribution(), metricExporter, ), @@ -53,7 +53,7 @@ func TestOpenTelemetry(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) - global.SetMeterProvider(pusher.MeterProvider()) + global.SetMeterProvider(pusher) // write metrics meter := global.Meter("library-name") diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index e1bbc9322e759..7cfe1341b3ff4 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -157,15 +157,16 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { } } - md := batch.GetMetrics() - if md.ResourceMetrics().Len() == 0 { + md := otlpgrpc.NewMetricsRequest() + md.SetMetrics(batch.GetMetrics()) + if md.Metrics().ResourceMetrics().Len() == 0 { return nil } if len(o.Attributes) > 0 { - for i := 0; i < md.ResourceMetrics().Len(); i++ { + for i := 0; i < md.Metrics().ResourceMetrics().Len(); i++ { for k, v := range o.Attributes { - md.ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + 
md.Metrics().ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) } } } diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index 6ebf1829bd540..c2f9f1980410d 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -133,8 +133,8 @@ func (m *mockOtelService) Address() string { return m.listener.Addr().String() } -func (m *mockOtelService) Export(ctx context.Context, request pdata.Metrics) (otlpgrpc.MetricsResponse, error) { - m.metrics = request.Clone() +func (m *mockOtelService) Export(ctx context.Context, request otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + m.metrics = request.Metrics().Clone() ctxMetadata, ok := metadata.FromIncomingContext(ctx) assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) assert.True(m.t, ok) From 0133f1206b4128dc9a1b9b9202793c279c263baf Mon Sep 17 00:00:00 2001 From: Fan Zhang <385741668@qq.com> Date: Tue, 9 Nov 2021 05:42:55 +0800 Subject: [PATCH 028/133] fix: Set the default value correctly (#9980) --- plugins/inputs/nvidia_smi/README.md | 8 +++++++- plugins/inputs/nvidia_smi/nvidia_smi.go | 19 +++++++++++++++---- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index c889e016fc464..479634d7befb0 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -7,13 +7,19 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid ```toml # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling # timeout = "5s" ``` +#### Linux + +On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` + #### Windows On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 3e4fb03f04221..68f25ba428611 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -31,7 +31,9 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling @@ -39,12 +41,21 @@ func (smi *NvidiaSMI) SampleConfig() string { ` } -// Gather implements the telegraf interface -func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { +func (smi *NvidiaSMI) Init() error { if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { - return 
fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) + binPath, err := exec.LookPath("nvidia-smi") + // fail-fast + if err != nil { + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + } + smi.BinPath = binPath } + return nil +} + +// Gather implements the telegraf interface +func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { data, err := smi.pollSMI() if err != nil { return err From e73ffe56c46dbafe1d700864ae4dae3e63b02125 Mon Sep 17 00:00:00 2001 From: AlphaAr Date: Mon, 8 Nov 2021 18:47:32 -0300 Subject: [PATCH 029/133] fix: Add metric name is a label with name "__name" to Loki output plugin (#10001) --- plugins/outputs/loki/README.md | 2 +- plugins/outputs/loki/loki.go | 2 ++ plugins/outputs/loki/loki_test.go | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/loki/README.md b/plugins/outputs/loki/README.md index 681d3b207c1af..6c7eb91c8916a 100644 --- a/plugins/outputs/loki/README.md +++ b/plugins/outputs/loki/README.md @@ -1,6 +1,6 @@ # Loki Output Plugin -This plugin sends logs to Loki, using tags as labels, +This plugin sends logs to Loki, using metric name and tags as labels, log line will content all fields in `key="value"` format which is easily parsable with `logfmt` parser in Loki. Logs within each stream are sorted by timestamp before being sent to Loki. diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index 07d4d473bf396..fcf96e55f6429 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -143,6 +143,8 @@ func (l *Loki) Write(metrics []telegraf.Metric) error { }) for _, m := range metrics { + m.AddTag("__name", m.Name()) + tags := m.TagList() var line string diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index ba6d0808fabaa..6f0678e8dd4b5 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -225,7 +225,7 @@ func TestContentEncodingGzip(t *testing.T) { require.Len(t, s.Streams, 1) require.Len(t, s.Streams[0].Logs, 1) require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) require.Equal(t, "123000000000", s.Streams[0].Logs[0][0]) require.Contains(t, s.Streams[0].Logs[0][1], "line=\"my log\"") require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") @@ -404,7 +404,7 @@ func TestMetricSorting(t *testing.T) { require.Len(t, s.Streams, 1) require.Len(t, s.Streams[0].Logs, 2) require.Len(t, s.Streams[0].Logs[0], 2) - require.Equal(t, map[string]string{"key1": "value1"}, s.Streams[0].Labels) + require.Equal(t, map[string]string{"__name": "log", "key1": "value1"}, s.Streams[0].Labels) require.Equal(t, "456000000000", s.Streams[0].Logs[0][0]) require.Contains(t, s.Streams[0].Logs[0][1], "line=\"older log\"") require.Contains(t, s.Streams[0].Logs[0][1], "field=\"3.14\"") From ddeb6ec890528494bd041566989cd8ae9897cacf Mon Sep 17 00:00:00 2001 From: Dane Strandboge Date: Tue, 9 Nov 2021 08:51:14 -0600 Subject: [PATCH 030/133] build: move to new protobuf library (#10019) --- go.mod | 10 +- go.sum | 9 +- .../cisco_telemetry_mdt.go | 5 +- .../cisco_telemetry_mdt_test.go | 2 +- .../cisco_telemetry_util.go | 3 +- .../auth/authentication_service.pb.go | 314 ++- .../auth/authentication_service.proto | 1 + .../auth/authentication_service_grpc.pb.go | 101 
+ .../inputs/jti_openconfig_telemetry/gen.go | 11 + .../jti_openconfig_telemetry/oc/oc.pb.go | 2328 +++++++++++------ .../jti_openconfig_telemetry/oc/oc.proto | 1 + .../jti_openconfig_telemetry/oc/oc_grpc.pb.go | 293 +++ .../openconfig_telemetry_test.go | 1 + .../riemann_listener/riemann_listener.go | 8 +- .../riemann_listener/riemann_listener_test.go | 5 +- plugins/inputs/stackdriver/stackdriver.go | 14 +- .../inputs/stackdriver/stackdriver_test.go | 28 +- plugins/outputs/stackdriver/stackdriver.go | 8 +- .../outputs/stackdriver/stackdriver_test.go | 18 +- .../parsers/prometheusremotewrite/parser.go | 3 +- plugins/serializers/prometheus/collection.go | 2 +- .../serializers/prometheus/collection_test.go | 2 +- .../prometheusremotewrite.go | 4 +- .../prometheusremotewrite_test.go | 3 +- 24 files changed, 2158 insertions(+), 1016 deletions(-) create mode 100644 plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/gen.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go diff --git a/go.mod b/go.mod index ddea0e554c44b..950ff837bd873 100644 --- a/go.mod +++ b/go.mod @@ -113,12 +113,10 @@ require ( github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.1.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-cmp v0.5.6 @@ -338,6 +336,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect @@ -365,3 +365,9 @@ replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-2021 //https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct. //Replacing with the pseudo-version works around this. 
replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 + +// replaced due to open PR updating protobuf https://github.com/cisco-ie/nx-telemetry-proto/pull/1 +replace github.com/cisco-ie/nx-telemetry-proto => github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc + +// replaced due to open PR updating protobuf https://github.com/riemann/riemann-go-client/pull/27 +replace github.com/riemann/riemann-go-client => github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 diff --git a/go.sum b/go.sum index 68faf74a86749..22f1d61b5c323 100644 --- a/go.sum +++ b/go.sum @@ -478,8 +478,6 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -691,6 +689,8 @@ github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1: github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 h1:aDtw0/++yjOoiXB9sldaFYW61mK3m6ia/wYWxPLrwYY= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -993,7 +993,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1836,8 +1835,6 @@ github.com/rcrowley/go-metrics 
v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= -github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= @@ -1873,6 +1870,8 @@ github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc h1:9RAsqOFf0U5CuwXR/Jff3nXTv6tAQNN7U4A/2cBRXFc= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 10f1f764c0515..25b5ec9758962 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -15,11 +15,11 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "google.golang.org/grpc" "google.golang.org/grpc/credentials" _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -61,6 +61,9 @@ type CiscoTelemetryMDT struct { mutex sync.Mutex acc telegraf.Accumulator wg sync.WaitGroup + + // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility + dialout.UnimplementedGRPCMdtDialoutServer } type NxPayloadXfromStructure struct { diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 745b26dea4b20..90fc949276948 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ 
b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -10,9 +10,9 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index 8f6ea93eab4b3..1d7d95a95a757 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -1,9 +1,10 @@ package cisco_telemetry_mdt import ( - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "strconv" "strings" + + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" ) //xform Field to string diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go index 7ddeefacab635..1342758887932 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go @@ -1,182 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: authentication_service.proto +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: auth/authentication_service.proto -/* -Package authentication is a generated protocol buffer package. - -It is generated from these files: - authentication_service.proto - -It has these top-level messages: - LoginRequest - LoginReply -*/ package authentication -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The request message containing the user's name, password and client id type LoginRequest struct { - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (x *LoginRequest) Reset() { + *x = LoginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoginRequest) ProtoMessage() {} + +func (x *LoginRequest) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LoginRequest) Reset() { *m = LoginRequest{} } -func (m *LoginRequest) String() string { return proto.CompactTextString(m) } -func (*LoginRequest) ProtoMessage() {} -func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. +func (*LoginRequest) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{0} +} -func (m *LoginRequest) GetUserName() string { - if m != nil { - return m.UserName +func (x *LoginRequest) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *LoginRequest) GetPassword() string { - if m != nil { - return m.Password +func (x *LoginRequest) GetPassword() string { + if x != nil { + return x.Password } return "" } -func (m *LoginRequest) GetClientId() string { - if m != nil { - return m.ClientId +func (x *LoginRequest) GetClientId() string { + if x != nil { + return x.ClientId } return "" } +// // The response message containing the result of login attempt. 
// result value of true indicates success and false indicates // failure type LoginReply struct { - Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LoginReply) Reset() { *m = LoginReply{} } -func (m *LoginReply) String() string { return proto.CompactTextString(m) } -func (*LoginReply) ProtoMessage() {} -func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} -func (m *LoginReply) GetResult() bool { - if m != nil { - return m.Result +func (x *LoginReply) Reset() { + *x = LoginReply{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func init() { - proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest") - proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply") +func (x *LoginReply) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*LoginReply) ProtoMessage() {} -// Client API for Login service - -type LoginClient interface { - LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) -} - -type loginClient struct { - cc *grpc.ClientConn +func (x *LoginReply) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func NewLoginClient(cc *grpc.ClientConn) LoginClient { - return &loginClient{cc} +// Deprecated: Use LoginReply.ProtoReflect.Descriptor instead. +func (*LoginReply) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{1} } -func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { - out := new(LoginReply) - err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *LoginReply) GetResult() bool { + if x != nil { + return x.Result } - return out, nil + return false } -// Server API for Login service +var File_auth_authentication_service_proto protoreflect.FileDescriptor + +var file_auth_authentication_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x32, + 0x51, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x48, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x12, 0x5a, 0x10, 0x2e, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_auth_authentication_service_proto_rawDescOnce sync.Once + file_auth_authentication_service_proto_rawDescData = file_auth_authentication_service_proto_rawDesc +) -type LoginServer interface { - LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) +func file_auth_authentication_service_proto_rawDescGZIP() []byte { + file_auth_authentication_service_proto_rawDescOnce.Do(func() { + file_auth_authentication_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_authentication_service_proto_rawDescData) + }) + return file_auth_authentication_service_proto_rawDescData } -func RegisterLoginServer(s *grpc.Server, srv LoginServer) { - s.RegisterService(&_Login_serviceDesc, srv) +var file_auth_authentication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_auth_authentication_service_proto_goTypes = []interface{}{ + (*LoginRequest)(nil), // 0: authentication.LoginRequest + (*LoginReply)(nil), // 1: authentication.LoginReply +} +var file_auth_authentication_service_proto_depIdxs = []int32{ + 0, // 0: authentication.Login.LoginCheck:input_type -> authentication.LoginRequest + 1, // 1: authentication.Login.LoginCheck:output_type -> authentication.LoginReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // 
[0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoginRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoginServer).LoginCheck(ctx, in) +func init() { file_auth_authentication_service_proto_init() } +func file_auth_authentication_service_proto_init() { + if File_auth_authentication_service_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/authentication.Login/LoginCheck", + if !protoimpl.UnsafeEnabled { + file_auth_authentication_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_auth_authentication_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Login_serviceDesc = grpc.ServiceDesc{ - ServiceName: "authentication.Login", - HandlerType: (*LoginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoginCheck", - Handler: _Login_LoginCheck_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_auth_authentication_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "authentication_service.proto", -} - -func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9, - 0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, - 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2, - 0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2, - 0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, - 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97, - 0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, - 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b, - 0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8, - 0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39, - 0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21, - 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00, + GoTypes: file_auth_authentication_service_proto_goTypes, + DependencyIndexes: 
file_auth_authentication_service_proto_depIdxs, + MessageInfos: file_auth_authentication_service_proto_msgTypes, + }.Build() + File_auth_authentication_service_proto = out.File + file_auth_authentication_service_proto_rawDesc = nil + file_auth_authentication_service_proto_goTypes = nil + file_auth_authentication_service_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto index a41e13a09f7d9..f67b67a6c5730 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto @@ -25,6 +25,7 @@ syntax = "proto3"; package authentication; +option go_package = ".;authentication"; // The Login service definition. service Login { diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go new file mode 100644 index 0000000000000..bbbf200ec68be --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package authentication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoginClient is the client API for Login service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoginClient interface { + LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) +} + +type loginClient struct { + cc grpc.ClientConnInterface +} + +func NewLoginClient(cc grpc.ClientConnInterface) LoginClient { + return &loginClient{cc} +} + +func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { + out := new(LoginReply) + err := c.cc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LoginServer is the server API for Login service. +// All implementations must embed UnimplementedLoginServer +// for forward compatibility +type LoginServer interface { + LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) + mustEmbedUnimplementedLoginServer() +} + +// UnimplementedLoginServer must be embedded to have forward compatible implementations. +type UnimplementedLoginServer struct { +} + +func (UnimplementedLoginServer) LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoginCheck not implemented") +} +func (UnimplementedLoginServer) mustEmbedUnimplementedLoginServer() {} + +// UnsafeLoginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoginServer will +// result in compilation errors. 
+type UnsafeLoginServer interface { + mustEmbedUnimplementedLoginServer() +} + +func RegisterLoginServer(s grpc.ServiceRegistrar, srv LoginServer) { + s.RegisterService(&Login_ServiceDesc, srv) +} + +func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoginServer).LoginCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/authentication.Login/LoginCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Login_ServiceDesc is the grpc.ServiceDesc for Login service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Login_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "authentication.Login", + HandlerType: (*LoginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LoginCheck", + Handler: _Login_LoginCheck_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth/authentication_service.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/gen.go b/plugins/inputs/jti_openconfig_telemetry/gen.go new file mode 100644 index 0000000000000..0b97e3bea9e55 --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/gen.go @@ -0,0 +1,11 @@ +package jti_openconfig_telemetry + +// To run these commands, make sure that protoc-gen-go and protoc-gen-go-grpc are installed +// > go install google.golang.org/protobuf/cmd/protoc-gen-go +// > go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +// +// Generated files were last generated with: +// - protoc-gen-go: v1.27.1 +// - protoc-gen-go-grpc: v1.1.0 +//go:generate protoc --go_out=auth/ --go-grpc_out=auth/ auth/authentication_service.proto +//go:generate protoc --go_out=oc/ --go-grpc_out=oc/ oc/oc.proto diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index bc7c780458f99..19d16dccc501a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -1,54 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: oc.proto - -/* -Package telemetry is a generated protocol buffer package. - -It is generated from these files: - oc.proto - -It has these top-level messages: - SubscriptionRequest - SubscriptionInput - Collector - Path - SubscriptionAdditionalConfig - SubscriptionReply - SubscriptionResponse - OpenConfigData - KeyValue - Delete - Eom - CancelSubscriptionRequest - CancelSubscriptionReply - GetSubscriptionsRequest - GetSubscriptionsReply - GetOperationalStateRequest - GetOperationalStateReply - DataEncodingRequest - DataEncodingReply -*/ -package telemetry +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: oc/oc.proto -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package telemetry import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Result of the operation type ReturnCode int32 @@ -59,21 +29,46 @@ const ( ReturnCode_UNKNOWN_ERROR ReturnCode = 2 ) -var ReturnCode_name = map[int32]string{ - 0: "SUCCESS", - 1: "NO_SUBSCRIPTION_ENTRY", - 2: "UNKNOWN_ERROR", -} -var ReturnCode_value = map[string]int32{ - "SUCCESS": 0, - "NO_SUBSCRIPTION_ENTRY": 1, - "UNKNOWN_ERROR": 2, +// Enum value maps for ReturnCode. +var ( + ReturnCode_name = map[int32]string{ + 0: "SUCCESS", + 1: "NO_SUBSCRIPTION_ENTRY", + 2: "UNKNOWN_ERROR", + } + ReturnCode_value = map[string]int32{ + "SUCCESS": 0, + "NO_SUBSCRIPTION_ENTRY": 1, + "UNKNOWN_ERROR": 2, + } +) + +func (x ReturnCode) Enum() *ReturnCode { + p := new(ReturnCode) + *p = x + return p } func (x ReturnCode) String() string { - return proto.EnumName(ReturnCode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReturnCode) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[0].Descriptor() +} + +func (ReturnCode) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[0] +} + +func (x ReturnCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReturnCode.Descriptor instead. +func (ReturnCode) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} } -func (ReturnCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Verbosity Level type VerbosityLevel int32 @@ -84,21 +79,46 @@ const ( VerbosityLevel_BRIEF VerbosityLevel = 2 ) -var VerbosityLevel_name = map[int32]string{ - 0: "DETAIL", - 1: "TERSE", - 2: "BRIEF", -} -var VerbosityLevel_value = map[string]int32{ - "DETAIL": 0, - "TERSE": 1, - "BRIEF": 2, +// Enum value maps for VerbosityLevel. +var ( + VerbosityLevel_name = map[int32]string{ + 0: "DETAIL", + 1: "TERSE", + 2: "BRIEF", + } + VerbosityLevel_value = map[string]int32{ + "DETAIL": 0, + "TERSE": 1, + "BRIEF": 2, + } +) + +func (x VerbosityLevel) Enum() *VerbosityLevel { + p := new(VerbosityLevel) + *p = x + return p } func (x VerbosityLevel) String() string { - return proto.EnumName(VerbosityLevel_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VerbosityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[1].Descriptor() +} + +func (VerbosityLevel) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[1] +} + +func (x VerbosityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VerbosityLevel.Descriptor instead. 
+func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} } -func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Encoding Type Supported type EncodingType int32 @@ -110,126 +130,248 @@ const ( EncodingType_PROTO3 EncodingType = 3 ) -var EncodingType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "XML", - 2: "JSON_IETF", - 3: "PROTO3", -} -var EncodingType_value = map[string]int32{ - "UNDEFINED": 0, - "XML": 1, - "JSON_IETF": 2, - "PROTO3": 3, +// Enum value maps for EncodingType. +var ( + EncodingType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "XML", + 2: "JSON_IETF", + 3: "PROTO3", + } + EncodingType_value = map[string]int32{ + "UNDEFINED": 0, + "XML": 1, + "JSON_IETF": 2, + "PROTO3": 3, + } +) + +func (x EncodingType) Enum() *EncodingType { + p := new(EncodingType) + *p = x + return p } func (x EncodingType) String() string { - return proto.EnumName(EncodingType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncodingType) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[2].Descriptor() +} + +func (EncodingType) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[2] +} + +func (x EncodingType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncodingType.Descriptor instead. +func (EncodingType) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} } -func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // Message sent for a telemetry subscription request type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data associated with a telemetry subscription - Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` // The below configuration is not defined in Openconfig RPC. // It is a proposed extension to configure additional // subscription request features. 
- AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig" json:"additional_config,omitempty"` + AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"` } -func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } -func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*SubscriptionRequest) ProtoMessage() {} -func (*SubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionRequest) GetInput() *SubscriptionInput { - if m != nil { - return m.Input +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriptionRequest) GetInput() *SubscriptionInput { + if x != nil { + return x.Input } return nil } -func (m *SubscriptionRequest) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionRequest) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } -func (m *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { - if m != nil { - return m.AdditionalConfig +func (x *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { + if x != nil { + return x.AdditionalConfig } return nil } // Data associated with a telemetry subscription type SubscriptionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of optional collector endpoints to send data for // this subscription. // If no collector destinations are specified, the collector // destination is assumed to be the requester on the rpc channel. 
- CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList" json:"collector_list,omitempty"` + CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList,proto3" json:"collector_list,omitempty"` +} + +func (x *SubscriptionInput) Reset() { + *x = SubscriptionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SubscriptionInput) Reset() { *m = SubscriptionInput{} } -func (m *SubscriptionInput) String() string { return proto.CompactTextString(m) } -func (*SubscriptionInput) ProtoMessage() {} -func (*SubscriptionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *SubscriptionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *SubscriptionInput) GetCollectorList() []*Collector { - if m != nil { - return m.CollectorList +func (*SubscriptionInput) ProtoMessage() {} + +func (x *SubscriptionInput) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionInput.ProtoReflect.Descriptor instead. +func (*SubscriptionInput) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionInput) GetCollectorList() []*Collector { + if x != nil { + return x.CollectorList } return nil } // Collector endpoints to send data specified as an ip+port combination. type Collector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // IP address of collector endpoint - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Transport protocol port number for the collector destination. - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (m *Collector) Reset() { *m = Collector{} } -func (m *Collector) String() string { return proto.CompactTextString(m) } -func (*Collector) ProtoMessage() {} -func (*Collector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *Collector) Reset() { + *x = Collector{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Collector) GetAddress() string { - if m != nil { - return m.Address +func (x *Collector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collector) ProtoMessage() {} + +func (x *Collector) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collector.ProtoReflect.Descriptor instead. 
+func (*Collector) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} +} + +func (x *Collector) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Collector) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Collector) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } // Data model path type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data model path of interest // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Regular expression to be used in filtering state leaves - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // If this is set to true, the target device will only send // updates to the collector upon a change in data value - SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged" json:"suppress_unchanged,omitempty"` + SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged,proto3" json:"suppress_unchanged,omitempty"` // Maximum time in ms the target device may go without sending // a message to the collector. If this time expires with // suppress-unchanged set, the target device must send an update // message regardless if the data values have changed. - MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval" json:"max_silent_interval,omitempty"` + MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval,proto3" json:"max_silent_interval,omitempty"` // Time in ms between collection and transmission of the // specified data to the collector platform. The target device // will sample the corresponding data (e.g,. a counter) and @@ -237,143 +379,263 @@ type Path struct { // // If sample-frequency is set to 0, then the network device // must emit an update upon every datum change. - SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency" json:"sample_frequency,omitempty"` + SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency,proto3" json:"sample_frequency,omitempty"` // EOM needed for each walk cycle of this path? // For periodic sensor, applicable for each complete reap // For event sensor, applicable when initial dump is over // (same as EOS) // This feature is not implemented currently. 
- NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom" json:"need_eom,omitempty"` + NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom,proto3" json:"need_eom,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Path) GetPath() string { - if m != nil { - return m.Path +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{3} +} + +func (x *Path) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *Path) GetFilter() string { - if m != nil { - return m.Filter +func (x *Path) GetFilter() string { + if x != nil { + return x.Filter } return "" } -func (m *Path) GetSuppressUnchanged() bool { - if m != nil { - return m.SuppressUnchanged +func (x *Path) GetSuppressUnchanged() bool { + if x != nil { + return x.SuppressUnchanged } return false } -func (m *Path) GetMaxSilentInterval() uint32 { - if m != nil { - return m.MaxSilentInterval +func (x *Path) GetMaxSilentInterval() uint32 { + if x != nil { + return x.MaxSilentInterval } return 0 } -func (m *Path) GetSampleFrequency() uint32 { - if m != nil { - return m.SampleFrequency +func (x *Path) GetSampleFrequency() uint32 { + if x != nil { + return x.SampleFrequency } return 0 } -func (m *Path) GetNeedEom() bool { - if m != nil { - return m.NeedEom +func (x *Path) GetNeedEom() bool { + if x != nil { + return x.NeedEom } return false } // Configure subscription request additional features. type SubscriptionAdditionalConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // limit the number of records sent in the stream - LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords" json:"limit_records,omitempty"` + LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords,proto3" json:"limit_records,omitempty"` // limit the time the stream remains open - LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds" json:"limit_time_seconds,omitempty"` + LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds,proto3" json:"limit_time_seconds,omitempty"` // EOS needed for this subscription? 
- NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos" json:"need_eos,omitempty"` + NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos,proto3" json:"need_eos,omitempty"` +} + +func (x *SubscriptionAdditionalConfig) Reset() { + *x = SubscriptionAdditionalConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionAdditionalConfig) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscriptionAdditionalConfig) Reset() { *m = SubscriptionAdditionalConfig{} } -func (m *SubscriptionAdditionalConfig) String() string { return proto.CompactTextString(m) } -func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (m *SubscriptionAdditionalConfig) GetLimitRecords() int32 { - if m != nil { - return m.LimitRecords +func (x *SubscriptionAdditionalConfig) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionAdditionalConfig.ProtoReflect.Descriptor instead. +func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{4} +} + +func (x *SubscriptionAdditionalConfig) GetLimitRecords() int32 { + if x != nil { + return x.LimitRecords } return 0 } -func (m *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { - if m != nil { - return m.LimitTimeSeconds +func (x *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { + if x != nil { + return x.LimitTimeSeconds } return 0 } -func (m *SubscriptionAdditionalConfig) GetNeedEos() bool { - if m != nil { - return m.NeedEos +func (x *SubscriptionAdditionalConfig) GetNeedEos() bool { + if x != nil { + return x.NeedEos } return false } // 1. Reply data message sent out using out-of-band channel. type SubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Response message to a telemetry subscription creation or // get request. - Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. 
- PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` } -func (m *SubscriptionReply) Reset() { *m = SubscriptionReply{} } -func (m *SubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*SubscriptionReply) ProtoMessage() {} -func (*SubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *SubscriptionReply) Reset() { + *x = SubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionReply) GetResponse() *SubscriptionResponse { - if m != nil { - return m.Response +func (x *SubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionReply) ProtoMessage() {} + +func (x *SubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionReply.ProtoReflect.Descriptor instead. +func (*SubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{5} +} + +func (x *SubscriptionReply) GetResponse() *SubscriptionResponse { + if x != nil { + return x.Response } return nil } -func (m *SubscriptionReply) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionReply) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } // Response message to a telemetry subscription creation or get request. type SubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique id for the subscription on the device. 
This is // generated by the device and returned in a subscription // request or when listing existing subscriptions - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } -func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } -func (*SubscriptionResponse) ProtoMessage() {} -func (*SubscriptionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *SubscriptionResponse) Reset() { + *x = SubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionResponse) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *SubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionResponse) ProtoMessage() {} + +func (x *SubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionResponse.ProtoReflect.Descriptor instead. +func (*SubscriptionResponse) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{6} +} + +func (x *SubscriptionResponse) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } @@ -381,112 +643,147 @@ func (m *SubscriptionResponse) GetSubscriptionId() uint32 { // 2. Telemetry data send back on the same connection as the // subscription request. type OpenConfigData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // router name:export IP address - SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId" json:"system_id,omitempty"` + SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id,omitempty"` // line card / RE (slot number) - ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId" json:"component_id,omitempty"` + ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // PFE (if applicable) - SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId" json:"sub_component_id,omitempty"` + SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId,proto3" json:"sub_component_id,omitempty"` // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,4,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Sequence number, monotonically increasing for each // system_id, component_id, sub_component_id + path. 
- SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` // timestamp (milliseconds since epoch) - Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // List of key-value pairs - Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv,proto3" json:"kv,omitempty"` // For delete. If filled, it indicates delete - Delete []*Delete `protobuf:"bytes,8,rep,name=delete" json:"delete,omitempty"` + Delete []*Delete `protobuf:"bytes,8,rep,name=delete,proto3" json:"delete,omitempty"` // If filled, it indicates end of marker for the // respective path in the list. - Eom []*Eom `protobuf:"bytes,9,rep,name=eom" json:"eom,omitempty"` + Eom []*Eom `protobuf:"bytes,9,rep,name=eom,proto3" json:"eom,omitempty"` // If filled, it indicates end of sync for complete subscription - SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse" json:"sync_response,omitempty"` + SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse,proto3" json:"sync_response,omitempty"` } -func (m *OpenConfigData) Reset() { *m = OpenConfigData{} } -func (m *OpenConfigData) String() string { return proto.CompactTextString(m) } -func (*OpenConfigData) ProtoMessage() {} -func (*OpenConfigData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *OpenConfigData) Reset() { + *x = OpenConfigData{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *OpenConfigData) GetSystemId() string { - if m != nil { - return m.SystemId +func (x *OpenConfigData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenConfigData) ProtoMessage() {} + +func (x *OpenConfigData) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenConfigData.ProtoReflect.Descriptor instead. 
+func (*OpenConfigData) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{7} +} + +func (x *OpenConfigData) GetSystemId() string { + if x != nil { + return x.SystemId } return "" } -func (m *OpenConfigData) GetComponentId() uint32 { - if m != nil { - return m.ComponentId +func (x *OpenConfigData) GetComponentId() uint32 { + if x != nil { + return x.ComponentId } return 0 } -func (m *OpenConfigData) GetSubComponentId() uint32 { - if m != nil { - return m.SubComponentId +func (x *OpenConfigData) GetSubComponentId() uint32 { + if x != nil { + return x.SubComponentId } return 0 } -func (m *OpenConfigData) GetPath() string { - if m != nil { - return m.Path +func (x *OpenConfigData) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *OpenConfigData) GetSequenceNumber() uint64 { - if m != nil { - return m.SequenceNumber +func (x *OpenConfigData) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber } return 0 } -func (m *OpenConfigData) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp +func (x *OpenConfigData) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp } return 0 } -func (m *OpenConfigData) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *OpenConfigData) GetKv() []*KeyValue { + if x != nil { + return x.Kv } return nil } -func (m *OpenConfigData) GetDelete() []*Delete { - if m != nil { - return m.Delete +func (x *OpenConfigData) GetDelete() []*Delete { + if x != nil { + return x.Delete } return nil } -func (m *OpenConfigData) GetEom() []*Eom { - if m != nil { - return m.Eom +func (x *OpenConfigData) GetEom() []*Eom { + if x != nil { + return x.Eom } return nil } -func (m *OpenConfigData) GetSyncResponse() bool { - if m != nil { - return m.SyncResponse +func (x *OpenConfigData) GetSyncResponse() bool { + if x != nil { + return x.SyncResponse } return false } // Simple Key-value, where value could be one of scalar types type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Key - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // One of possible values // - // Types that are valid to be assigned to Value: + // Types that are assignable to Value: // *KeyValue_DoubleValue // *KeyValue_IntValue // *KeyValue_UintValue @@ -497,44 +794,44 @@ type KeyValue struct { Value isKeyValue_Value `protobuf_oneof:"value"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -type isKeyValue_Value interface { - isKeyValue_Value() +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type KeyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,oneof"` +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) } -type KeyValue_IntValue struct { - IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,oneof"` -} -type KeyValue_UintValue struct { - UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,oneof"` -} -type KeyValue_SintValue struct { - SintValue int64 
`protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,oneof"` -} -type KeyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,oneof"` -} -type KeyValue_StrValue struct { - StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,oneof"` + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type KeyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{8} } -func (*KeyValue_DoubleValue) isKeyValue_Value() {} -func (*KeyValue_IntValue) isKeyValue_Value() {} -func (*KeyValue_UintValue) isKeyValue_Value() {} -func (*KeyValue_SintValue) isKeyValue_Value() {} -func (*KeyValue_BoolValue) isKeyValue_Value() {} -func (*KeyValue_StrValue) isKeyValue_Value() {} -func (*KeyValue_BytesValue) isKeyValue_Value() {} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} func (m *KeyValue) GetValue() isKeyValue_Value { if m != nil { @@ -543,323 +840,412 @@ func (m *KeyValue) GetValue() isKeyValue_Value { return nil } -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*KeyValue_DoubleValue); ok { +func (x *KeyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*KeyValue_DoubleValue); ok { return x.DoubleValue } return 0 } -func (m *KeyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*KeyValue_IntValue); ok { +func (x *KeyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*KeyValue_IntValue); ok { return x.IntValue } return 0 } -func (m *KeyValue) GetUintValue() uint64 { - if x, ok := m.GetValue().(*KeyValue_UintValue); ok { +func (x *KeyValue) GetUintValue() uint64 { + if x, ok := x.GetValue().(*KeyValue_UintValue); ok { return x.UintValue } return 0 } -func (m *KeyValue) GetSintValue() int64 { - if x, ok := m.GetValue().(*KeyValue_SintValue); ok { +func (x *KeyValue) GetSintValue() int64 { + if x, ok := x.GetValue().(*KeyValue_SintValue); ok { return x.SintValue } return 0 } -func (m *KeyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*KeyValue_BoolValue); ok { +func (x *KeyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*KeyValue_BoolValue); ok { return x.BoolValue } return false } -func (m *KeyValue) GetStrValue() string { - if x, ok := m.GetValue().(*KeyValue_StrValue); ok { +func (x *KeyValue) GetStrValue() string { + if x, ok := x.GetValue().(*KeyValue_StrValue); ok { return x.StrValue } return "" } -func (m *KeyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*KeyValue_BytesValue); ok { +func (x *KeyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*KeyValue_BytesValue); ok { return x.BytesValue } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*KeyValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _KeyValue_OneofMarshaler, _KeyValue_OneofUnmarshaler, _KeyValue_OneofSizer, []interface{}{ - (*KeyValue_DoubleValue)(nil), - (*KeyValue_IntValue)(nil), - (*KeyValue_UintValue)(nil), - (*KeyValue_SintValue)(nil), - (*KeyValue_BoolValue)(nil), - (*KeyValue_StrValue)(nil), - (*KeyValue_BytesValue)(nil), - } +type isKeyValue_Value interface { + isKeyValue_Value() } -func _KeyValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *KeyValue_IntValue: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - b.EncodeVarint(8<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.SintValue)) - case *KeyValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *KeyValue_StrValue: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StrValue) - case *KeyValue_BytesValue: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.BytesValue) - case nil: - default: - return fmt.Errorf("KeyValue.Value has unexpected type %T", x) - } - return nil +type KeyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` } -func _KeyValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*KeyValue) - switch tag { - case 5: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &KeyValue_DoubleValue{math.Float64frombits(x)} - return true, err - case 6: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_IntValue{int64(x)} - return true, err - case 7: // value.uint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_UintValue{x} - return true, err - case 8: // value.sint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Value = &KeyValue_SintValue{int64(x)} - return true, err - case 9: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_BoolValue{x != 0} - return true, err - case 10: // value.str_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &KeyValue_StrValue{x} - return true, err - case 11: // value.bytes_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Value = &KeyValue_BytesValue{x} - return true, err - default: - return false, nil - } -} - -func _KeyValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case 
*KeyValue_IntValue: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.SintValue<<1) ^ uint64((int64(x.SintValue) >> 63)))) - case *KeyValue_BoolValue: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += 1 - case *KeyValue_StrValue: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StrValue))) - n += len(x.StrValue) - case *KeyValue_BytesValue: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.BytesValue))) - n += len(x.BytesValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type KeyValue_IntValue struct { + IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type KeyValue_UintValue struct { + UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,proto3,oneof"` +} + +type KeyValue_SintValue struct { + SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,proto3,oneof"` +} + +type KeyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type KeyValue_StrValue struct { + StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,proto3,oneof"` +} + +type KeyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*KeyValue_DoubleValue) isKeyValue_Value() {} + +func (*KeyValue_IntValue) isKeyValue_Value() {} + +func (*KeyValue_UintValue) isKeyValue_Value() {} + +func (*KeyValue_SintValue) isKeyValue_Value() {} + +func (*KeyValue_BoolValue) isKeyValue_Value() {} + +func (*KeyValue_StrValue) isKeyValue_Value() {} + +func (*KeyValue_BytesValue) isKeyValue_Value() {} + // Message indicating delete for a particular path type Delete struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Delete) Reset() { *m = Delete{} } -func (m *Delete) String() string { return proto.CompactTextString(m) } -func (*Delete) ProtoMessage() {} -func (*Delete) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Delete) ProtoMessage() {} -func (m *Delete) GetPath() string { - if m != nil { - return m.Path +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. 
+func (*Delete) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{9} +} + +func (x *Delete) GetPath() string { + if x != nil { + return x.Path } return "" } // Message indicating EOM for a particular path type Eom struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (m *Eom) Reset() { *m = Eom{} } -func (m *Eom) String() string { return proto.CompactTextString(m) } -func (*Eom) ProtoMessage() {} -func (*Eom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *Eom) Reset() { + *x = Eom{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Eom) GetPath() string { - if m != nil { - return m.Path +func (x *Eom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eom) ProtoMessage() {} + +func (x *Eom) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eom.ProtoReflect.Descriptor instead. +func (*Eom) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{10} +} + +func (x *Eom) GetPath() string { + if x != nil { + return x.Path } return "" } // Message sent for a telemetry subscription cancellation request type CancelSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *CancelSubscriptionRequest) Reset() { *m = CancelSubscriptionRequest{} } -func (m *CancelSubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionRequest) ProtoMessage() {} -func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *CancelSubscriptionRequest) Reset() { + *x = CancelSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *CancelSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionRequest) ProtoMessage() {} + +func (x *CancelSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionRequest.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{11} +} + +func (x *CancelSubscriptionRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription cancellation request type CancelSubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Return code - Code ReturnCode `protobuf:"varint,1,opt,name=code,enum=telemetry.ReturnCode" json:"code,omitempty"` + Code ReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=telemetry.ReturnCode" json:"code,omitempty"` // Return code string - CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr" json:"code_str,omitempty"` + CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr,proto3" json:"code_str,omitempty"` } -func (m *CancelSubscriptionReply) Reset() { *m = CancelSubscriptionReply{} } -func (m *CancelSubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionReply) ProtoMessage() {} -func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *CancelSubscriptionReply) Reset() { + *x = CancelSubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionReply) GetCode() ReturnCode { - if m != nil { - return m.Code +func (x *CancelSubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionReply) ProtoMessage() {} + +func (x *CancelSubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionReply.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{12} +} + +func (x *CancelSubscriptionReply) GetCode() ReturnCode { + if x != nil { + return x.Code } return ReturnCode_SUCCESS } -func (m *CancelSubscriptionReply) GetCodeStr() string { - if m != nil { - return m.CodeStr +func (x *CancelSubscriptionReply) GetCodeStr() string { + if x != nil { + return x.CodeStr } return "" } // Message sent for a telemetry get request type GetSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested // --- or --- // 0xFFFFFFFF for all subscription identifiers - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *GetSubscriptionsRequest) Reset() { *m = GetSubscriptionsRequest{} } -func (m *GetSubscriptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsRequest) ProtoMessage() {} -func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *GetSubscriptionsRequest) Reset() { + *x = GetSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsRequest) ProtoMessage() {} + +func (x *GetSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{13} +} + +func (x *GetSubscriptionsRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription get request type GetSubscriptionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of current telemetry subscriptions - SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList" json:"subscription_list,omitempty"` + SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList,proto3" json:"subscription_list,omitempty"` } -func (m *GetSubscriptionsReply) Reset() { *m = GetSubscriptionsReply{} } -func (m *GetSubscriptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsReply) ProtoMessage() {} -func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *GetSubscriptionsReply) Reset() { + *x = GetSubscriptionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { - if m != nil { - return m.SubscriptionList +func (x *GetSubscriptionsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsReply) ProtoMessage() {} + +func (x *GetSubscriptionsReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsReply.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{14} +} + +func (x *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { + if x != nil { + return x.SubscriptionList } return nil } // Message sent for telemetry agent operational states request type GetOperationalStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Per-subscription_id level operational state can be requested. 
// // Subscription identifier as returned by the device when @@ -870,434 +1256,718 @@ type GetOperationalStateRequest struct { // --- or --- // If subscription_id is not present then sent only agent-level // operational stats - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` // Control verbosity of the output - Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` + Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,proto3,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` } -func (m *GetOperationalStateRequest) Reset() { *m = GetOperationalStateRequest{} } -func (m *GetOperationalStateRequest) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateRequest) ProtoMessage() {} -func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *GetOperationalStateRequest) Reset() { + *x = GetOperationalStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetOperationalStateRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetOperationalStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationalStateRequest) ProtoMessage() {} + +func (x *GetOperationalStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationalStateRequest.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetOperationalStateRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } -func (m *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { - if m != nil { - return m.Verbosity +func (x *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { + if x != nil { + return x.Verbosity } return VerbosityLevel_DETAIL } // Reply to telemetry agent operational states request type GetOperationalStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of key-value pairs where // key = operational state definition // value = operational state value - Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` } -func (m *GetOperationalStateReply) Reset() { *m = GetOperationalStateReply{} } -func (m *GetOperationalStateReply) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateReply) ProtoMessage() {} -func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *GetOperationalStateReply) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *GetOperationalStateReply) Reset() { + *x = GetOperationalStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -// Message sent for a data encoding request -type DataEncodingRequest struct { -} - -func (m *DataEncodingRequest) Reset() { *m = DataEncodingRequest{} } -func (m *DataEncodingRequest) String() string { return proto.CompactTextString(m) } -func (*DataEncodingRequest) ProtoMessage() {} -func (*DataEncodingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -// Reply to data encodings supported request -type DataEncodingReply struct { - EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` +func (x *GetOperationalStateReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataEncodingReply) Reset() { *m = DataEncodingReply{} } -func (m *DataEncodingReply) String() string { return proto.CompactTextString(m) } -func (*DataEncodingReply) ProtoMessage() {} -func (*DataEncodingReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*GetOperationalStateReply) ProtoMessage() {} -func (m *DataEncodingReply) GetEncodingList() []EncodingType { - if m != nil { - return m.EncodingList +func (x *GetOperationalStateReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil -} - -func init() { - proto.RegisterType((*SubscriptionRequest)(nil), "telemetry.SubscriptionRequest") - proto.RegisterType((*SubscriptionInput)(nil), "telemetry.SubscriptionInput") - proto.RegisterType((*Collector)(nil), "telemetry.Collector") - proto.RegisterType((*Path)(nil), "telemetry.Path") - proto.RegisterType((*SubscriptionAdditionalConfig)(nil), "telemetry.SubscriptionAdditionalConfig") - proto.RegisterType((*SubscriptionReply)(nil), "telemetry.SubscriptionReply") 
- proto.RegisterType((*SubscriptionResponse)(nil), "telemetry.SubscriptionResponse") - proto.RegisterType((*OpenConfigData)(nil), "telemetry.OpenConfigData") - proto.RegisterType((*KeyValue)(nil), "telemetry.KeyValue") - proto.RegisterType((*Delete)(nil), "telemetry.Delete") - proto.RegisterType((*Eom)(nil), "telemetry.Eom") - proto.RegisterType((*CancelSubscriptionRequest)(nil), "telemetry.CancelSubscriptionRequest") - proto.RegisterType((*CancelSubscriptionReply)(nil), "telemetry.CancelSubscriptionReply") - proto.RegisterType((*GetSubscriptionsRequest)(nil), "telemetry.GetSubscriptionsRequest") - proto.RegisterType((*GetSubscriptionsReply)(nil), "telemetry.GetSubscriptionsReply") - proto.RegisterType((*GetOperationalStateRequest)(nil), "telemetry.GetOperationalStateRequest") - proto.RegisterType((*GetOperationalStateReply)(nil), "telemetry.GetOperationalStateReply") - proto.RegisterType((*DataEncodingRequest)(nil), "telemetry.DataEncodingRequest") - proto.RegisterType((*DataEncodingReply)(nil), "telemetry.DataEncodingReply") - proto.RegisterEnum("telemetry.ReturnCode", ReturnCode_name, ReturnCode_value) - proto.RegisterEnum("telemetry.VerbosityLevel", VerbosityLevel_name, VerbosityLevel_value) - proto.RegisterEnum("telemetry.EncodingType", EncodingType_name, EncodingType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for OpenConfigTelemetry service - -type OpenConfigTelemetryClient interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) + return mi.MessageOf(x) } -type openConfigTelemetryClient struct { - cc *grpc.ClientConn +// Deprecated: Use GetOperationalStateReply.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{16} } -func NewOpenConfigTelemetryClient(cc *grpc.ClientConn) OpenConfigTelemetryClient { - return &openConfigTelemetryClient{cc} -} - -func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { - stream, err := grpc.NewClientStream(ctx, &_OpenConfigTelemetry_serviceDesc.Streams[0], c.cc, "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) - if err != nil { - return nil, err - } - x := &openConfigTelemetryTelemetrySubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err +func (x *GetOperationalStateReply) GetKv() []*KeyValue { + if x != nil { + return x.Kv } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenConfigTelemetry_TelemetrySubscribeClient interface { - Recv() (*OpenConfigData, error) - grpc.ClientStream + return nil } -type openConfigTelemetryTelemetrySubscribeClient struct { - grpc.ClientStream +// Message sent for a data encoding request +type DataEncodingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { - m := new(OpenConfigData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DataEncodingRequest) Reset() { + *x = DataEncodingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { - out := new(CancelSubscriptionReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DataEncodingRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { - out := new(GetSubscriptionsReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DataEncodingRequest) ProtoMessage() {} -func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { - out := new(GetOperationalStateReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *DataEncodingRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { - out := new(DataEncodingReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use DataEncodingRequest.ProtoReflect.Descriptor instead. +func (*DataEncodingRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{17} } -// Server API for OpenConfigTelemetry service - -type OpenConfigTelemetryServer interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) -} +// Reply to data encodings supported request +type DataEncodingReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterOpenConfigTelemetryServer(s *grpc.Server, srv OpenConfigTelemetryServer) { - s.RegisterService(&_OpenConfigTelemetry_serviceDesc, srv) + EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,proto3,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` } -func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscriptionRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *DataEncodingReply) Reset() { + *x = DataEncodingReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) } -type OpenConfigTelemetry_TelemetrySubscribeServer interface { - Send(*OpenConfigData) error - grpc.ServerStream +func (x *DataEncodingReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type openConfigTelemetryTelemetrySubscribeServer struct { - grpc.ServerStream -} +func (*DataEncodingReply) ProtoMessage() {} -func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { - return x.ServerStream.SendMsg(m) -} - 
-func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/CancelTelemetrySubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) +func (x *DataEncodingReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetrySubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DataEncodingReply.ProtoReflect.Descriptor instead. 
+func (*DataEncodingReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{18} } -func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationalStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) +func (x *DataEncodingReply) GetEncodingList() []EncodingType { + if x != nil { + return x.EncodingList } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetryOperationalState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DataEncodingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetDataEncodings", +var File_oc_oc_proto protoreflect.FileDescriptor + +var file_oc_oc_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6f, 0x63, 0x2f, 0x6f, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xcd, 0x01, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, + 0x0e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x09, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x75, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x55, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x6c, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x6d, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x6d, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x10, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x73, 0x22, 0x7e, + 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xec, 0x02, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x29, 0x0a, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x6f, 0x6d, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x45, 0x6f, 0x6d, 0x52, 0x03, 0x65, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e, + 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x75, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x09, 0x75, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 
0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x1c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x19, 0x0a, + 0x03, 0x45, 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x44, 0x0a, 0x19, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5f, + 0x0a, 0x17, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, + 0x42, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x11, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x09, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x65, + 
0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x47, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4e, 0x4f, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x45, 0x52, + 0x53, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x52, 0x49, 0x45, 0x46, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x0c, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x58, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x49, 0x45, 0x54, 0x46, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, + 0x10, 0x03, 0x32, 0xfc, 0x03, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x70, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x43, 0x61, 0x6e, 
0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x19, 0x67, 0x65, + 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x10, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oc_oc_proto_rawDescOnce sync.Once + file_oc_oc_proto_rawDescData = file_oc_oc_proto_rawDesc +) + +func file_oc_oc_proto_rawDescGZIP() []byte { + file_oc_oc_proto_rawDescOnce.Do(func() { + file_oc_oc_proto_rawDescData = protoimpl.X.CompressGZIP(file_oc_oc_proto_rawDescData) + }) + return file_oc_oc_proto_rawDescData +} + +var file_oc_oc_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_oc_oc_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_oc_oc_proto_goTypes = []interface{}{ + (ReturnCode)(0), // 0: telemetry.ReturnCode + (VerbosityLevel)(0), // 1: telemetry.VerbosityLevel + (EncodingType)(0), // 2: telemetry.EncodingType + (*SubscriptionRequest)(nil), // 3: telemetry.SubscriptionRequest + (*SubscriptionInput)(nil), // 4: telemetry.SubscriptionInput + (*Collector)(nil), // 5: telemetry.Collector + (*Path)(nil), // 6: telemetry.Path + (*SubscriptionAdditionalConfig)(nil), // 7: telemetry.SubscriptionAdditionalConfig + (*SubscriptionReply)(nil), // 8: telemetry.SubscriptionReply + (*SubscriptionResponse)(nil), // 9: telemetry.SubscriptionResponse + (*OpenConfigData)(nil), // 10: telemetry.OpenConfigData + (*KeyValue)(nil), // 11: telemetry.KeyValue + (*Delete)(nil), // 12: telemetry.Delete + (*Eom)(nil), // 13: telemetry.Eom + (*CancelSubscriptionRequest)(nil), // 14: telemetry.CancelSubscriptionRequest + (*CancelSubscriptionReply)(nil), // 15: telemetry.CancelSubscriptionReply + (*GetSubscriptionsRequest)(nil), 
// 16: telemetry.GetSubscriptionsRequest + (*GetSubscriptionsReply)(nil), // 17: telemetry.GetSubscriptionsReply + (*GetOperationalStateRequest)(nil), // 18: telemetry.GetOperationalStateRequest + (*GetOperationalStateReply)(nil), // 19: telemetry.GetOperationalStateReply + (*DataEncodingRequest)(nil), // 20: telemetry.DataEncodingRequest + (*DataEncodingReply)(nil), // 21: telemetry.DataEncodingReply +} +var file_oc_oc_proto_depIdxs = []int32{ + 4, // 0: telemetry.SubscriptionRequest.input:type_name -> telemetry.SubscriptionInput + 6, // 1: telemetry.SubscriptionRequest.path_list:type_name -> telemetry.Path + 7, // 2: telemetry.SubscriptionRequest.additional_config:type_name -> telemetry.SubscriptionAdditionalConfig + 5, // 3: telemetry.SubscriptionInput.collector_list:type_name -> telemetry.Collector + 9, // 4: telemetry.SubscriptionReply.response:type_name -> telemetry.SubscriptionResponse + 6, // 5: telemetry.SubscriptionReply.path_list:type_name -> telemetry.Path + 11, // 6: telemetry.OpenConfigData.kv:type_name -> telemetry.KeyValue + 12, // 7: telemetry.OpenConfigData.delete:type_name -> telemetry.Delete + 13, // 8: telemetry.OpenConfigData.eom:type_name -> telemetry.Eom + 0, // 9: telemetry.CancelSubscriptionReply.code:type_name -> telemetry.ReturnCode + 8, // 10: telemetry.GetSubscriptionsReply.subscription_list:type_name -> telemetry.SubscriptionReply + 1, // 11: telemetry.GetOperationalStateRequest.verbosity:type_name -> telemetry.VerbosityLevel + 11, // 12: telemetry.GetOperationalStateReply.kv:type_name -> telemetry.KeyValue + 2, // 13: telemetry.DataEncodingReply.encoding_list:type_name -> telemetry.EncodingType + 3, // 14: telemetry.OpenConfigTelemetry.telemetrySubscribe:input_type -> telemetry.SubscriptionRequest + 14, // 15: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:input_type -> telemetry.CancelSubscriptionRequest + 16, // 16: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:input_type -> telemetry.GetSubscriptionsRequest + 18, // 17: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:input_type -> telemetry.GetOperationalStateRequest + 20, // 18: telemetry.OpenConfigTelemetry.getDataEncodings:input_type -> telemetry.DataEncodingRequest + 10, // 19: telemetry.OpenConfigTelemetry.telemetrySubscribe:output_type -> telemetry.OpenConfigData + 15, // 20: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:output_type -> telemetry.CancelSubscriptionReply + 17, // 21: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:output_type -> telemetry.GetSubscriptionsReply + 19, // 22: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:output_type -> telemetry.GetOperationalStateReply + 21, // 23: telemetry.OpenConfigTelemetry.getDataEncodings:output_type -> telemetry.DataEncodingReply + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_oc_oc_proto_init() } +func file_oc_oc_proto_init() { + if File_oc_oc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oc_oc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*SubscriptionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionAdditionalConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenConfigData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Eom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOperationalStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + file_oc_oc_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*KeyValue_DoubleValue)(nil), + (*KeyValue_IntValue)(nil), + (*KeyValue_UintValue)(nil), + (*KeyValue_SintValue)(nil), + (*KeyValue_BoolValue)(nil), + (*KeyValue_StrValue)(nil), + (*KeyValue_BytesValue)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _OpenConfigTelemetry_serviceDesc = grpc.ServiceDesc{ - ServiceName: "telemetry.OpenConfigTelemetry", - HandlerType: (*OpenConfigTelemetryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "cancelTelemetrySubscription", - Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, - }, - { - MethodName: "getTelemetrySubscriptions", - Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, - }, - { - MethodName: "getTelemetryOperationalState", - Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, - }, - { - MethodName: "getDataEncodings", - Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "telemetrySubscribe", - Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, - ServerStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oc_oc_proto_rawDesc, + NumEnums: 3, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "oc.proto", -} - -func init() { proto.RegisterFile("oc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x25, 0xd9, 0x12, 0xaf, 0x7e, 0x42, 0x8d, 0xe3, 0x2f, 0xb2, 0xa3, 0xaf, 0x71, 0xe8, - 0x16, 0x71, 0x82, 0xd4, 0x28, 0x94, 0x45, 0x51, 0xa4, 0x40, 0x10, 0xcb, 0x74, 0xac, 0xc6, 0x95, - 0xdc, 0xa1, 0x9c, 0xb6, 0x2b, 0x82, 0x22, 0x27, 0x36, 0x11, 0xfe, 0x95, 0x33, 0x12, 0xc2, 0x4d, - 0x9e, 0xa0, 0xe8, 0x9b, 0x75, 0xdd, 0x97, 0xe8, 0x23, 0x74, 0x51, 0xcc, 0x90, 0x94, 0x46, 0x89, - 0x94, 0x34, 0x2b, 0x91, 0xe7, 0x9e, 0xb9, 0xf7, 0xcc, 0xbd, 0x67, 0x86, 0x82, 0x7a, 0xe4, 0x1c, - 0xc7, 0x49, 0xc4, 0x22, 0xa4, 0x32, 0xe2, 0x93, 0x80, 0xb0, 0x24, 0xd5, 0xff, 0x54, 0x60, 0xc7, - 0x9c, 0x4d, 0xa9, 0x93, 0x78, 0x31, 0xf3, 0xa2, 0x10, 0x93, 0xdf, 0x66, 0x84, 0x32, 0xd4, 0x87, - 0x2d, 0x2f, 0x8c, 0x67, 0xac, 0xab, 0x1c, 0x28, 0x47, 0x8d, 0x7e, 0xef, 0x78, 0xb1, 0xe4, 0x58, 
- 0xa6, 0x0f, 0x39, 0x07, 0x67, 0x54, 0xf4, 0x18, 0xd4, 0xd8, 0x66, 0x37, 0x96, 0xef, 0x51, 0xd6, - 0x2d, 0x1f, 0x54, 0x8e, 0x1a, 0xfd, 0x5b, 0xd2, 0xba, 0x4b, 0x9b, 0xdd, 0xe0, 0x3a, 0x67, 0x5c, - 0x78, 0x94, 0xa1, 0x09, 0x74, 0x6c, 0xd7, 0xf5, 0x78, 0x16, 0xdb, 0xb7, 0x9c, 0x28, 0x7c, 0xed, - 0x5d, 0x77, 0x2b, 0xa2, 0xda, 0x83, 0x0d, 0xd5, 0x9e, 0x2f, 0xf8, 0x03, 0x41, 0xc7, 0x9a, 0xfd, - 0x1e, 0xa2, 0x5f, 0x42, 0xe7, 0x03, 0x7d, 0xe8, 0x29, 0xb4, 0x9d, 0xc8, 0xf7, 0x89, 0xc3, 0xa2, - 0x24, 0x53, 0xa7, 0x08, 0x75, 0xb7, 0xa5, 0x3a, 0x83, 0x82, 0x80, 0x5b, 0x0b, 0x2e, 0xd7, 0xa9, - 0x7f, 0x07, 0xea, 0x22, 0x86, 0xba, 0x50, 0xb3, 0x5d, 0x37, 0x21, 0x94, 0x8a, 0xc6, 0xa8, 0xb8, - 0x78, 0x45, 0x08, 0xaa, 0x71, 0x94, 0xf0, 0x7d, 0x2b, 0x47, 0x2d, 0x2c, 0x9e, 0xf5, 0xbf, 0x14, - 0xa8, 0xf2, 0x5d, 0x8b, 0xa0, 0xcd, 0x6e, 0xf2, 0x35, 0xe2, 0x19, 0xfd, 0x0f, 0xb6, 0x5f, 0x7b, - 0x3e, 0x23, 0x89, 0x58, 0xa2, 0xe2, 0xfc, 0x0d, 0x7d, 0x0d, 0x88, 0xce, 0xe2, 0x98, 0x27, 0xb5, - 0x66, 0xa1, 0x73, 0x63, 0x87, 0xd7, 0xc4, 0x15, 0x8d, 0xa9, 0xe3, 0x4e, 0x11, 0xb9, 0x2a, 0x02, - 0xe8, 0x18, 0x76, 0x02, 0xfb, 0xad, 0x45, 0x3d, 0x9f, 0x84, 0xcc, 0xf2, 0x42, 0x46, 0x92, 0xb9, - 0xed, 0x77, 0xab, 0x42, 0x46, 0x27, 0xb0, 0xdf, 0x9a, 0x22, 0x32, 0xcc, 0x03, 0xe8, 0x21, 0x68, - 0xd4, 0x0e, 0x62, 0x9f, 0x58, 0xaf, 0x13, 0x3e, 0xeb, 0xd0, 0x49, 0xbb, 0x5b, 0x82, 0x7c, 0x2b, - 0xc3, 0xcf, 0x0a, 0x18, 0xed, 0x41, 0x3d, 0x24, 0xc4, 0xb5, 0x48, 0x14, 0x74, 0xb7, 0x45, 0xfd, - 0x1a, 0x7f, 0x37, 0xa2, 0x40, 0xff, 0x5d, 0x81, 0xde, 0xc7, 0x26, 0x83, 0x0e, 0xa1, 0xe5, 0x7b, - 0x81, 0xc7, 0xac, 0x84, 0x38, 0x51, 0xe2, 0x66, 0xed, 0xda, 0xc2, 0x4d, 0x01, 0xe2, 0x0c, 0x43, - 0x8f, 0x01, 0x65, 0x24, 0xe6, 0x05, 0xc4, 0xa2, 0xc4, 0x89, 0x42, 0x97, 0x8a, 0x76, 0x6c, 0x61, - 0x4d, 0x44, 0x26, 0x5e, 0x40, 0xcc, 0x0c, 0x97, 0xe4, 0xd0, 0xbc, 0x1d, 0xb9, 0x1c, 0xaa, 0xbf, - 0x5b, 0x9d, 0x3a, 0x26, 0xb1, 0x9f, 0xa2, 0xa7, 0x50, 0x4f, 0x08, 0x8d, 0xa3, 0x90, 0x92, 0xdc, - 0xc5, 0xf7, 0x36, 0xf8, 0x0a, 0xe7, 0x34, 0xbc, 0x58, 0xf0, 0x79, 0x5e, 0xd6, 0x9f, 0xc1, 0xed, - 0x75, 0xf9, 0xd0, 0x03, 0xb8, 0x45, 0x25, 0xdc, 0xf2, 0x5c, 0xa1, 0xa4, 0x85, 0xdb, 0x32, 0x3c, - 0x74, 0xf5, 0xbf, 0xcb, 0xd0, 0x1e, 0xc7, 0x24, 0xcc, 0xba, 0x77, 0x6a, 0x33, 0x1b, 0xdd, 0x05, - 0x95, 0xa6, 0x94, 0x91, 0xa0, 0x58, 0xa5, 0xe2, 0x7a, 0x06, 0x0c, 0x5d, 0x74, 0x1f, 0x9a, 0x4e, - 0x14, 0xc4, 0x51, 0x28, 0x86, 0xee, 0xe6, 0xae, 0x6b, 0x2c, 0xb0, 0xa1, 0x8b, 0x8e, 0x40, 0xa3, - 0xb3, 0xa9, 0xb5, 0x42, 0xab, 0x2c, 0x8a, 0x0f, 0x24, 0x66, 0xe1, 0xce, 0xaa, 0xe4, 0x4e, 0xae, - 0x3c, 0xf3, 0x01, 0xb1, 0xc2, 0x59, 0x30, 0x25, 0x89, 0x70, 0x49, 0x15, 0xb7, 0x0b, 0x78, 0x24, - 0x50, 0xd4, 0x03, 0x95, 0x4f, 0x8f, 0x32, 0x3b, 0x88, 0x85, 0x4b, 0xaa, 0x78, 0x09, 0xa0, 0x43, - 0x28, 0xbf, 0x99, 0x77, 0x6b, 0xa2, 0x7f, 0x3b, 0x52, 0xff, 0x5e, 0x92, 0xf4, 0x95, 0xed, 0xcf, - 0x08, 0x2e, 0xbf, 0x99, 0xa3, 0x87, 0xb0, 0xed, 0x12, 0x9f, 0x30, 0xd2, 0xad, 0x0b, 0x62, 0x47, - 0x22, 0x9e, 0x8a, 0x00, 0xce, 0x09, 0xe8, 0x00, 0x2a, 0xdc, 0x8d, 0xaa, 0xe0, 0xb5, 0x25, 0x9e, - 0x11, 0x05, 0x98, 0x87, 0xb8, 0xf1, 0x68, 0x1a, 0x3a, 0xd6, 0x62, 0xf4, 0x20, 0xac, 0xd2, 0xe4, - 0x60, 0x31, 0x17, 0xfd, 0x8f, 0x32, 0xd4, 0x0b, 0x09, 0x48, 0x83, 0xca, 0x1b, 0x92, 0xe6, 0x2d, - 0xe6, 0x8f, 0xe8, 0x10, 0x9a, 0x6e, 0x34, 0x9b, 0xfa, 0xc4, 0x9a, 0x73, 0x86, 0xd8, 0xb9, 0x72, - 0x5e, 0xc2, 0x8d, 0x0c, 0xcd, 0x96, 0xfd, 0x1f, 0x54, 0x2f, 0x64, 0x39, 0x83, 0x6f, 0xbc, 0x72, - 0x5e, 0xc2, 0x75, 0x2f, 0x64, 0x59, 0xf8, 0x1e, 0xc0, 0x6c, 0x19, 0xaf, 0xf1, 0xc6, 0x9c, 0x97, - 0xb0, 0x3a, 0x93, 0x09, 
0x74, 0x49, 0xa8, 0x1f, 0x28, 0x47, 0x88, 0x13, 0xa8, 0x4c, 0x98, 0x46, - 0x91, 0x9f, 0x13, 0x54, 0xbe, 0x0d, 0x4e, 0xe0, 0xd8, 0x42, 0x01, 0x65, 0x49, 0x1e, 0xe7, 0xdb, - 0x54, 0xb9, 0x02, 0xca, 0x92, 0x2c, 0x7c, 0x1f, 0x1a, 0xd3, 0x94, 0x11, 0x9a, 0x13, 0x1a, 0x07, - 0xca, 0x51, 0xf3, 0xbc, 0x84, 0x41, 0x80, 0x82, 0x72, 0x52, 0x83, 0x2d, 0x11, 0xd4, 0x7b, 0xb0, - 0x9d, 0x75, 0x7a, 0xdd, 0x55, 0xa5, 0xef, 0x41, 0xc5, 0x88, 0x82, 0xb5, 0xa1, 0x53, 0xd8, 0x1b, - 0xd8, 0xa1, 0x43, 0xfc, 0x75, 0x1f, 0x91, 0xff, 0x6c, 0x7f, 0x0b, 0xee, 0xac, 0xcb, 0xc2, 0x4f, - 0xf1, 0x43, 0xa8, 0x3a, 0x91, 0x9b, 0x9d, 0xe0, 0x76, 0x7f, 0x57, 0x1a, 0x39, 0x26, 0x6c, 0x96, - 0x84, 0x83, 0xc8, 0x25, 0x58, 0x50, 0xf8, 0x05, 0xc1, 0x7f, 0x2d, 0xca, 0x8a, 0x3b, 0xb5, 0xc6, - 0xdf, 0x4d, 0x96, 0xe8, 0x27, 0x70, 0xe7, 0x05, 0x61, 0x72, 0x76, 0xfa, 0xd9, 0x22, 0xa7, 0xb0, - 0xfb, 0x61, 0x0e, 0x2e, 0x71, 0x08, 0x9d, 0x95, 0x0c, 0xd2, 0x17, 0xa6, 0xb7, 0xf1, 0xc6, 0x89, - 0xfd, 0x14, 0x6b, 0xf2, 0x32, 0x71, 0x91, 0xbc, 0x83, 0xfd, 0x17, 0x84, 0x8d, 0x63, 0x92, 0xd8, - 0xd9, 0x75, 0x6a, 0x32, 0x9b, 0x91, 0xcf, 0x95, 0x8a, 0xbe, 0x05, 0x75, 0x4e, 0x92, 0x69, 0x44, - 0x3d, 0x96, 0x8a, 0x56, 0xb4, 0xfb, 0x7b, 0x92, 0x92, 0x57, 0x45, 0xec, 0x82, 0xcc, 0x89, 0x8f, - 0x97, 0x5c, 0xfd, 0x19, 0x74, 0xd7, 0xd6, 0xe7, 0xdb, 0xcc, 0xce, 0xb2, 0xf2, 0xd1, 0xb3, 0xac, - 0xef, 0xc2, 0x0e, 0xbf, 0xbd, 0x8c, 0xd0, 0x89, 0x5c, 0x2f, 0xbc, 0xce, 0x95, 0xeb, 0x3f, 0x41, - 0x67, 0x15, 0xe6, 0x09, 0xbf, 0x87, 0x16, 0xc9, 0x81, 0x65, 0xcf, 0xda, 0xfd, 0x3b, 0xf2, 0xb1, - 0xce, 0xe3, 0x93, 0x34, 0x26, 0xb8, 0x59, 0xb0, 0x79, 0xab, 0x1e, 0xbd, 0x00, 0x58, 0x3a, 0x00, - 0x35, 0xa0, 0x66, 0x5e, 0x0d, 0x06, 0x86, 0x69, 0x6a, 0x25, 0xb4, 0x07, 0xbb, 0xa3, 0xb1, 0x65, - 0x5e, 0x9d, 0x98, 0x03, 0x3c, 0xbc, 0x9c, 0x0c, 0xc7, 0x23, 0xcb, 0x18, 0x4d, 0xf0, 0xaf, 0x9a, - 0x82, 0x3a, 0xd0, 0xba, 0x1a, 0xbd, 0x1c, 0x8d, 0x7f, 0x1e, 0x59, 0x06, 0xc6, 0x63, 0xac, 0x95, - 0x1f, 0xf5, 0xa1, 0xbd, 0xda, 0x10, 0x04, 0xb0, 0x7d, 0x6a, 0x4c, 0x9e, 0x0f, 0x2f, 0xb4, 0x12, - 0x52, 0x61, 0x6b, 0x62, 0x60, 0xd3, 0xd0, 0x14, 0xfe, 0x78, 0x82, 0x87, 0xc6, 0x99, 0x56, 0x7e, - 0xf4, 0x1c, 0x9a, 0xb2, 0x34, 0xd4, 0x02, 0xf5, 0x6a, 0x74, 0x6a, 0x9c, 0x0d, 0x47, 0xc6, 0xa9, - 0x56, 0x42, 0x35, 0xa8, 0xfc, 0xf2, 0xe3, 0x85, 0xa6, 0x70, 0xfc, 0x07, 0x73, 0x3c, 0xb2, 0x86, - 0xc6, 0xe4, 0x4c, 0x2b, 0xf3, 0xc4, 0x97, 0x78, 0x3c, 0x19, 0x3f, 0xd1, 0x2a, 0xfd, 0x7f, 0x2a, - 0xb0, 0xb3, 0xbc, 0xf2, 0x27, 0xc5, 0x96, 0x91, 0x09, 0x68, 0xb1, 0xff, 0xdc, 0x32, 0x53, 0x82, - 0xbe, 0xd8, 0x68, 0x24, 0xd1, 0xe0, 0x7d, 0x79, 0xbc, 0xab, 0x1f, 0x12, 0xbd, 0xf4, 0x8d, 0x82, - 0x3c, 0xb8, 0xeb, 0x88, 0x03, 0x36, 0x79, 0x2f, 0xb5, 0x48, 0x82, 0xbe, 0x94, 0xff, 0x08, 0x6d, - 0x3a, 0xce, 0xfb, 0xfa, 0x27, 0x58, 0xb1, 0x9f, 0xea, 0x25, 0xe4, 0xc0, 0xde, 0x35, 0x61, 0x6b, - 0xeb, 0x50, 0x24, 0xa7, 0xd8, 0x70, 0x20, 0xf7, 0x0f, 0x3e, 0xca, 0xc9, 0x8a, 0xf8, 0xd0, 0x93, - 0x8b, 0xbc, 0x6f, 0x58, 0xf4, 0xd5, 0x6a, 0x8e, 0x0d, 0x07, 0x6a, 0xff, 0xf0, 0x53, 0xb4, 0xac, - 0x1a, 0x06, 0xed, 0x9a, 0x30, 0xd9, 0xc0, 0x74, 0x65, 0x20, 0x6b, 0x1c, 0xbf, 0xdf, 0xdb, 0x18, - 0x17, 0x39, 0xa7, 0xdb, 0xe2, 0xaf, 0xf8, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xe3, - 0x4f, 0x0d, 0x96, 0x0b, 0x00, 0x00, + GoTypes: file_oc_oc_proto_goTypes, + DependencyIndexes: file_oc_oc_proto_depIdxs, + EnumInfos: file_oc_oc_proto_enumTypes, + MessageInfos: file_oc_oc_proto_msgTypes, + }.Build() + File_oc_oc_proto = out.File + file_oc_oc_proto_rawDesc = nil + file_oc_oc_proto_goTypes = nil + file_oc_oc_proto_depIdxs = nil 
} diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index cf4aa145e6911..8c3ad32b9913f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -36,6 +36,7 @@ syntax = "proto3"; package telemetry; +option go_package = ".;telemetry"; // Interface exported by Agent service OpenConfigTelemetry { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go new file mode 100644 index 0000000000000..593e5a1e1002a --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package telemetry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// OpenConfigTelemetryClient is the client API for OpenConfigTelemetry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenConfigTelemetryClient interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) +} + +type openConfigTelemetryClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenConfigTelemetryClient(cc grpc.ClientConnInterface) OpenConfigTelemetryClient { + return &openConfigTelemetryClient{cc} +} + +func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenConfigTelemetry_ServiceDesc.Streams[0], "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &openConfigTelemetryTelemetrySubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenConfigTelemetry_TelemetrySubscribeClient interface { + Recv() (*OpenConfigData, error) + grpc.ClientStream +} + +type openConfigTelemetryTelemetrySubscribeClient struct { + grpc.ClientStream +} + +func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { + m := new(OpenConfigData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { + out := new(CancelSubscriptionReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { + out := new(GetSubscriptionsReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { + out := new(GetOperationalStateReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { + out := new(DataEncodingReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OpenConfigTelemetryServer is the server API for OpenConfigTelemetry service. +// All implementations must embed UnimplementedOpenConfigTelemetryServer +// for forward compatibility +type OpenConfigTelemetryServer interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. 
+ GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +// UnimplementedOpenConfigTelemetryServer must be embedded to have forward compatible implementations. +type UnimplementedOpenConfigTelemetryServer struct { +} + +func (UnimplementedOpenConfigTelemetryServer) TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method TelemetrySubscribe not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelTelemetrySubscription not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetrySubscriptions not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetryOperationalState not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataEncodings not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) mustEmbedUnimplementedOpenConfigTelemetryServer() {} + +// UnsafeOpenConfigTelemetryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenConfigTelemetryServer will +// result in compilation errors. 
+type UnsafeOpenConfigTelemetryServer interface { + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +func RegisterOpenConfigTelemetryServer(s grpc.ServiceRegistrar, srv OpenConfigTelemetryServer) { + s.RegisterService(&OpenConfigTelemetry_ServiceDesc, srv) +} + +func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) +} + +type OpenConfigTelemetry_TelemetrySubscribeServer interface { + Send(*OpenConfigData) error + grpc.ServerStream +} + +type openConfigTelemetryTelemetrySubscribeServer struct { + grpc.ServerStream +} + +func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { + return x.ServerStream.SendMsg(m) +} + +func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationalStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataEncodingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getDataEncodings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OpenConfigTelemetry_ServiceDesc is the grpc.ServiceDesc for OpenConfigTelemetry service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenConfigTelemetry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "telemetry.OpenConfigTelemetry", + HandlerType: (*OpenConfigTelemetryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "cancelTelemetrySubscription", + Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, + }, + { + MethodName: "getTelemetrySubscriptions", + Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, + }, + { + MethodName: "getTelemetryOperationalState", + Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, + }, + { + MethodName: "getDataEncodings", + Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "telemetrySubscribe", + Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "oc/oc.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 8db4ce0d543bc..9fed6a324bf34 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -47,6 +47,7 @@ var dataWithStringValues = &telemetry.OpenConfigData{ } type openConfigTelemetryServer struct { + telemetry.UnimplementedOpenConfigTelemetryServer } func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index a38d5989cb5d0..03b28ad2cb07f 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -15,15 +15,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/metric" + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" + "google.golang.org/protobuf/proto" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - riemanngo "github.com/riemann/riemann-go-client" - riemangoProto "github.com/riemann/riemann-go-client/proto" ) type RiemannSocketListener struct { diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 92dc829ac1312..7a995fc475cb7 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testutil" riemanngo 
"github.com/riemann/riemann-go-client" "github.com/stretchr/testify/require" "gotest.tools/assert" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestSocketListener_tcp(t *testing.T) { diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index cc8b1a40a10a5..b1d6ea59d2f3b 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -10,8 +10,6 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - googlepbduration "github.com/golang/protobuf/ptypes/duration" - googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" @@ -22,6 +20,8 @@ import ( distributionpb "google.golang.org/genproto/googleapis/api/distribution" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -393,8 +393,8 @@ func (s *Stackdriver) newTimeSeriesConf( ) *timeSeriesConf { filter := s.newListTimeSeriesFilter(metricType) interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ Name: fmt.Sprintf("projects/%s", s.Project), @@ -432,7 +432,7 @@ func (t *timeSeriesConf) initForAggregate(alignerStr string) { } aligner := monitoringpb.Aggregation_Aligner(alignerInt) agg := &monitoringpb.Aggregation{ - AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + AlignmentPeriod: &durationpb.Duration{Seconds: 60}, PerSeriesAligner: aligner, } t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) @@ -522,8 +522,8 @@ func (s *Stackdriver) generatetimeSeriesConfs( if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { // Update interval for timeseries requests in timeseries cache interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { timeSeriesConf.listTimeSeriesRequest.Interval = interval diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 0502c7bed9765..ad6b15145031a 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" @@ -15,6 +14,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) type Call struct { @@ -105,7 +105,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: 
&monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -138,7 +138,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -204,7 +204,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -249,7 +249,7 @@ func TestGather(t *testing.T) { Points: []*monitoringpb.Point{ { Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -283,7 +283,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -378,7 +378,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -473,7 +473,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -556,7 +556,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -702,7 +702,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -717,7 +717,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -732,7 +732,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -1081,7 +1081,7 @@ func TestListMetricDescriptorFilter(t *testing.T) { ch <- createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index d4f660ff7c569..d6b24ff78839b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -10,7 +10,6 @@ import ( "strings" monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. 
- googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" @@ -18,6 +17,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) // Stackdriver is the Google Stackdriver config info. @@ -247,16 +247,16 @@ func getStackdriverTimeInterval( switch m { case metricpb.MetricDescriptor_GAUGE: return &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil case metricpb.MetricDescriptor_CUMULATIVE: return &monitoringpb.TimeInterval{ - StartTime: &googlepb.Timestamp{ + StartTime: ×tamppb.Timestamp{ Seconds: start, }, - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: end, }, }, nil diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 8af553b374c53..bb2a620e93668 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -12,9 +12,6 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - "github.com/golang/protobuf/proto" - emptypb "github.com/golang/protobuf/ptypes/empty" - googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -22,6 +19,9 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" ) // clientOpt is the option tests should use to connect to the test server. 
@@ -181,7 +181,7 @@ func TestWriteAscendingTime(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -196,7 +196,7 @@ func TestWriteAscendingTime(t *testing.T) { ts = request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 2, }, }) @@ -311,7 +311,7 @@ func TestWriteBatchable(t *testing.T) { ts := request.TimeSeries[0] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -324,7 +324,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[1] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 1, }, }) @@ -337,7 +337,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[2] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 3, }, }) @@ -350,7 +350,7 @@ func TestWriteBatchable(t *testing.T) { ts = request.TimeSeries[4] require.Len(t, ts.Points, 1) require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ - EndTime: &googlepb.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: 5, }, }) diff --git a/plugins/parsers/prometheusremotewrite/parser.go b/plugins/parsers/prometheusremotewrite/parser.go index 9f0a08a682a19..3b9f25de28680 100644 --- a/plugins/parsers/prometheusremotewrite/parser.go +++ b/plugins/parsers/prometheusremotewrite/parser.go @@ -8,7 +8,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/gogo/protobuf/proto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" ) @@ -22,7 +21,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { var metrics []telegraf.Metric var req prompb.WriteRequest - if err := proto.Unmarshal(buf, &req); err != nil { + if err := req.Unmarshal(buf); err != nil { return nil, fmt.Errorf("unable to unmarshal request body: %s", err) } diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index caa8a7334d91d..e160107101ab7 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -7,9 +7,9 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) const helpString = "Telegraf collected metric" diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index deb400ba2d899..67447e66417ae 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) type Input struct { diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go 
b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index fb3cea4edd352..b6dd180dba30b 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/influxdata/telegraf/plugins/serializers/prometheus" @@ -236,7 +235,8 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return false }) } - data, err := proto.Marshal(&prompb.WriteRequest{Timeseries: promTS}) + pb := &prompb.WriteRequest{Timeseries: promTS} + data, err := pb.Marshal() if err != nil { return nil, fmt.Errorf("unable to marshal protobuf: %v", err) } diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go index f9e47eac54db5..f07c2c3fecfc6 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" @@ -664,7 +663,7 @@ func prompbToText(data []byte) ([]byte, error) { return nil, err } var req prompb.WriteRequest - err = proto.Unmarshal(protobuff, &req) + err = req.Unmarshal(protobuff) if err != nil { return nil, err } From d5afd654c69ff30355f33eb03ff3b7cd6596fb12 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 9 Nov 2021 08:28:38 -0700 Subject: [PATCH 031/133] chore: update go version from 1.17.2 to 1.17.3 (#10073) --- .circleci/config.yml | 2 +- Makefile | 4 ++-- scripts/ci-1.17.docker | 2 +- scripts/installgo_mac.sh | 6 +++--- scripts/installgo_windows.sh | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3fa611f8b26fb..f282aa8b7a819 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ executors: go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.17.2' + - image: 'quay.io/influxdb/telegraf-ci:1.17.3' environment: GOFLAGS: -p=8 mac: diff --git a/Makefile b/Makefile index 7b91fa1edcfec..52362a307790c 100644 --- a/Makefile +++ b/Makefile @@ -211,8 +211,8 @@ plugin-%: .PHONY: ci-1.17 ci-1.17: - docker build -t quay.io/influxdb/telegraf-ci:1.17.2 - < scripts/ci-1.17.docker - docker push quay.io/influxdb/telegraf-ci:1.17.2 + docker build -t quay.io/influxdb/telegraf-ci:1.17.3 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.3 .PHONY: install install: $(buildbin) diff --git a/scripts/ci-1.17.docker b/scripts/ci-1.17.docker index a69a0d7eddbe3..6b220c0898e94 100644 --- a/scripts/ci-1.17.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.17.2 +FROM golang:1.17.3 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh index f15aefa6a1641..2676495d3664a 100644 --- a/scripts/installgo_mac.sh +++ b/scripts/installgo_mac.sh @@ -3,13 +3,13 @@ set -eux ARCH=$(uname -m) -GO_VERSION="1.17.2" +GO_VERSION="1.17.3" if [ "$ARCH" = 'arm64' ]; then GO_ARCH="darwin-arm64" - GO_VERSION_SHA="ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904" # from https://golang.org/dl + GO_VERSION_SHA="ffe45ef267271b9681ca96ca9b0eb9b8598dd82f7bb95b27af3eef2461dc3d2c" 
# from https://golang.org/dl elif [ "$ARCH" = 'x86_64' ]; then GO_ARCH="darwin-amd64" - GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl + GO_VERSION_SHA="765c021e372a87ce0bc58d3670ab143008dae9305a79e9fa83440425529bb636" # from https://golang.org/dl fi # This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh index bd5dcca3dbc14..1571daa28eecb 100644 --- a/scripts/installgo_windows.sh +++ b/scripts/installgo_windows.sh @@ -2,7 +2,7 @@ set -eux -GO_VERSION="1.17.2" +GO_VERSION="1.17.3" setup_go () { choco upgrade golang --version=${GO_VERSION} From a288bc0bf8e060d5754c506dbeefdd79421931e9 Mon Sep 17 00:00:00 2001 From: singamSrikar <47853020+singamSrikar@users.noreply.github.com> Date: Tue, 9 Nov 2021 21:02:07 +0530 Subject: [PATCH 032/133] feat: Openstack input plugin (#9236) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 1 + plugins/inputs/all/all.go | 1 + plugins/inputs/openstack/README.md | 362 ++++++++++ plugins/inputs/openstack/openstack.go | 958 ++++++++++++++++++++++++++ 6 files changed, 1324 insertions(+) create mode 100644 plugins/inputs/openstack/README.md create mode 100644 plugins/inputs/openstack/openstack.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index e03a506bddd17..440349a6f5c91 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -109,6 +109,7 @@ following works: - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) +- github.com/gophercloud/gophercloud [Apache License 2.0](https://github.com/gophercloud/gophercloud/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 950ff837bd873..b5733ee0ea371 100644 --- a/go.mod +++ b/go.mod @@ -127,6 +127,7 @@ require ( github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 + github.com/gophercloud/gophercloud v0.16.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 github.com/gosnmp/gosnmp v1.32.0 diff --git a/go.sum b/go.sum index 22f1d61b5c323..60f8a841c4964 100644 --- a/go.sum +++ b/go.sum @@ -1096,6 +1096,7 @@ github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97Dwqy github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= github.com/gophercloud/gophercloud v0.16.0/go.mod 
h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 690df0d3b0e46..1320e7b025ca8 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -137,6 +137,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/openldap" _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/openstack" _ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" diff --git a/plugins/inputs/openstack/README.md b/plugins/inputs/openstack/README.md new file mode 100644 index 0000000000000..aa2d6eea09302 --- /dev/null +++ b/plugins/inputs/openstack/README.md @@ -0,0 +1,362 @@ + +# OpenStack Input Plugin + +Collects the metrics from following services of OpenStack: + +* CINDER(Block Storage) +* GLANCE(Image service) +* HEAT(Orchestration) +* KEYSTONE(Identity service) +* NEUTRON(Networking) +* NOVA(Compute Service) + +At present this plugin requires the following APIs: + +* blockstorage v2 +* compute v2 +* identity v3 +* networking v2 +* orchestration v1 + +## Configuration and Recommendations +### Recommendations + +Due to the large number of unique tags that this plugin generates, in order to keep the cardinality down it is **highly recommended** to use [modifiers](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#modifiers) like `tagexclude` to discard unwanted tags. + +For deployments with only a small number of VMs and hosts, a small polling interval (e.g. seconds-minutes) is acceptable. For larger deployments, polling a large number of systems will impact performance. Use the `interval` option to change how often the plugin is run: + +`interval`: How often a metric is gathered. Setting this value at the plugin level overrides the global agent interval setting. + +Also, consider polling OpenStack services at different intervals depending on your requirements. This will help with load and cardinality as well. + +``` +[[inputs.openstack]] + interval = 5m + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... + enabled_services = ["nova_services"] + .... + +[[inputs.openstack]] + interval = 30m + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... + enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + .... +``` + + +### Configuration + +``` + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights. 
+ username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## output secrets (such as adminPass(for server) and UserID(for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(s) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from Openstack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data recieved from Openstack. + ## If false format is unix nanoseconds. + # human_readable_timestamps = false + + ## Measure Openstack call duration + # measure_openstack_requests = false +``` + +### Measurements, Tags & Fields + +* openstack_aggregate + * name + * aggregate_host [string] + * aggregate_hosts [integer] + * created_at [string] + * deleted [boolean] + * deleted_at [string] + * id [integer] + * updated_at [string] +* openstack_flavor + * is_public + * name + * disk [integer] + * ephemeral [integer] + * id [string] + * ram [integer] + * rxtx_factor [float] + * swap [integer] + * vcpus [integer] +* openstack_hypervisor + * cpu_arch + * cpu_feature_tsc + * cpu_feature_tsc-deadline + * cpu_feature_tsc_adjust + * cpu_feature_tsx-ctrl + * cpu_feature_vme + * cpu_feature_vmx + * cpu_feature_x2apic + * cpu_feature_xgetbv1 + * cpu_feature_xsave + * cpu_model + * cpu_vendor + * hypervisor_hostname + * hypervisor_type + * hypervisor_version + * service_host + * service_id + * state + * status + * cpu_topology_cores [integer] + * cpu_topology_sockets [integer] + * cpu_topology_threads [integer] + * current_workload [integer] + * disk_available_least [integer] + * free_disk_gb [integer] + * free_ram_mb [integer] + * host_ip [string] + * id [string] + * local_gb [integer] + * local_gb_used [integer] + * memory_mb [integer] + * memory_mb_used [integer] + * running_vms [integer] + * vcpus [integer] + * vcpus_used [integer] +* openstack_identity + * description + * domain_id + * name + * parent_id + * enabled boolean + * id string + * is_domain boolean + * projects integer +* openstack_network + * name + * openstack_tags_xyz + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * availability_zone_hints [string] + * created_at [string] + * id [string] + * shared [boolean] + * subnet_id [string] + * subnets [integer] + * updated_at [string] +* openstack_newtron_agent + * agent_host + * agent_type + * availability_zone + * binary + * topic + * admin_state_up [boolean] + * alive [boolean] + * created_at [string] + * heartbeat_timestamp [string] + * id [string] + * resources_synced [boolean] + * started_at [string] +* openstack_nova_service + * host_machine + * name + * state + * status + * zone + * disabled_reason [string] + * forced_down [boolean] + * id [string] + * updated_at [string] +* openstack_port + * device_id + * device_owner + * name + * network_id + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * allowed_address_pairs [integer] + * fixed_ips [integer] + * id 
[string] + * ip_address [string] + * mac_address [string] + * security_groups [string] + * subnet_id [string] +* openstack_request_duration + * agents [integer] + * aggregates [integer] + * flavors [integer] + * hypervisors [integer] + * networks [integer] + * nova_services [integer] + * ports [integer] + * projects [integer] + * servers [integer] + * stacks [integer] + * storage_pools [integer] + * subnets [integer] + * volumes [integer] +* openstack_server + * flavor + * host_id + * host_name + * image + * key_name + * name + * project + * status + * tenant_id + * user_id + * accessIPv4 [string] + * accessIPv6 [string] + * addresses [integer] + * adminPass [string] + * created [string] + * disk_gb [integer] + * fault_code [integer] + * fault_created [string] + * fault_details [string] + * fault_message [string] + * id [string] + * progress [integer] + * ram_mb [integer] + * security_groups [integer] + * updated [string] + * vcpus [integer] + * volume_id [string] + * volumes_attached [integer] +* openstack_server_diagnostics + * disk_name + * no_of_disks + * no_of_ports + * port_name + * server_id + * cpu0_time [float] + * cpu1_time [float] + * cpu2_time [float] + * cpu3_time [float] + * cpu4_time [float] + * cpu5_time [float] + * cpu6_time [float] + * cpu7_time [float] + * disk_errors [float] + * disk_read [float] + * disk_read_req [float] + * disk_write [float] + * disk_write_req [float] + * memory [float] + * memory-actual [float] + * memory-rss [float] + * memory-swap_in [float] + * port_rx [float] + * port_rx_drop [float] + * port_rx_errors [float] + * port_rx_packets [float] + * port_tx [float] + * port_tx_drop [float] + * port_tx_errors [float] + * port_tx_packets [float] +* openstack_service + * name + * service_enabled [boolean] + * service_id [string] +* openstack_storage_pool + * driver_version + * name + * storage_protocol + * vendor_name + * volume_backend_name + * free_capacity_gb [float] + * total_capacity_gb [float] +* openstack_subnet + * cidr + * gateway_ip + * ip_version + * name + * network_id + * openstack_tags_subnet_type_PRV + * project_id + * tenant_id + * allocation_pools [string] + * dhcp_enabled [boolean] + * dns_nameservers [string] + * id [string] +* openstack_volume + * attachment_attachment_id + * attachment_device + * attachment_host_name + * availability_zone + * bootable + * description + * name + * status + * user_id + * volume_type + * attachment_attached_at [string] + * attachment_server_id [string] + * created_at [string] + * encrypted [boolean] + * id [string] + * multiattach [boolean] + * size [integer] + * total_attachments [integer] + * updated_at [string] + +### Example Output + +``` +> openstack_newtron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 +> openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000 +> openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000 +> 
openstack_hypervisor,cpu_arch=x86_64,cpu_feature_3dnowprefetch=true,cpu_feature_abm=true,cpu_feature_acpi=true,cpu_feature_adx=true,cpu_feature_aes=true,cpu_feature_apic=true,cpu_feature_xtpr=true,cpu_model=C-Server,cpu_vendor=xyz,host=telegraf_host,hypervisor_hostname=vim3,hypervisor_type=QEMU,hypervisor_version=4002000,service_host=vim3,service_id=192,state=up,status=enabled cpu_topology_cores=28i,cpu_topology_sockets=1i,cpu_topology_threads=2i,current_workload=0i,disk_available_least=2596i,free_disk_gb=2744i,free_ram_mb=374092i,host_ip="xx:xx:xx:x::xxx",id="12",local_gb=3366i,local_gb_used=622i,memory_mb=515404i,memory_mb_used=141312i,running_vms=15i,vcpus=0i,vcpus_used=72i 1634197618000000000 +> openstack_network,host=telegraf_host,name=Network\ 2,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,status=active,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx admin_state_up=true,availability_zone_hints="",created_at="2021-07-29T15:58:25Z",id="f5af5e71-e890-4245-a377-d4d86273c319",shared=false,subnet_id="2f7341c6-074d-42aa-9abc-71c662d9b336",subnets=1i,updated_at="2021-09-02T16:46:48Z" 1634197618000000000 +> openstack_nova_service,host=telegraf_host,host_machine=vim3,name=nova-compute,state=up,status=enabled,zone=nova disabled_reason="",forced_down=false,id="192",updated_at="2021-10-14T07:46:52Z" 1634197619000000000 +> openstack_port,device_id=a043b8b3-2831-462a-bba8-19088f3db45a,device_owner=compute:nova,host=telegraf_host,name=offload-port1,network_id=6b40d744-9a48-43f2-a4c8-2e0ccb45ac96,project_id=71f9bc44621234f8af99a3949258fc7b,status=ACTIVE,tenant_id=71f9bc44621234f8af99a3949258fc7b admin_state_up=true,allowed_address_pairs=0i,fixed_ips=1i,id="fb64626a-07e1-4d78-a70d-900e989537cc",ip_address="1.1.1.5",mac_address="xx:xx:xx:xx:xx:xx",security_groups="",subnet_id="eafa1eca-b318-4746-a55a-682478466689" 1634197620000000000 +> openstack_identity,domain_id=default,host=telegraf_host,name=service,parent_id=default enabled=true,id="a0877dd2ed1d4b5f952f5689bc04b0cb",is_domain=false,projects=7i 1634197621000000000 +> openstack_server,flavor=0d438971-56cf-4f86-801f-7b04b29384cb,host=telegraf_host,host_id=c0fe05b14261d35cf8748a3f5aae1234b88c2fd62b69fe24ca4a27e9,host_name=vim1,image=b295f1f3-1w23-470c-8734-197676eedd16,name=test-VM7,project=admin,status=active,tenant_id=80ac889731f540498fb1dc78e4bcd5ed,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx accessIPv4="",accessIPv6="",addresses=1i,adminPass="",created="2021-09-07T14:40:11Z",disk_gb=8i,fault_code=0i,fault_created="0001-01-01T00:00:00Z",fault_details="",fault_message="",id="db92ee0d-459b-458e-9fe3-2be5ec7c87e1",progress=0i,ram_mb=16384i,security_groups=1i,updated="2021-09-07T14:40:19Z",vcpus=4i,volumes_attached=0i 1634197656000000000 +> openstack_service,host=telegraf_host,name=identity service_enabled=true,service_id="ad605eff92444a158d0f78768f2c4668" 1634197656000000000 +> openstack_storage_pool,driver_version=1.0.0,host=telegraf_host,name=storage_bloack_1,storage_protocol=nfs,vendor_name=xyz,volume_backend_name=abc free_capacity_gb=4847.54,total_capacity_gb=4864 1634197658000000000 +> openstack_subnet,cidr=10.10.20.10/28,gateway_ip=10.10.20.17,host=telegraf_host,ip_version=4,name=IPv4_Subnet_2,network_id=73c6e1d3-f522-4a3f-8e3c-762a0c06d68b,openstack_tags_lab=True,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx allocation_pools="10.10.20.11-10.10.20.30",dhcp_enabled=true,dns_nameservers="",id="db69fbb2-9ca1-4370-8c78-82a27951c94b" 1634197660000000000 +> 
openstack_volume,attachment_attachment_id=c83ca0d6-c467-44a0-ac1f-f87d769c0c65,attachment_device=/dev/vda,attachment_host_name=vim1,availability_zone=nova,bootable=true,host=telegraf_host,status=in-use,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,volume_type=storage_bloack_1 attachment_attached_at="2021-01-12T21:02:04Z",attachment_server_id="c0c6b4af-0d26-4a0b-a6b4-4ea41fa3bb4a",created_at="2021-01-12T21:01:47Z",encrypted=false,id="d4204f1b-b1ae-1233-b25c-a57d91d2846e",multiattach=false,size=80i,total_attachments=1i,updated_at="2021-01-12T21:02:04Z" 1634197660000000000 +> openstack_request_duration,host=telegraf_host networks=703214354i 1634197660000000000 +> openstack_server_diagnostics,disk_name=vda,host=telegraf_host,no_of_disks=1,no_of_ports=2,port_name=vhu1234566c-9c,server_id=fdddb58c-bbb9-1234-894b-7ae140178909 cpu0_time=4924220000000,cpu1_time=218809610000000,cpu2_time=218624300000000,cpu3_time=220505700000000,disk_errors=-1,disk_read=619156992,disk_read_req=35423,disk_write=8432728064,disk_write_req=882445,memory=8388608,memory-actual=8388608,memory-rss=37276,memory-swap_in=0,port_rx=410516469288,port_rx_drop=13373626,port_rx_errors=-1,port_rx_packets=52140392,port_tx=417312195654,port_tx_drop=0,port_tx_errors=0,port_tx_packets=321385978 1634197660000000000 +``` diff --git a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go new file mode 100644 index 0000000000000..eac0116e98fd4 --- /dev/null +++ b/plugins/inputs/openstack/openstack.go @@ -0,0 +1,958 @@ +// Package openstack implements an OpenStack input plugin for Telegraf +// +// The OpenStack input plug is a simple two phase metric collector. In the first +// pass a set of gatherers are run against the API to cache collections of resources. +// In the second phase the gathered resources are combined and emitted as metrics. +// +// No aggregation is performed by the input plugin, instead queries to InfluxDB should +// be used to gather global totals of things such as tag frequency. 
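The package comment above describes a two-phase collector: gather and cache API resources first, then combine the cached collections into metrics. A rough, standalone sketch of that pattern (hypothetical names only; resourceCache, gather, and emit do not exist in the plugin):

```go
package main

import "fmt"

// resourceCache stands in for the plugin's cached API collections.
type resourceCache struct {
	hypervisors []string
}

// gather is phase one: pull resources from the API and cache them.
func (c *resourceCache) gather() {
	c.hypervisors = append(c.hypervisors, "vim1", "vim2")
}

// emit is phase two: combine cached resources and emit them as metrics.
func (c *resourceCache) emit() {
	for _, h := range c.hypervisors {
		fmt.Printf("openstack_hypervisor,hypervisor_hostname=%s running_vms=0i\n", h)
	}
}

func main() {
	c := &resourceCache{}
	c.gather()
	c.emit()
}
```

In the plugin itself, the cached collections populated during gathering (flavors, projects, and so on) are reused when later gatherers, such as the server gatherer, emit their metrics.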
+package openstack + +import ( + "context" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diagnostics" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + nova_services "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/agents" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + typePort = regexp.MustCompile(`_rx$|_rx_drop$|_rx_errors$|_rx_packets$|_tx$|_tx_drop$|_tx_errors$|_tx_packets$`) + typeCPU = regexp.MustCompile(`cpu[0-9]{1,2}_time$`) + typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`) +) + +// volume is a structure used to unmarshal raw JSON from the API into. +type volume struct { + volumes.Volume + volumetenants.VolumeTenantExt +} + +// OpenStack is the main structure associated with a collection instance. 
+type OpenStack struct { + // Configuration variables + IdentityEndpoint string `toml:"authentication_endpoint"` + Domain string `toml:"domain"` + Project string `toml:"project"` + Username string `toml:"username"` + Password string `toml:"password"` + EnabledServices []string `toml:"enabled_services"` + ServerDiagnotics bool `toml:"server_diagnotics"` + OutputSecrets bool `toml:"output_secrets"` + TagPrefix string `toml:"tag_prefix"` + TagValue string `toml:"tag_value"` + HumanReadableTS bool `toml:"human_readable_timestamps"` + MeasureRequest bool `toml:"measure_openstack_requests"` + Log telegraf.Logger `toml:"-"` + httpconfig.HTTPClientConfig + + // Locally cached clients + identity *gophercloud.ServiceClient + compute *gophercloud.ServiceClient + volume *gophercloud.ServiceClient + network *gophercloud.ServiceClient + stack *gophercloud.ServiceClient + + // Locally cached resources + openstackFlavors map[string]flavors.Flavor + openstackHypervisors []hypervisors.Hypervisor + diag map[string]interface{} + openstackProjects map[string]projects.Project + openstackServices map[string]services.Service +} + +// containsService indicates whether a particular service is enabled +func (o *OpenStack) containsService(t string) bool { + for _, service := range o.openstackServices { + if service.Type == t { + return true + } + } + + return false +} + +// convertTimeFormat, to convert time format based on HumanReadableTS +func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { + if o.HumanReadableTS { + return t.Format("2006-01-02T15:04:05.999999999Z07:00") + } + return t.UnixNano() +} + +// Description returns a description string of the input plugin and implements +// the Input interface. +func (o *OpenStack) Description() string { + return "Collects performance metrics from OpenStack services" +} + +// sampleConfig is a sample configuration file entry. +var sampleConfig = ` + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights. + username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## output secrets (such as adminPass(for server) and UserID(for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(s) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from Openstack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data recieved from Openstack. + ## If false format is unix nanoseconds. 
+ # human_readable_timestamps = false + + ## Measure Openstack call duration + # measure_openstack_requests = false +` + +// SampleConfig return a sample configuration file for auto-generation and +// implements the Input interface. +func (o *OpenStack) SampleConfig() string { + return sampleConfig +} + +// initialize performs any necessary initialization functions +func (o *OpenStack) Init() error { + if len(o.EnabledServices) == 0 { + o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"} + } + if o.Username == "" || o.Password == "" { + return fmt.Errorf("username or password can not be empty string") + } + if o.TagValue == "" { + return fmt.Errorf("tag_value option can not be empty string") + } + sort.Strings(o.EnabledServices) + o.openstackFlavors = map[string]flavors.Flavor{} + o.openstackHypervisors = []hypervisors.Hypervisor{} + o.diag = map[string]interface{}{} + o.openstackProjects = map[string]projects.Project{} + o.openstackServices = map[string]services.Service{} + + // Authenticate against Keystone and get a token provider + authOption := gophercloud.AuthOptions{ + IdentityEndpoint: o.IdentityEndpoint, + DomainName: o.Domain, + TenantName: o.Project, + Username: o.Username, + Password: o.Password, + } + provider, err := openstack.NewClient(authOption.IdentityEndpoint) + if err != nil { + return fmt.Errorf("unable to create client for OpenStack endpoint %v", err) + } + + ctx := context.Background() + client, err := o.HTTPClientConfig.CreateClient(ctx, o.Log) + if err != nil { + return err + } + + provider.HTTPClient = *client + + if err := openstack.Authenticate(provider, authOption); err != nil { + return fmt.Errorf("unable to authenticate OpenStack user %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.identity, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V3 identity client %v", err) + } + + if err := o.gatherServices(); err != nil { + return fmt.Errorf("failed to get resource openstack services %v", err) + } + + if o.compute, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 compute client %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.network, err = openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 network client %v", err) + } + + // The Orchestration service is optional + if o.containsService("orchestration") { + if o.stack, err = openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V1 stack client %v", err) + } + } + + // The Cinder volume storage service is optional + if o.containsService("volumev2") { + if o.volume, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 volume client %v", err) + } + } + + return nil +} + +// Gather gathers resources from the OpenStack API and accumulates metrics. This +// implements the Input interface. +func (o *OpenStack) Gather(acc telegraf.Accumulator) error { + // Gather resources. Note service harvesting must come first as the other + // gatherers are dependant on this information. 
+ gatherers := map[string]func(telegraf.Accumulator) error{ + "projects": o.gatherProjects, + "hypervisors": o.gatherHypervisors, + "flavors": o.gatherFlavors, + "servers": o.gatherServers, + "volumes": o.gatherVolumes, + "storage_pools": o.gatherStoragePools, + "subnets": o.gatherSubnets, + "ports": o.gatherPorts, + "networks": o.gatherNetworks, + "aggregates": o.gatherAggregates, + "nova_services": o.gatherNovaServices, + "agents": o.gatherAgents, + "stacks": o.gatherStacks, + } + + callDuration := map[string]interface{}{} + for _, service := range o.EnabledServices { + // As Services are already gathered in Init(), using this to accumulate them. + if service == "services" { + o.accumulateServices(acc) + continue + } + start := time.Now() + gatherer := gatherers[service] + if err := gatherer(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource %q %v", service, err)) + } + callDuration[service] = time.Since(start).Nanoseconds() + } + + if o.MeasureRequest { + for service, duration := range callDuration { + acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, map[string]string{}) + } + } + + if o.ServerDiagnotics { + if !choice.Contains("servers", o.EnabledServices) { + if err := o.gatherServers(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource server diagnostics %v", err)) + return nil + } + } + o.accumulateServerDiagnostics(acc) + } + + return nil +} + +// gatherServices collects services from the OpenStack API. +func (o *OpenStack) gatherServices() error { + page, err := services.List(o.identity, &services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list services %v", err) + } + extractedServices, err := services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract services %v", err) + } + for _, service := range extractedServices { + o.openstackServices[service.ID] = service + } + + return nil +} + +// gatherStacks collects and accumulates stacks data from the OpenStack API. +func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error { + page, err := stacks.List(o.stack, &stacks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list stacks %v", err) + } + extractedStacks, err := stacks.ExtractStacks(page) + if err != nil { + return fmt.Errorf("unable to extract stacks %v", err) + } + for _, stack := range extractedStacks { + tags := map[string]string{ + "description": stack.Description, + "name": stack.Name, + } + for _, stackTag := range stack.Tags { + tags[o.TagPrefix+stackTag] = o.TagValue + } + fields := map[string]interface{}{ + "status": strings.ToLower(stack.Status), + "id": stack.ID, + "status_reason": stack.StatusReason, + "creation_time": o.convertTimeFormat(stack.CreationTime), + "updated_time": o.convertTimeFormat(stack.UpdatedTime), + } + acc.AddFields("openstack_stack", fields, tags) + } + + return nil +} + +// gatherNovaServices collects and accumulates nova_services data from the OpenStack API. 
+func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error { + page, err := nova_services.List(o.compute, &nova_services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list nova_services %v", err) + } + novaServices, err := nova_services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract nova_services %v", err) + } + for _, novaService := range novaServices { + tags := map[string]string{ + "name": novaService.Binary, + "host_machine": novaService.Host, + "state": novaService.State, + "status": strings.ToLower(novaService.Status), + "zone": novaService.Zone, + } + fields := map[string]interface{}{ + "id": novaService.ID, + "disabled_reason": novaService.DisabledReason, + "forced_down": novaService.ForcedDown, + "updated_at": o.convertTimeFormat(novaService.UpdatedAt), + } + acc.AddFields("openstack_nova_service", fields, tags) + } + + return nil +} + +// gatherSubnets collects and accumulates subnets data from the OpenStack API. +func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error { + page, err := subnets.List(o.network, &subnets.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list subnets %v", err) + } + extractedSubnets, err := subnets.ExtractSubnets(page) + if err != nil { + return fmt.Errorf("unable to extract subnets %v", err) + } + for _, subnet := range extractedSubnets { + var allocationPools []string + for _, pool := range subnet.AllocationPools { + allocationPools = append(allocationPools, pool.Start+"-"+pool.End) + } + tags := map[string]string{ + "network_id": subnet.NetworkID, + "name": subnet.Name, + "description": subnet.Description, + "ip_version": strconv.Itoa(subnet.IPVersion), + "cidr": subnet.CIDR, + "gateway_ip": subnet.GatewayIP, + "tenant_id": subnet.TenantID, + "project_id": subnet.ProjectID, + "ipv6_address_mode": subnet.IPv6AddressMode, + "ipv6_ra_mode": subnet.IPv6RAMode, + "subnet_pool_id": subnet.SubnetPoolID, + } + for _, subnetTag := range subnet.Tags { + tags[o.TagPrefix+subnetTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": subnet.ID, + "dhcp_enabled": subnet.EnableDHCP, + "dns_nameservers": strings.Join(subnet.DNSNameservers[:], ","), + "allocation_pools": strings.Join(allocationPools[:], ","), + } + acc.AddFields("openstack_subnet", fields, tags) + } + return nil +} + +// gatherPorts collects and accumulates ports data from the OpenStack API. 
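+// A point is emitted for every fixed IP assigned to a port; ports without fixed IPs emit a single point.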
+func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error { + page, err := ports.List(o.network, &ports.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list ports %v", err) + } + extractedPorts, err := ports.ExtractPorts(page) + if err != nil { + return fmt.Errorf("unable to extract ports %v", err) + } + for _, port := range extractedPorts { + tags := map[string]string{ + "network_id": port.NetworkID, + "name": port.Name, + "description": port.Description, + "status": strings.ToLower(port.Status), + "tenant_id": port.TenantID, + "project_id": port.ProjectID, + "device_owner": port.DeviceOwner, + "device_id": port.DeviceID, + } + for _, portTag := range port.Tags { + tags[o.TagPrefix+portTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": port.ID, + "mac_address": port.MACAddress, + "admin_state_up": port.AdminStateUp, + "fixed_ips": len(port.FixedIPs), + "allowed_address_pairs": len(port.AllowedAddressPairs), + "security_groups": strings.Join(port.SecurityGroups[:], ","), + } + if len(port.FixedIPs) > 0 { + for _, ip := range port.FixedIPs { + fields["subnet_id"] = ip.SubnetID + fields["ip_address"] = ip.IPAddress + acc.AddFields("openstack_port", fields, tags) + } + } else { + acc.AddFields("openstack_port", fields, tags) + } + } + return nil +} + +// gatherNetworks collects and accumulates networks data from the OpenStack API. +func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error { + page, err := networks.List(o.network, &networks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list networks %v", err) + } + extractedNetworks, err := networks.ExtractNetworks(page) + if err != nil { + return fmt.Errorf("unable to extract networks %v", err) + } + for _, network := range extractedNetworks { + tags := map[string]string{ + "name": network.Name, + "description": network.Description, + "status": strings.ToLower(network.Status), + "tenant_id": network.TenantID, + "project_id": network.ProjectID, + } + for _, networkTag := range network.Tags { + tags[o.TagPrefix+networkTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": network.ID, + "admin_state_up": network.AdminStateUp, + "subnets": len(network.Subnets), + "shared": network.Shared, + "availability_zone_hints": strings.Join(network.AvailabilityZoneHints[:], ","), + "updated_at": o.convertTimeFormat(network.UpdatedAt), + "created_at": o.convertTimeFormat(network.CreatedAt), + } + if len(network.Subnets) > 0 { + for _, subnet := range network.Subnets { + fields["subnet_id"] = subnet + acc.AddFields("openstack_network", fields, tags) + } + } else { + acc.AddFields("openstack_network", fields, tags) + } + } + return nil +} + +// gatherAgents collects and accumulates agents data from the OpenStack API. 
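+// Agents are tagged with their type, availability zone, binary, description, host, and topic.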
+func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error { + page, err := agents.List(o.network, &agents.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list newtron agents %v", err) + } + extractedAgents, err := agents.ExtractAgents(page) + if err != nil { + return fmt.Errorf("unable to extract newtron agents %v", err) + } + for _, agent := range extractedAgents { + tags := map[string]string{ + "agent_type": agent.AgentType, + "availability_zone": agent.AvailabilityZone, + "binary": agent.Binary, + "description": agent.Description, + "agent_host": agent.Host, + "topic": agent.Topic, + } + fields := map[string]interface{}{ + "id": agent.ID, + "admin_state_up": agent.AdminStateUp, + "alive": agent.Alive, + "resources_synced": agent.ResourcesSynced, + "created_at": o.convertTimeFormat(agent.CreatedAt), + "started_at": o.convertTimeFormat(agent.StartedAt), + "heartbeat_timestamp": o.convertTimeFormat(agent.HeartbeatTimestamp), + } + acc.AddFields("openstack_newtron_agent", fields, tags) + } + return nil +} + +// gatherAggregates collects and accumulates aggregates data from the OpenStack API. +func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error { + page, err := aggregates.List(o.compute).AllPages() + if err != nil { + return fmt.Errorf("unable to list aggregates %v", err) + } + extractedAggregates, err := aggregates.ExtractAggregates(page) + if err != nil { + return fmt.Errorf("unable to extract aggregates %v", err) + } + for _, aggregate := range extractedAggregates { + tags := map[string]string{ + "availability_zone": aggregate.AvailabilityZone, + "name": aggregate.Name, + } + fields := map[string]interface{}{ + "id": aggregate.ID, + "aggregate_hosts": len(aggregate.Hosts), + "deleted": aggregate.Deleted, + "created_at": o.convertTimeFormat(aggregate.CreatedAt), + "updated_at": o.convertTimeFormat(aggregate.UpdatedAt), + "deleted_at": o.convertTimeFormat(aggregate.DeletedAt), + } + if len(aggregate.Hosts) > 0 { + for _, host := range aggregate.Hosts { + fields["aggregate_host"] = host + acc.AddFields("openstack_aggregate", fields, tags) + } + } else { + acc.AddFields("openstack_aggregate", fields, tags) + } + } + return nil +} + +// gatherProjects collects and accumulates projects data from the OpenStack API. +func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error { + page, err := projects.List(o.identity, &projects.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list projects %v", err) + } + extractedProjects, err := projects.ExtractProjects(page) + if err != nil { + return fmt.Errorf("unable to extract projects %v", err) + } + for _, project := range extractedProjects { + o.openstackProjects[project.ID] = project + tags := map[string]string{ + "description": project.Description, + "domain_id": project.DomainID, + "name": project.Name, + "parent_id": project.ParentID, + } + for _, projectTag := range project.Tags { + tags[o.TagPrefix+projectTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": project.ID, + "is_domain": project.IsDomain, + "enabled": project.Enabled, + "projects": len(extractedProjects), + } + acc.AddFields("openstack_identity", fields, tags) + } + return nil +} + +// gatherHypervisors collects and accumulates hypervisors data from the OpenStack API. 
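+// The hypervisor list is cached on the plugin so gatherServers can reuse it; metrics are only emitted when the hypervisors service is enabled.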
+func (o *OpenStack) gatherHypervisors(acc telegraf.Accumulator) error { + page, err := hypervisors.List(o.compute).AllPages() + if err != nil { + return fmt.Errorf("unable to list hypervisors %v", err) + } + extractedHypervisors, err := hypervisors.ExtractHypervisors(page) + if err != nil { + return fmt.Errorf("unable to extract hypervisors %v", err) + } + o.openstackHypervisors = extractedHypervisors + if choice.Contains("hypervisors", o.EnabledServices) { + for _, hypervisor := range extractedHypervisors { + tags := map[string]string{ + "cpu_vendor": hypervisor.CPUInfo.Vendor, + "cpu_arch": hypervisor.CPUInfo.Arch, + "cpu_model": hypervisor.CPUInfo.Model, + "status": strings.ToLower(hypervisor.Status), + "state": hypervisor.State, + "hypervisor_hostname": hypervisor.HypervisorHostname, + "hypervisor_type": hypervisor.HypervisorType, + "hypervisor_version": strconv.Itoa(hypervisor.HypervisorVersion), + "service_host": hypervisor.Service.Host, + "service_id": hypervisor.Service.ID, + "service_disabled_reason": hypervisor.Service.DisabledReason, + } + for _, cpuFeature := range hypervisor.CPUInfo.Features { + tags["cpu_feature_"+cpuFeature] = "true" + } + fields := map[string]interface{}{ + "id": hypervisor.ID, + "host_ip": hypervisor.HostIP, + "cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets, + "cpu_topology_cores": hypervisor.CPUInfo.Topology.Cores, + "cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads, + "current_workload": hypervisor.CurrentWorkload, + "disk_available_least": hypervisor.DiskAvailableLeast, + "free_disk_gb": hypervisor.FreeDiskGB, + "free_ram_mb": hypervisor.FreeRamMB, + "local_gb": hypervisor.LocalGB, + "local_gb_used": hypervisor.LocalGBUsed, + "memory_mb": hypervisor.MemoryMB, + "memory_mb_used": hypervisor.MemoryMBUsed, + "running_vms": hypervisor.RunningVMs, + "vcpus": hypervisor.VCPUs, + "vcpus_used": hypervisor.VCPUsUsed, + } + acc.AddFields("openstack_hypervisor", fields, tags) + } + } + return nil +} + +// gatherFlavors collects and accumulates flavors data from the OpenStack API. +func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error { + page, err := flavors.ListDetail(o.compute, &flavors.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list flavors %v", err) + } + extractedflavors, err := flavors.ExtractFlavors(page) + if err != nil { + return fmt.Errorf("unable to extract flavors %v", err) + } + for _, flavor := range extractedflavors { + o.openstackFlavors[flavor.ID] = flavor + tags := map[string]string{ + "name": flavor.Name, + "is_public": strconv.FormatBool(flavor.IsPublic), + } + fields := map[string]interface{}{ + "id": flavor.ID, + "disk": flavor.Disk, + "ram": flavor.RAM, + "rxtx_factor": flavor.RxTxFactor, + "swap": flavor.Swap, + "vcpus": flavor.VCPUs, + "ephemeral": flavor.Ephemeral, + } + acc.AddFields("openstack_flavor", fields, tags) + } + return nil +} + +// gatherVolumes collects and accumulates volumes data from the OpenStack API. 
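+// A point is emitted per attachment for attached volumes; the user_id tag is only added when OutputSecrets is enabled.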
+func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error { + page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list volumes %v", err) + } + v := []volume{} + if err := volumes.ExtractVolumesInto(page, &v); err != nil { + return fmt.Errorf("unable to extract volumes %v", err) + } + for _, volume := range v { + tags := map[string]string{ + "status": strings.ToLower(volume.Status), + "availability_zone": volume.AvailabilityZone, + "name": volume.Name, + "description": volume.Description, + "volume_type": volume.VolumeType, + "snapshot_id": volume.SnapshotID, + "source_volid": volume.SourceVolID, + "bootable": volume.Bootable, + "replication_status": strings.ToLower(volume.ReplicationStatus), + "consistency_group_id": volume.ConsistencyGroupID, + } + fields := map[string]interface{}{ + "id": volume.ID, + "size": volume.Size, + "total_attachments": len(volume.Attachments), + "encrypted": volume.Encrypted, + "multiattach": volume.Multiattach, + "created_at": o.convertTimeFormat(volume.CreatedAt), + "updated_at": o.convertTimeFormat(volume.UpdatedAt), + } + if o.OutputSecrets { + tags["user_id"] = volume.UserID + } + if len(volume.Attachments) > 0 { + for _, attachment := range volume.Attachments { + if !o.HumanReadableTS { + fields["attachment_attached_at"] = attachment.AttachedAt.UnixNano() + } else { + fields["attachment_attached_at"] = attachment.AttachedAt.Format("2006-01-02T15:04:05.999999999Z07:00") + } + tags["attachment_attachment_id"] = attachment.AttachmentID + tags["attachment_device"] = attachment.Device + tags["attachment_host_name"] = attachment.HostName + fields["attachment_server_id"] = attachment.ServerID + acc.AddFields("openstack_volume", fields, tags) + } + } else { + acc.AddFields("openstack_volume", fields, tags) + } + } + return nil +} + +// gatherStoragePools collects and accumulates storage pools data from the OpenStack API. +func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error { + results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list storage pools %v", err) + } + storagePools, err := schedulerstats.ExtractStoragePools(results) + if err != nil { + return fmt.Errorf("unable to extract storage pools %v", err) + } + for _, storagePool := range storagePools { + tags := map[string]string{ + "name": storagePool.Capabilities.VolumeBackendName, + "driver_version": storagePool.Capabilities.DriverVersion, + "storage_protocol": storagePool.Capabilities.StorageProtocol, + "vendor_name": storagePool.Capabilities.VendorName, + "volume_backend_name": storagePool.Capabilities.VolumeBackendName, + } + fields := map[string]interface{}{ + "total_capacity_gb": storagePool.Capabilities.TotalCapacityGB, + "free_capacity_gb": storagePool.Capabilities.FreeCapacityGB, + } + acc.AddFields("openstack_storage_pool", fields, tags) + } + return nil +} + +// gatherServers collects servers from the OpenStack API. 
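+// Hypervisors are gathered first if they are not already enabled, and per-server diagnostics are collected for ACTIVE servers when requested.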
+func (o *OpenStack) gatherServers(acc telegraf.Accumulator) error { + if !choice.Contains("hypervisors", o.EnabledServices) { + if err := o.gatherHypervisors(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource hypervisors %v", err)) + } + } + serverGather := choice.Contains("servers", o.EnabledServices) + for _, hypervisor := range o.openstackHypervisors { + page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: true, Host: hypervisor.HypervisorHostname}).AllPages() + if err != nil { + return fmt.Errorf("unable to list servers %v", err) + } + extractedServers, err := servers.ExtractServers(page) + if err != nil { + return fmt.Errorf("unable to extract servers %v", err) + } + for _, server := range extractedServers { + if serverGather { + o.accumulateServer(acc, server, hypervisor.HypervisorHostname) + } + if !o.ServerDiagnotics || server.Status != "ACTIVE" { + continue + } + diagnostic, err := diagnostics.Get(o.compute, server.ID).Extract() + if err != nil { + acc.AddError(fmt.Errorf("unable to get diagnostics for server(%v) %v", server.ID, err)) + continue + } + o.diag[server.ID] = diagnostic + } + } + return nil +} + +// accumulateServices accumulates statistics of services. +func (o *OpenStack) accumulateServices(acc telegraf.Accumulator) { + for _, service := range o.openstackServices { + tags := map[string]string{ + "name": service.Type, + } + fields := map[string]interface{}{ + "service_id": service.ID, + "service_enabled": service.Enabled, + } + acc.AddFields("openstack_service", fields, tags) + } +} + +// accumulateServer accumulates statistics of a server. +func (o *OpenStack) accumulateServer(acc telegraf.Accumulator, server servers.Server, hostName string) { + tags := map[string]string{} + // Extract the flavor details to avoid joins (ignore errors and leave as zero values) + var vcpus, ram, disk int + if flavorIDInterface, ok := server.Flavor["id"]; ok { + if flavorID, ok := flavorIDInterface.(string); ok { + tags["flavor"] = flavorID + if flavor, ok := o.openstackFlavors[flavorID]; ok { + vcpus = flavor.VCPUs + ram = flavor.RAM + disk = flavor.Disk + } + } + } + if imageIDInterface, ok := server.Image["id"]; ok { + if imageID, ok := imageIDInterface.(string); ok { + tags["image"] = imageID + } + } + // Try derive the associated project + project := "unknown" + if p, ok := o.openstackProjects[server.TenantID]; ok { + project = p.Name + } + tags["tenant_id"] = server.TenantID + tags["name"] = server.Name + tags["host_id"] = server.HostID + tags["status"] = strings.ToLower(server.Status) + tags["key_name"] = server.KeyName + tags["host_name"] = hostName + tags["project"] = project + fields := map[string]interface{}{ + "id": server.ID, + "progress": server.Progress, + "accessIPv4": server.AccessIPv4, + "accessIPv6": server.AccessIPv6, + "addresses": len(server.Addresses), + "security_groups": len(server.SecurityGroups), + "volumes_attached": len(server.AttachedVolumes), + "fault_code": server.Fault.Code, + "fault_details": server.Fault.Details, + "fault_message": server.Fault.Message, + "vcpus": vcpus, + "ram_mb": ram, + "disk_gb": disk, + "fault_created": o.convertTimeFormat(server.Fault.Created), + "updated": o.convertTimeFormat(server.Updated), + "created": o.convertTimeFormat(server.Created), + } + if o.OutputSecrets { + tags["user_id"] = server.UserID + fields["adminPass"] = server.AdminPass + } + if len(server.AttachedVolumes) == 0 { + acc.AddFields("openstack_server", fields, tags) + } else { + for _, AttachedVolume := range 
server.AttachedVolumes { + fields["volume_id"] = AttachedVolume.ID + acc.AddFields("openstack_server", fields, tags) + } + } +} + +// accumulateServerDiagnostics accumulates statistics from the compute(nova) service. +// currently only supports 'libvirt' driver. +func (o *OpenStack) accumulateServerDiagnostics(acc telegraf.Accumulator) { + for serverID, diagnostic := range o.diag { + s, ok := diagnostic.(map[string]interface{}) + if !ok { + o.Log.Warnf("unknown type for diagnostics %T", diagnostic) + continue + } + tags := map[string]string{ + "server_id": serverID, + } + fields := map[string]interface{}{} + portName := make(map[string]bool) + storageName := make(map[string]bool) + memoryStats := make(map[string]interface{}) + for k, v := range s { + if typePort.MatchString(k) { + portName[strings.Split(k, "_")[0]] = true + } else if typeCPU.MatchString(k) { + fields[k] = v + } else if typeStorage.MatchString(k) { + storageName[strings.Split(k, "_")[0]] = true + } else { + memoryStats[k] = v + } + } + fields["memory"] = memoryStats["memory"] + fields["memory-actual"] = memoryStats["memory-actual"] + fields["memory-rss"] = memoryStats["memory-rss"] + fields["memory-swap_in"] = memoryStats["memory-swap_in"] + tags["no_of_ports"] = strconv.Itoa(len(portName)) + tags["no_of_disks"] = strconv.Itoa(len(storageName)) + for key := range storageName { + fields["disk_errors"] = s[key+"_errors"] + fields["disk_read"] = s[key+"_read"] + fields["disk_read_req"] = s[key+"_read_req"] + fields["disk_write"] = s[key+"_write"] + fields["disk_write_req"] = s[key+"_write_req"] + tags["disk_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + for key := range portName { + fields["port_rx"] = s[key+"_rx"] + fields["port_rx_drop"] = s[key+"_rx_drop"] + fields["port_rx_errors"] = s[key+"_rx_errors"] + fields["port_rx_packets"] = s[key+"_rx_packets"] + fields["port_tx"] = s[key+"_tx"] + fields["port_tx_drop"] = s[key+"_tx_drop"] + fields["port_tx_errors"] = s[key+"_tx_errors"] + fields["port_tx_packets"] = s[key+"_tx_packets"] + tags["port_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + } +} + +// init registers a callback which creates a new OpenStack input instance. 
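+// Default values are provided for the domain, project, tag_prefix, and tag_value options.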
+func init() { + inputs.Add("openstack", func() telegraf.Input { + return &OpenStack{ + Domain: "default", + Project: "admin", + TagPrefix: "openstack_tag_", + TagValue: "true", + } + }) +} From ce46506e19c1763a402358494ff95276b772a6ce Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 9 Nov 2021 15:05:42 -0700 Subject: [PATCH 033/133] fix: do not build modbus on openbsd (#10047) --- plugins/inputs/modbus/configuration.go | 2 ++ plugins/inputs/modbus/configuration_original.go | 2 ++ plugins/inputs/modbus/modbus.go | 2 ++ plugins/inputs/modbus/modbus_openbsd.go | 3 +++ plugins/inputs/modbus/modbus_test.go | 2 ++ plugins/inputs/modbus/request.go | 2 ++ plugins/inputs/modbus/type_conversions.go | 2 ++ plugins/inputs/modbus/type_conversions16.go | 2 ++ plugins/inputs/modbus/type_conversions32.go | 2 ++ plugins/inputs/modbus/type_conversions64.go | 2 ++ 10 files changed, 21 insertions(+) create mode 100644 plugins/inputs/modbus/modbus_openbsd.go diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go index cbf36cab15524..143f12867dea6 100644 --- a/plugins/inputs/modbus/configuration.go +++ b/plugins/inputs/modbus/configuration.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "fmt" diff --git a/plugins/inputs/modbus/configuration_original.go b/plugins/inputs/modbus/configuration_original.go index cf4b2e1241b8e..78861df74e0f7 100644 --- a/plugins/inputs/modbus/configuration_original.go +++ b/plugins/inputs/modbus/configuration_original.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index c5dfee2f6cbe6..02add97429a85 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/modbus_openbsd.go b/plugins/inputs/modbus/modbus_openbsd.go new file mode 100644 index 0000000000000..6cc2bfeb3b8fd --- /dev/null +++ b/plugins/inputs/modbus/modbus_openbsd.go @@ -0,0 +1,3 @@ +//go:build openbsd + +package modbus diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index b0b49b5711075..4f9f4eca39434 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 125aebe2eb8c4..b2a31d9dcf4d3 100644 --- a/plugins/inputs/modbus/request.go +++ b/plugins/inputs/modbus/request.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "sort" diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go index 556f7b423c13d..88c4b7465a824 100644 --- a/plugins/inputs/modbus/type_conversions.go +++ b/plugins/inputs/modbus/type_conversions.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import "fmt" diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go index 7766e1d0edafe..088a5d10c445a 100644 --- a/plugins/inputs/modbus/type_conversions16.go +++ b/plugins/inputs/modbus/type_conversions16.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go index 1a0255ef3e8e0..260a3dc065f70 100644 --- a/plugins/inputs/modbus/type_conversions32.go +++ b/plugins/inputs/modbus/type_conversions32.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package 
modbus import ( diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go index f72dfdf3af66d..55b0a0775c701 100644 --- a/plugins/inputs/modbus/type_conversions64.go +++ b/plugins/inputs/modbus/type_conversions64.go @@ -1,3 +1,5 @@ +//go:build !openbsd + package modbus import ( From 8a3ba854199696bef7d087c73e9a9a51f9e2fcc1 Mon Sep 17 00:00:00 2001 From: atetevoortwis Date: Tue, 9 Nov 2021 23:29:36 +0100 Subject: [PATCH 034/133] fix: Changed VM ID from string to int (#10068) --- plugins/inputs/proxmox/proxmox.go | 8 ++++---- plugins/inputs/proxmox/proxmox_test.go | 2 +- plugins/inputs/proxmox/structs.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index efd7fae7d5d5f..101b458630eeb 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -163,8 +163,8 @@ func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { } } -func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VMStat, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id json.Number) (VMStat, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(id) + "/status/current" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { @@ -196,8 +196,8 @@ func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) { return vmStats, nil } -func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmID + "/config" +func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(vmID) + "/config" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { return VMConfig{}, err diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index f05b6450bd7be..741a272829474 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -13,7 +13,7 @@ import ( var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}` var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` -var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` +var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"},{"vmid":112,"type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container2"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` var lxcCurrentStatusTestData = 
`{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index c064150c061f6..2f16841b2ff8b 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -41,7 +41,7 @@ type VMCurrentStats struct { } type VMStat struct { - ID string `json:"vmid"` + ID json.Number `json:"vmid"` Name string `json:"name"` Status string `json:"status"` UsedMem json.Number `json:"mem"` From f7827a0408075cd945a1eaf3b311882973016277 Mon Sep 17 00:00:00 2001 From: Felix Edelmann Date: Tue, 9 Nov 2021 23:30:42 +0100 Subject: [PATCH 035/133] fix: mysql: type conversion follow-up (#9966) --- plugins/inputs/mysql/mysql.go | 103 ++++++++++++++---------- plugins/inputs/mysql/mysql_test.go | 27 +------ plugins/inputs/mysql/v1/mysql.go | 8 +- plugins/inputs/mysql/v2/convert.go | 12 ++- plugins/inputs/mysql/v2/convert_test.go | 41 ++++++++++ 5 files changed, 116 insertions(+), 75 deletions(-) diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 28313b25534aa..3fbd4654ef2b4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1,7 +1,6 @@ package mysql import ( - "bytes" "database/sql" "fmt" "strconv" @@ -638,7 +637,12 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu value, err := m.parseGlobalVariables(key, val) if err != nil { - m.Log.Debugf("Error parsing global variable %q: %v", key, err) + errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } } else { fields[key] = value } @@ -658,11 +662,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { if m.MetricVersion < 2 { - v, ok := v1.ParseValue(value) - if ok { - return v, nil - } - return v, fmt.Errorf("could not parse value: %q", string(value)) + return v1.ParseValue(value) } return v2.ConvertGlobalVariables(key, value) } @@ -693,35 +693,58 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu // scanning keys and values separately // get columns names, and create an array with its length - cols, err := rows.Columns() + cols, err := rows.ColumnTypes() if err != nil { return err } - vals := make([]interface{}, len(cols)) + vals := make([]sql.RawBytes, len(cols)) + valPtrs := make([]interface{}, len(cols)) // fill the array with sql.Rawbytes for i := range vals { - vals[i] = &sql.RawBytes{} + vals[i] = sql.RawBytes{} + valPtrs[i] = &vals[i] } - if err = rows.Scan(vals...); err != nil { + if err = rows.Scan(valPtrs...); err != nil { return err } + // range over columns, and try to parse values for i, col := range cols { + colName := col.Name() + if m.MetricVersion >= 2 { - col = strings.ToLower(col) + colName = strings.ToLower(colName) } + colValue := vals[i] + if m.GatherAllSlaveChannels && - (strings.ToLower(col) == "channel_name" || strings.ToLower(col) == "connection_name") { + (strings.ToLower(colName) == 
"channel_name" || strings.ToLower(colName) == "connection_name") { // Since the default channel name is empty, we need this block channelName := "default" - if len(*vals[i].(*sql.RawBytes)) > 0 { - channelName = string(*vals[i].(*sql.RawBytes)) + if len(colValue) > 0 { + channelName = string(colValue) } tags["channel"] = channelName - } else if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { - fields["slave_"+col] = value + continue } + + if colValue == nil || len(colValue) == 0 { + continue + } + + value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) + if err != nil { + errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } + continue + } + + fields["slave_"+colName] = value } acc.AddFields("mysql", fields, tags) @@ -877,7 +900,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum key = strings.ToLower(key) value, err := v2.ConvertGlobalStatus(key, val) if err != nil { - m.Log.Debugf("Error parsing global status: %v", err) + acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) } else { fields[key] = value } @@ -1346,10 +1369,16 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu if err := rows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { - fields[key] = value + value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") + if err != nil { + acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) + continue } + + fields[key] = value + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_innodb", fields, tags) @@ -1914,34 +1943,22 @@ func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, a return nil } -func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { +func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeName string) (interface{}, error) { if m.MetricVersion < 2 { return v1.ParseValue(value) } - return parseValue(value) -} - -// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 -func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { - return 1, true - } - - if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { - return 0, true - } - - if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { - return val, true - } - if val, err := strconv.ParseFloat(string(value), 64); err == nil { - return val, true - } - if len(string(value)) > 0 { - return string(value), true + switch databaseTypeName { + case "INT": + return v2.ParseInt(value) + case "BIGINT": + return v2.ParseUint(value) + case "VARCHAR": + return v2.ParseString(value) + default: + m.Log.Debugf("unknown database type name %q in parseValueByDatabaseTypeName", databaseTypeName) + return v2.ParseValue(value) } - return nil, false } // findThreadState can be used to find thread state by command and plain state diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 410f80213252f..868c86f18b9cb 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -1,7 +1,6 @@ package mysql import ( - "database/sql" "fmt" "testing" @@ -178,31 +177,7 @@ func 
TestMysqlDNSAddTimeout(t *testing.T) { } } } -func TestParseValue(t *testing.T) { - testCases := []struct { - rawByte sql.RawBytes - output interface{} - boolValue bool - }{ - {sql.RawBytes("123"), int64(123), true}, - {sql.RawBytes("abc"), "abc", true}, - {sql.RawBytes("10.1"), 10.1, true}, - {sql.RawBytes("ON"), 1, true}, - {sql.RawBytes("OFF"), 0, true}, - {sql.RawBytes("NO"), 0, true}, - {sql.RawBytes("YES"), 1, true}, - {sql.RawBytes("No"), 0, true}, - {sql.RawBytes("Yes"), 1, true}, - {sql.RawBytes("-794"), int64(-794), true}, - {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), true}, - {sql.RawBytes(""), nil, false}, - } - for _, cases := range testCases { - if got, ok := parseValue(cases.rawByte); got != cases.output && ok != cases.boolValue { - t.Errorf("for %s wanted %t, got %t", string(cases.rawByte), cases.output, got) - } - } -} + func TestNewNamespace(t *testing.T) { testCases := []struct { words []string diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 374782f9cb29a..7f4e1a7dcacae 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -182,14 +182,14 @@ var Mappings = []*Mapping{ }, } -func ParseValue(value sql.RawBytes) (float64, bool) { +func ParseValue(value sql.RawBytes) (float64, error) { if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) { - return 1, true + return 1, nil } if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { - return 0, true + return 0, nil } n, err := strconv.ParseFloat(string(value), 64) - return n, err == nil + return n, err } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index d5b73ec7f4c1e..b446890c9baec 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -25,6 +25,10 @@ func ParseUint(value sql.RawBytes) (interface{}, error) { return strconv.ParseUint(string(value), 10, 64) } +func ParseFloat(value sql.RawBytes) (interface{}, error) { + return strconv.ParseFloat(string(value), 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -86,11 +90,15 @@ var GlobalStatusConversions = map[string]ConversionFunc{ "innodb_data_pending_fsyncs": ParseUint, "ssl_ctx_verify_depth": ParseUint, "ssl_verify_depth": ParseUint, + + // see https://galeracluster.com/library/documentation/galera-status-variables.html + "wsrep_local_index": ParseUint, + "wsrep_local_send_queue_avg": ParseFloat, } -// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html -// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html var GlobalVariableConversions = map[string]ConversionFunc{ + // see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html + // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html "delay_key_write": ParseString, // ON, OFF, ALL "enforce_gtid_consistency": ParseString, // ON, OFF, WARN "event_scheduler": ParseString, // YES, NO, DISABLED diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 43133eeb39c1b..95083a1e5016f 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -2,6 +2,7 @@ package v2 import ( "database/sql" + "strings" "testing" "github.com/stretchr/testify/require" @@ -84,3 +85,43 @@ func TestCovertGlobalVariables(t *testing.T) { }) } } + +func 
TestParseValue(t *testing.T) { + testCases := []struct { + rawByte sql.RawBytes + output interface{} + err string + }{ + {sql.RawBytes("123"), int64(123), ""}, + {sql.RawBytes("abc"), "abc", ""}, + {sql.RawBytes("10.1"), 10.1, ""}, + {sql.RawBytes("ON"), 1, ""}, + {sql.RawBytes("OFF"), 0, ""}, + {sql.RawBytes("NO"), 0, ""}, + {sql.RawBytes("YES"), 1, ""}, + {sql.RawBytes("No"), 0, ""}, + {sql.RawBytes("Yes"), 1, ""}, + {sql.RawBytes("-794"), int64(-794), ""}, + {sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32 + {sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32 + {sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64 + {sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64 + {sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64 + {sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes(""), nil, "unconvertible value"}, + } + for _, cases := range testCases { + got, err := ParseValue(cases.rawByte) + + if err != nil && cases.err == "" { + t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error()) + } else if err != nil && !strings.HasPrefix(err.Error(), cases.err) { + t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error()) + } else if err == nil && cases.err != "" { + t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err) + } else if got != cases.output { + t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got) + } + } +} From 19d67173bb9821b1805505b80026cbb1325cd6aa Mon Sep 17 00:00:00 2001 From: reimda Date: Wed, 10 Nov 2021 08:04:17 -0700 Subject: [PATCH 036/133] Chore: Update gosnmp module from 1.32 to 1.33 (#10076) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b5733ee0ea371..625386db84383 100644 --- a/go.mod +++ b/go.mod @@ -130,7 +130,7 @@ require ( github.com/gophercloud/gophercloud v0.16.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 - github.com/gosnmp/gosnmp v1.32.0 + github.com/gosnmp/gosnmp v1.33.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect diff --git a/go.sum b/go.sum index 60f8a841c4964..2799ef3c6fce6 100644 --- a/go.sum +++ b/go.sum @@ -1117,8 +1117,8 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= -github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gosnmp/gosnmp v1.33.0 h1:WNwN5Rj/9Y70VplIKXuaUiYVxdcaXhfAuLElKx4lnpU= +github.com/gosnmp/gosnmp v1.33.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= 
github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= From 279fc8352c63ac355a3843643720b3b89bd97808 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 10 Nov 2021 12:53:11 -0600 Subject: [PATCH 037/133] fix(inputs/mongodb): resolve all markdown linter issues in README.md (#10077) --- plugins/inputs/mongodb/README.md | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 15a474e6bb66a..3247e3c78afc3 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -2,7 +2,7 @@ All MongoDB server versions from 2.6 and higher are supported. -### Configuration: +## Configuration ```toml [[inputs.mongodb]] @@ -40,20 +40,22 @@ All MongoDB server versions from 2.6 and higher are supported. # insecure_skip_verify = false ``` -#### Permissions: +### Permissions If your MongoDB instance has access control enabled you will need to connect as a user with sufficient rights. With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In version 3.2 you may also need these additional permissions: -``` + +```shell > db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}]) ``` If the user is missing required privileges you may see an error in the Telegraf logs similar to: -``` + +```shell Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` @@ -61,7 +63,7 @@ Some permission related errors are logged at debug level, you can check these messages by setting `debug = true` in the agent section of the configuration or by running Telegraf with the `--debug` argument. -### Metrics: +### Metrics - mongodb - tags: @@ -231,7 +233,7 @@ by running Telegraf with the `--debug` argument. - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -+ mongodb_db_stats +- mongodb_db_stats - tags: - db_name - hostname @@ -293,8 +295,9 @@ by running Telegraf with the `--debug` argument. 
- commands_time (integer) - commands_count (integer) -### Example Output: -``` +### Example Output + +```shell mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requeste
d_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcm
alloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 From 62a05b23728ac4bb9e8222a28644f24c501e44bb Mon Sep 17 00:00:00 2001 From: anti32 Date: Wed, 10 Nov 2021 21:49:40 +0200 Subject: [PATCH 038/133] fix (inputs/mongodb) readme: correct connection URI (#10075) --- plugins/inputs/mongodb/README.md | 2 +- plugins/inputs/mongodb/mongodb.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 3247e3c78afc3..678d80c73184d 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -11,7 +11,7 @@ All MongoDB server versions from 2.6 and higher are supported. ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status. ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 0366636200064..3417252ddeb59 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -44,7 +44,7 @@ var sampleConfig = ` ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which From 8f309dc34d96901cca44c3870b8a0370b6cf7202 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 10 Nov 2021 17:45:50 -0600 Subject: [PATCH 039/133] fix(parser/csv): resolve linter issues (#10093) --- plugins/parsers/csv/README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 220ac60686636..192c9216b3a82 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -3,7 +3,7 @@ The `csv` parser creates metrics from a document containing comma separated values. 
-### Configuration +## Configuration ```toml [[inputs.file]] @@ -78,7 +78,8 @@ values. ## The field will be skipped entirely where it matches any values inserted here. csv_skip_values = [] ``` -#### csv_timestamp_column, csv_timestamp_format + +### csv_timestamp_column, csv_timestamp_format By default the current time will be used for all created metrics, to set the time using the JSON document you can use the `csv_timestamp_column` and @@ -104,6 +105,7 @@ columns and rows. ### Examples Config: + ```toml [[inputs.file]] files = ["example"] @@ -114,13 +116,15 @@ Config: ``` Input: -``` + +```shell measurement,cpu,time_user,time_system,time_idle,time cpu,cpu0,42,42,42,2018-09-13T13:03:28Z ``` Output: -``` + +```shell cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000 ``` From b9c444bae8617cc0aa2c2dda81abf435420b6272 Mon Sep 17 00:00:00 2001 From: David B <36965011+DavidBuettner@users.noreply.github.com> Date: Fri, 12 Nov 2021 00:09:51 +0100 Subject: [PATCH 040/133] fix: update BurntSushi/toml for hex config support (#10089) --- go.mod | 2 +- go.sum | 3 ++- plugins/common/shim/config_test.go | 2 ++ plugins/common/shim/testdata/special.conf | 3 ++- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 625386db84383..530d4a1947d76 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml v0.4.1 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.21 // indirect diff --git a/go.sum b/go.sum index 2799ef3c6fce6..cd8ea8078be8b 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,9 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 75ad18239fbb0..762ca5dd283b2 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -54,6 +54,7 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*time.Second, inp.Duration) require.EqualValues(t, 3*1000*1000, inp.Size) + require.EqualValues(t, 52, inp.Hex) } func TestLoadingProcessorWithConfig(t *testing.T) { @@ -72,6 +73,7 @@ func TestLoadingProcessorWithConfig(t *testing.T) { type testDurationInput struct { Duration 
tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf index c324b638497c5..53af78620701d 100644 --- a/plugins/common/shim/testdata/special.conf +++ b/plugins/common/shim/testdata/special.conf @@ -1,4 +1,5 @@ # testing custom field types [[inputs.test]] duration = "3s" - size = "3MB" \ No newline at end of file + size = "3MB" + hex = 0x34 \ No newline at end of file From c1263fb03bc83dded7442dc08dfed3e16e91bc60 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Mon, 15 Nov 2021 09:09:39 -0600 Subject: [PATCH 041/133] fix: super-linter use v4.8.1, issue with latest (#10108) --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 8ba9ae2944823..104d71db2230a 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -48,7 +48,7 @@ jobs: # Run Linter against code base # ################################ - name: Lint Code Base - uses: github/super-linter@v4 + uses: github/super-linter@v4.8.1 env: VALIDATE_ALL_CODEBASE: false DEFAULT_BRANCH: master From b9e4978b17754faa5900e65f53fac4693da1ec13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Mon, 15 Nov 2021 16:14:09 +0100 Subject: [PATCH 042/133] fix: Linter fixes for plugins/inputs/p* (#10066) --- go.mod | 4 - plugins/inputs/passenger/passenger_test.go | 7 +- plugins/inputs/pgbouncer/pgbouncer_test.go | 14 +- plugins/inputs/phpfpm/fcgi_client.go | 13 +- plugins/inputs/phpfpm/phpfpm.go | 16 +- plugins/inputs/phpfpm/phpfpm_test.go | 12 +- plugins/inputs/ping/ping.go | 18 +- plugins/inputs/ping/ping_notwindows.go | 98 ++++++----- plugins/inputs/ping/ping_test.go | 162 +++++++++--------- plugins/inputs/ping/ping_windows_test.go | 66 +++---- plugins/inputs/postfix/postfix.go | 22 ++- plugins/inputs/postfix/postfix_test.go | 28 +-- plugins/inputs/postgresql/postgresql_test.go | 30 ++-- plugins/inputs/postgresql/service.go | 2 +- .../postgresql_extensible.go | 65 +++---- .../postgresql_extensible_test.go | 50 +++--- plugins/inputs/powerdns/powerdns.go | 10 +- plugins/inputs/powerdns/powerdns_test.go | 21 ++- .../powerdns_recursor/powerdns_recursor.go | 10 +- .../powerdns_recursor_test.go | 36 ++-- plugins/inputs/processes/processes_test.go | 14 +- .../procstat/native_finder_windows_test.go | 10 +- plugins/inputs/procstat/process.go | 4 +- plugins/inputs/procstat/procstat_test.go | 44 ++--- plugins/inputs/prometheus/kubernetes_test.go | 62 +++---- plugins/inputs/prometheus/parser.go | 14 +- plugins/inputs/prometheus/parser_test.go | 66 +++---- plugins/inputs/prometheus/prometheus.go | 30 ++-- plugins/inputs/prometheus/prometheus_test.go | 58 +++---- plugins/inputs/proxmox/proxmox.go | 39 +++-- plugins/inputs/proxmox/proxmox_test.go | 12 +- plugins/inputs/proxmox/structs.go | 7 + 32 files changed, 555 insertions(+), 489 deletions(-) diff --git a/go.mod b/go.mod index 530d4a1947d76..c0f59b6da7b73 100644 --- a/go.mod +++ b/go.mod @@ -72,7 +72,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 - github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible github.com/cenkalti/backoff v2.2.1+incompatible // indirect 
github.com/cespare/xxhash/v2 v2.1.1 // indirect @@ -171,8 +170,6 @@ require ( github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/compress v1.13.6 // indirect - github.com/kr/pretty v0.3.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -345,7 +342,6 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect - github.com/rogpeppe/go-internal v1.6.2 // indirect go.opentelemetry.io/otel v1.0.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index ecbeeb532fd1e..5578b88b77525 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -8,7 +8,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" @@ -49,7 +48,7 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) + require.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) } func Test_Invalid_Xml(t *testing.T) { @@ -65,7 +64,7 @@ func Test_Invalid_Xml(t *testing.T) { err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, "cannot parse input with error: EOF", err.Error()) + require.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensure that the error message match the path of default cli @@ -80,7 +79,7 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) { err = r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") + require.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 7dd75fb4ae487..2c9500260078c 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -4,10 +4,10 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { @@ -55,20 +55,20 @@ func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetricsPgBouncer { - assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + require.True(t, acc.HasInt64Field("pgbouncer", metric)) metricsCounted++ } for _, metric := range intMetricsPgBouncerPools { - assert.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) + require.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + require.True(t, 
acc.HasInt32Field("pgbouncer", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) } diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 56fb38188fb75..b34b8a3063b52 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -33,26 +33,23 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { return fcgi, err } -func (c *conn) Request( - env map[string]string, - requestData string, -) (retout []byte, reterr []byte, err error) { +func (c *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { defer c.rwc.Close() var reqID uint16 = 1 err = c.writeBeginRequest(reqID, uint16(roleResponder), 0) if err != nil { - return + return nil, nil, err } err = c.writePairs(typeParams, reqID, env) if err != nil { - return + return nil, nil, err } if len(requestData) > 0 { if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil { - return + return nil, nil, err } } @@ -82,5 +79,5 @@ READ_LOOP: } } - return + return retout, reterr, err } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 77c4bf0aeee56..532567a2486fa 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -276,12 +276,12 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { func expandUrls(urls []string) ([]string, error) { addrs := make([]string, 0, len(urls)) - for _, url := range urls { - if isNetworkURL(url) { - addrs = append(addrs, url) + for _, address := range urls { + if isNetworkURL(address) { + addrs = append(addrs, address) continue } - paths, err := globUnixSocket(url) + paths, err := globUnixSocket(address) if err != nil { return nil, err } @@ -290,8 +290,8 @@ func expandUrls(urls []string) ([]string, error) { return addrs, nil } -func globUnixSocket(url string) ([]string, error) { - pattern, status := unixSocketPaths(url) +func globUnixSocket(address string) ([]string, error) { + pattern, status := unixSocketPaths(address) glob, err := globpath.Compile(pattern) if err != nil { return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err) @@ -312,9 +312,7 @@ func globUnixSocket(url string) ([]string, error) { return addresses, nil } -func unixSocketPaths(addr string) (string, string) { - var socketPath, statusPath string - +func unixSocketPaths(addr string) (socketPath string, statusPath string) { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { socketPath = socketAddr[0] diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index d51c576aad7f0..cf207fec901d6 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -16,9 +16,9 @@ import ( "net/http/httptest" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -283,7 +283,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "/status") + require.Contains(t, err.Error(), "/status") } func 
TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { @@ -297,8 +297,8 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) - assert.Contains(t, err.Error(), `lookup aninvalidone`) + require.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) + require.Contains(t, err.Error(), `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { @@ -312,7 +312,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) + require.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } const outputSample = ` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 7d3b05178ad0b..60f3aaf414b74 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -13,6 +13,7 @@ import ( "time" "github.com/go-ping/ping" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -82,6 +83,20 @@ type Ping struct { Size *int } +type roundTripTimeStats struct { + min float64 + avg float64 + max float64 + stddev float64 +} + +type stats struct { + trans int + recv int + ttl int + roundTripTimeStats +} + func (*Ping) Description() string { return "Ping given url(s) and return statistics" } @@ -262,7 +277,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { sort.Sort(durationSlice(stats.Rtts)) for _, perc := range p.Percentiles { - var value = percentile(durationSlice(stats.Rtts), perc) + var value = percentile(stats.Rtts, perc) var field = fmt.Sprintf("percentile%v_ms", perc) fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) } @@ -273,6 +288,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { fields["ttl"] = stats.ttl } + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["percent_packet_loss"] = float64(stats.PacketLoss) fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond) fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond) diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index f6bd751c2a4e3..c09c4a3fcd359 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -57,7 +57,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { return } } - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) + stats, err := processPingOutput(out) if err != nil { // fatal error acc.AddError(fmt.Errorf("%s: %s", err, u)) @@ -67,25 +67,25 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { } // Calculate packet loss percentage - loss := float64(trans-rec) / float64(trans) * 100.0 + loss := float64(stats.trans-stats.recv) / float64(stats.trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec + fields["packets_transmitted"] = stats.trans + fields["packets_received"] = stats.recv fields["percent_packet_loss"] = loss - if ttl >= 0 { - fields["ttl"] = ttl + if stats.ttl >= 0 { + 
fields["ttl"] = stats.ttl } - if min >= 0 { - fields["minimum_response_ms"] = min + if stats.min >= 0 { + fields["minimum_response_ms"] = stats.min } - if avg >= 0 { - fields["average_response_ms"] = avg + if stats.avg >= 0 { + fields["average_response_ms"] = stats.avg } - if max >= 0 { - fields["maximum_response_ms"] = max + if stats.max >= 0 { + fields["maximum_response_ms"] = stats.max } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev + if stats.stddev >= 0 { + fields["standard_deviation_ms"] = stats.stddev } acc.AddFields("ping", fields, tags) } @@ -165,36 +165,47 @@ func (p *Ping) args(url string, system string) []string { // round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms // // It returns (, , ) -func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { - var trans, recv, ttl int = 0, 0, -1 - var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 +func processPingOutput(out string) (stats, error) { + stats := stats{ + trans: 0, + recv: 0, + ttl: -1, + roundTripTimeStats: roundTripTimeStats{ + min: -1.0, + avg: -1.0, + max: -1.0, + stddev: -1.0, + }, + } + // Set this error to nil if we find a 'transmitted' line - err := errors.New("Fatal error processing ping output") + err := errors.New("fatal error processing ping output") lines := strings.Split(out, "\n") for _, line := range lines { // Reading only first TTL, ignoring other TTL messages - if ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { - ttl, err = getTTL(line) - } else if strings.Contains(line, "transmitted") && - strings.Contains(line, "received") { - trans, recv, err = getPacketStats(line, trans, recv) + if stats.ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { + stats.ttl, err = getTTL(line) + } else if strings.Contains(line, "transmitted") && strings.Contains(line, "received") { + stats.trans, stats.recv, err = getPacketStats(line) if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } } else if strings.Contains(line, "min/avg/max") { - min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) + stats.roundTripTimeStats, err = checkRoundTripTimeStats(line) if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } } } - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } -func getPacketStats(line string, trans, recv int) (int, int, error) { +func getPacketStats(line string) (trans int, recv int, err error) { + trans, recv = 0, 0 + stats := strings.Split(line, ", ") // Transmitted packets - trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) + trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) if err != nil { return trans, recv, err } @@ -209,28 +220,35 @@ func getTTL(line string) (int, error) { return strconv.Atoi(ttlMatch[2]) } -func checkRoundTripTimeStats(line string, min, avg, max, - stddev float64) (float64, float64, float64, float64, error) { +func checkRoundTripTimeStats(line string) (roundTripTimeStats, error) { + roundTripTimeStats := roundTripTimeStats{ + min: -1.0, + avg: -1.0, + max: -1.0, + stddev: -1.0, + } + stats := strings.Split(line, " ")[3] data := strings.Split(stats, "/") - min, err := strconv.ParseFloat(data[0], 64) + var err error + roundTripTimeStats.min, err = strconv.ParseFloat(data[0], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - avg, err = strconv.ParseFloat(data[1], 64) + 
roundTripTimeStats.avg, err = strconv.ParseFloat(data[1], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - max, err = strconv.ParseFloat(data[2], 64) + roundTripTimeStats.max, err = strconv.ParseFloat(data[2], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } if len(data) == 4 { - stddev, err = strconv.ParseFloat(data[3], 64) + roundTripTimeStats.stddev, err = strconv.ParseFloat(data[3], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } } - return min, avg, max, stddev, err + return roundTripTimeStats, err } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 7faba097c4562..94a65075e651a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -12,10 +12,10 @@ import ( "time" "github.com/go-ping/ping" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // BSD/Darwin ping output @@ -80,45 +80,45 @@ ping: -i interval too short: Operation not permitted // Test that ping command output is processed properly func TestProcessPingOutput(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) - assert.NoError(t, err) - assert.Equal(t, 55, ttl, "ttl value is 55") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 15.087, min, 0.001) - assert.InDelta(t, 20.224, avg, 0.001) - assert.InDelta(t, 27.263, max, 0.001) - assert.InDelta(t, 4.076, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(freebsdPing6Output) - assert.NoError(t, err) - assert.Equal(t, 117, ttl, "ttl value is 117") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.727, min, 0.001) - assert.InDelta(t, 53.211, avg, 0.001) - assert.InDelta(t, 93.870, max, 0.001) - assert.InDelta(t, 22.000, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 56, ttl, "ttl value is 56") - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, rec, "4 packets were received") - assert.InDelta(t, 15.810, min, 0.001) - assert.InDelta(t, 17.611, avg, 0.001) - assert.InDelta(t, 22.559, max, 0.001) - assert.InDelta(t, -1.0, stddev, 0.001) + stats, err := processPingOutput(bsdPingOutput) + require.NoError(t, err) + require.Equal(t, 55, stats.ttl, "ttl value is 55") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 15.087, stats.min, 0.001) + require.InDelta(t, 20.224, stats.avg, 0.001) + require.InDelta(t, 27.263, stats.max, 0.001) + require.InDelta(t, 4.076, stats.stddev, 0.001) + + stats, err = processPingOutput(freebsdPing6Output) + 
require.NoError(t, err) + require.Equal(t, 117, stats.ttl, "ttl value is 117") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.727, stats.min, 0.001) + require.InDelta(t, 53.211, stats.avg, 0.001) + require.InDelta(t, 93.870, stats.max, 0.001) + require.InDelta(t, 22.000, stats.stddev, 0.001) + + stats, err = processPingOutput(linuxPingOutput) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) + + stats, err = processPingOutput(busyBoxPingOutput) + require.NoError(t, err) + require.Equal(t, 56, stats.ttl, "ttl value is 56") + require.Equal(t, 4, stats.trans, "4 packets were transmitted") + require.Equal(t, 4, stats.recv, "4 packets were received") + require.InDelta(t, 15.810, stats.min, 0.001) + require.InDelta(t, 17.611, stats.avg, 0.001) + require.InDelta(t, 22.559, stats.max, 0.001) + require.InDelta(t, -1.0, stats.stddev, 0.001) } // Linux ping output with varying TTL @@ -137,22 +137,22 @@ rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms // Test that ping command output is processed properly func TestProcessPingOutputWithVaryingTTL(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were transmitted") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) + stats, err := processPingOutput(linuxPingOutputWithVaryingTTL) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were transmitted") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) } // Test that processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) - assert.Error(t, err, "Error was expected from processPingOutput") + _, err := processPingOutput(fatalPingOutput) + require.Error(t, err, "Error was expected from processPingOutput") } // Test that default arg lists are created correctly @@ -350,7 +350,7 @@ func TestBadPingGather(t *testing.T) { } func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) { - return fatalPingOutput, errors.New("So very bad") + return fatalPingOutput, errors.New("so very bad") } // Test that a fatal ping command does not gather any statistics. 
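To show the parsing these ping changes exercise in isolation, here is a small self-contained Go sketch of the `min/avg/max` summary-line handling performed by `checkRoundTripTimeStats` above; the sample line is the round-trip format quoted in the ping source earlier in this patch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Standalone sketch of checkRoundTripTimeStats: take the fourth
// space-separated token of the summary line and split it on "/" into
// min/avg/max and, when present, stddev.
func main() {
	line := "round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms"
	data := strings.Split(strings.Split(line, " ")[3], "/")

	min, _ := strconv.ParseFloat(data[0], 64)
	avg, _ := strconv.ParseFloat(data[1], 64)
	max, _ := strconv.ParseFloat(data[2], 64)
	stddev := -1.0 // busybox ping omits the stddev component
	if len(data) == 4 {
		stddev, _ = strconv.ParseFloat(data[3], 64)
	}

	fmt.Println(min, avg, max, stddev) // 34.843 43.508 52.172 8.664
}
```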
@@ -363,20 +363,20 @@ func TestFatalPingGather(t *testing.T) { err := acc.GatherError(p.Gather) require.Error(t, err) - require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, So very bad") - assert.False(t, acc.HasMeasurement("packets_transmitted"), + require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, so very bad") + require.False(t, acc.HasMeasurement("packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("packets_received"), + require.False(t, acc.HasMeasurement("packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("percent_packet_loss"), + require.False(t, acc.HasMeasurement("percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("ttl"), + require.False(t, acc.HasMeasurement("ttl"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("minimum_response_ms"), + require.False(t, acc.HasMeasurement("minimum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("average_response_ms"), + require.False(t, acc.HasMeasurement("average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("maximum_response_ms"), + require.False(t, acc.HasMeasurement("maximum_response_ms"), "Fatal ping should not have packet measurements") } @@ -385,8 +385,8 @@ func TestErrorWithHostNamePingGather(t *testing.T) { out string error error }{ - {"", errors.New("host www.amazon.com: So very bad")}, - {"so bad", errors.New("host www.amazon.com: so bad, So very bad")}, + {"", errors.New("host www.amazon.com: so very bad")}, + {"so bad", errors.New("host www.amazon.com: so bad, so very bad")}, } for _, param := range params { @@ -394,12 +394,12 @@ func TestErrorWithHostNamePingGather(t *testing.T) { p := Ping{ Urls: []string{"www.amazon.com"}, pingHost: func(binary string, timeout float64, args ...string) (string, error) { - return param.out, errors.New("So very bad") + return param.out, errors.New("so very bad") }, } require.Error(t, acc.GatherError(p.Gather)) - assert.True(t, len(acc.Errors) > 0) - assert.Contains(t, acc.Errors, param.error) + require.True(t, len(acc.Errors) > 0) + require.Contains(t, acc.Errors, param.error) } } @@ -409,13 +409,13 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } err := acc.GatherError(p.Gather) require.Error(t, err) - require.EqualValues(t, err.Error(), "Fatal error processing ping output: www.google.com") + require.EqualValues(t, err.Error(), "fatal error processing ping output: www.google.com") } // Test that Gather function works using native ping @@ -469,19 +469,19 @@ func TestPingGatherNative(t *testing.T) { var acc testutil.Accumulator require.NoError(t, tc.P.Init()) require.NoError(t, acc.GatherError(tc.P.Gather)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) - assert.True(t, acc.HasField("ping", "percentile50_ms")) - assert.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) - assert.True(t, 
acc.HasField("ping", "percentile95_ms")) - assert.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) - assert.True(t, acc.HasField("ping", "percentile99_ms")) - assert.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) - assert.True(t, acc.HasField("ping", "percent_packet_loss")) - assert.True(t, acc.HasField("ping", "minimum_response_ms")) - assert.True(t, acc.HasField("ping", "average_response_ms")) - assert.True(t, acc.HasField("ping", "maximum_response_ms")) - assert.True(t, acc.HasField("ping", "standard_deviation_ms")) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + require.True(t, acc.HasField("ping", "percentile50_ms")) + require.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) + require.True(t, acc.HasField("ping", "percentile95_ms")) + require.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) + require.True(t, acc.HasField("ping", "percentile99_ms")) + require.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) + require.True(t, acc.HasField("ping", "percent_packet_loss")) + require.True(t, acc.HasField("ping", "minimum_response_ms")) + require.True(t, acc.HasField("ping", "average_response_ms")) + require.True(t, acc.HasField("ping", "maximum_response_ms")) + require.True(t, acc.HasField("ping", "standard_deviation_ms")) } } diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 6df8af3732a5f..77137b1700ef6 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -8,9 +8,9 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // Windows ping format ( should support multilanguage ?) 
@@ -44,22 +44,22 @@ Approximate round trip times in milli-seconds: func TestHost(t *testing.T) { trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 46, min, "Min 46") - assert.Equal(t, 57, max, "max 57") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 46, min, "Min 46") + require.Equal(t, 57, max, "max 57") trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 50, min, "Min 50") - assert.Equal(t, 52, max, "Max 52") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 50, min, "Min 50") + require.Equal(t, 52, max, "Max 52") } func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { @@ -239,21 +239,21 @@ func TestFatalPingGather(t *testing.T) { } acc.GatherError(p.Gather) - assert.True(t, acc.HasFloatField("ping", "errors"), + require.True(t, acc.HasFloatField("ping", "errors"), "Fatal ping should have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_transmitted"), + require.False(t, acc.HasInt64Field("ping", "packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_received"), + require.False(t, acc.HasInt64Field("ping", "packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"), + require.False(t, acc.HasFloatField("ping", "percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"), + require.False(t, acc.HasFloatField("ping", "percent_reply_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -297,13 +297,13 @@ func TestUnreachablePingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, 
acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -345,13 +345,13 @@ func TestTTLExpiredPingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -362,7 +362,7 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index e2d271f51cba1..444313b7d6885 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -33,9 +33,10 @@ func getQueueDirectory() (string, error) { return strings.TrimSpace(string(qd)), nil } -func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { +func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error) { var length, size int64 var oldest time.Time + err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error { if err != nil { acc.AddError(fmt.Errorf("error scanning %s: %s", path, err)) @@ -57,9 +58,11 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { } return nil }) + if err != nil { - return 0, 0, 0, err + return nil, err } + var age int64 if !oldest.IsZero() { age = int64(time.Since(oldest) / time.Second) @@ -67,7 +70,13 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { // system doesn't support ctime age = -1 } - return length, size, age, nil + + fields := map[string]interface{}{"length": length, "size": size} + if age != -1 { + fields["age"] = age + } + + return fields, nil } type Postfix struct { @@ -84,15 +93,12 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error { } for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { - length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc) + fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) if err != nil { acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err)) continue } - fields := map[string]interface{}{"length": length, "size": size} - if age != -1 { - fields["age"] = age - } + acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) } diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go 
index 6ab6556a0cf07..e3032469c615a 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -8,9 +8,9 @@ import ( "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGather(t *testing.T) { @@ -41,20 +41,20 @@ func TestGather(t *testing.T) { metrics[m.Tags["queue"]] = m } - assert.Equal(t, int64(2), metrics["active"].Fields["length"]) - assert.Equal(t, int64(7), metrics["active"].Fields["size"]) - assert.InDelta(t, 0, metrics["active"].Fields["age"], 10) + require.Equal(t, int64(2), metrics["active"].Fields["length"]) + require.Equal(t, int64(7), metrics["active"].Fields["size"]) + require.InDelta(t, 0, metrics["active"].Fields["age"], 10) - assert.Equal(t, int64(1), metrics["hold"].Fields["length"]) - assert.Equal(t, int64(3), metrics["hold"].Fields["size"]) + require.Equal(t, int64(1), metrics["hold"].Fields["length"]) + require.Equal(t, int64(3), metrics["hold"].Fields["size"]) - assert.Equal(t, int64(1), metrics["incoming"].Fields["length"]) - assert.Equal(t, int64(4), metrics["incoming"].Fields["size"]) + require.Equal(t, int64(1), metrics["incoming"].Fields["length"]) + require.Equal(t, int64(4), metrics["incoming"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) - assert.Equal(t, int64(2), metrics["deferred"].Fields["length"]) - assert.Equal(t, int64(6), metrics["deferred"].Fields["size"]) + require.Equal(t, int64(2), metrics["deferred"].Fields["length"]) + require.Equal(t, int64(6), metrics["deferred"].Fields["size"]) } diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 934d06414b7e6..30cf776eb0e0a 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { @@ -71,27 +71,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, 
len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { @@ -117,7 +117,7 @@ func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { point, ok := acc.Get("postgresql") require.True(t, ok) - assert.Equal(t, "postgres", point.Tags["db"]) + require.Equal(t, "postgres", point.Tags["db"]) } func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { @@ -150,7 +150,7 @@ func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { } } - assert.True(t, found) + require.True(t, found) } func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { @@ -172,7 +172,7 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { require.NoError(t, p.Gather(&acc)) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } @@ -212,8 +212,8 @@ func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) { } } - assert.True(t, foundTemplate0) - assert.False(t, foundTemplate1) + require.True(t, foundTemplate0) + require.False(t, foundTemplate1) } func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { @@ -251,6 +251,6 @@ func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { } } - assert.False(t, foundTemplate0) - assert.True(t, foundTemplate1) + require.False(t, foundTemplate0) + require.True(t, foundTemplate1) } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index e0793d4d2dbd6..e765316b007d3 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -142,7 +142,7 @@ func (p *Service) Stop() { p.DB.Close() } -var kvMatcher, _ = regexp.Compile("(password|sslcert|sslkey|sslmode|sslrootcert)=\\S+ ?") +var kvMatcher, _ = regexp.Compile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`) // SanitizedAddress utility function to strip sensitive information from the connection string. func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 176827a4b1dc7..bb776abdc3c8b 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -161,10 +161,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { queryAddon string dbVersion int query string - tagValue string measName string - timestamp string - columns []string ) // Retrieving the database version @@ -177,8 +174,6 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // Query is not run if Database version does not match the query version. 
for i := range p.Query { sqlQuery = p.Query[i].Sqlquery - tagValue = p.Query[i].Tagvalue - timestamp = p.Query[i].Timestamp if p.Query[i].Measurement != "" { measName = p.Query[i].Measurement @@ -198,40 +193,46 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { sqlQuery += queryAddon if p.Query[i].Version <= dbVersion { - rows, err := p.DB.Query(sqlQuery) - if err != nil { - p.Log.Error(err.Error()) - continue - } + p.gatherMetricsFromQuery(acc, sqlQuery, p.Query[i].Tagvalue, p.Query[i].Timestamp, measName) + } + } + return nil +} - defer rows.Close() +func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, sqlQuery string, tagValue string, timestamp string, measName string) { + var columns []string - // grab the column information from the result - if columns, err = rows.Columns(); err != nil { - p.Log.Error(err.Error()) - continue - } + rows, err := p.DB.Query(sqlQuery) + if err != nil { + acc.AddError(err) + return + } - p.AdditionalTags = nil - if tagValue != "" { - tagList := strings.Split(tagValue, ",") - for t := range tagList { - p.AdditionalTags = append(p.AdditionalTags, tagList[t]) - } - } + defer rows.Close() - p.Timestamp = timestamp + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + acc.AddError(err) + return + } - for rows.Next() { - err = p.accRow(measName, rows, acc, columns) - if err != nil { - p.Log.Error(err.Error()) - break - } - } + p.AdditionalTags = nil + if tagValue != "" { + tagList := strings.Split(tagValue, ",") + for t := range tagList { + p.AdditionalTags = append(p.AdditionalTags, tagList[t]) + } + } + + p.Timestamp = timestamp + + for rows.Next() { + err = p.accRow(measName, rows, acc, columns) + if err != nil { + acc.AddError(err) + break } } - return nil } type scanner interface { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 399c236bffcea..fbcc7e1e8a7e2 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func queryRunner(t *testing.T, q query) *testutil.Accumulator { @@ -76,27 +76,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, 
len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { @@ -109,30 +109,30 @@ func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { examples := map[string]func(*testutil.Accumulator){ "SELECT 10.0::float AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.FloatField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, 10.0, v) + require.True(t, found) + require.Equal(t, 10.0, v) }, "SELECT 10.0 AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "10.0", v) + require.True(t, found) + require.Equal(t, "10.0", v) }, "SELECT 'hello world' AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "hello world", v) + require.True(t, found) + require.Equal(t, "hello world", v) }, "SELECT true AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.BoolField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, true, v) + require.True(t, found) + require.Equal(t, true, v) }, "SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) { expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC) v, found := acc.BoolField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, true, v) - assert.True(t, acc.HasTimestamp(measurement, expectedTime)) + require.True(t, found) + require.Equal(t, true, v) + require.True(t, acc.HasTimestamp(measurement, expectedTime)) }, } @@ -192,22 +192,22 @@ func TestPostgresqlFieldOutputIntegration(t *testing.T) { for _, field := range intMetrics { _, found := acc.Int64Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) } for _, field := range int32Metrics { _, found := acc.Int32Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) } for _, field := range floatMetrics { _, found := acc.FloatField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) } for _, field := range stringMetrics { _, found := acc.StringField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a str", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a str", field)) } } @@ -256,9 +256,9 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) - assert.NotEmpty(t, p.IgnoredColumns()) + require.NotEmpty(t, p.IgnoredColumns()) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 5421c926a7745..196b0c12dd49f 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "io" - "log" "net" "strconv" "strings" @@ -16,6 +15,8 @@ import ( type Powerdns struct { UnixSockets []string + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -89,7 +90,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) 
error metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -99,7 +100,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error return nil } -func parseResponse(metrics string) map[string]interface{} { +func (p *Powerdns) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, ",") @@ -112,8 +113,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index bf7d3845f7dc9..5afa9008ae124 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -7,7 +7,6 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" @@ -108,12 +107,16 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { "meta-cache-size", "qsize-q", "signature-cache-size", "sys-msec", "uptime", "user-msec"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns", metric), metric) + require.True(t, acc.HasInt64Field("powerdns", metric), metric) } } func TestPowerdnsParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -173,7 +176,11 @@ func TestPowerdnsParseMetrics(t *testing.T) { } func TestPowerdnsParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -232,7 +239,11 @@ func TestPowerdnsParseCorruptMetrics(t *testing.T) { } func TestPowerdnsParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 190297f9f58a1..bc7ebc5b777f1 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -4,7 +4,6 @@ import ( "bufio" "errors" "fmt" - "log" "math/rand" "net" "os" @@ -22,6 +21,8 @@ type PowerdnsRecursor struct { SocketDir string `toml:"socket_dir"` SocketMode string `toml:"socket_mode"` + Log telegraf.Logger `toml:"-"` + mode uint32 } @@ -125,7 +126,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -135,7 +136,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator return conn.Close() } -func parseResponse(metrics string) map[string]interface{} { +func (p *PowerdnsRecursor) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, 
"\n") @@ -148,8 +149,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index e715fe4e2d165..a4fe9586cd8df 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + @@ -183,12 +183,16 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) + require.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) } } func TestPowerdnsRecursorParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -302,15 +306,17 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -423,15 +429,17 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string @@ -544,9 +552,7 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 144b80f3fc1ec..7fc0a76dac036 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -9,10 
+9,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestProcesses(t *testing.T) { @@ -27,13 +27,13 @@ func TestProcesses(t *testing.T) { err := processes.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("processes", "running")) - assert.True(t, acc.HasInt64Field("processes", "sleeping")) - assert.True(t, acc.HasInt64Field("processes", "stopped")) - assert.True(t, acc.HasInt64Field("processes", "total")) + require.True(t, acc.HasInt64Field("processes", "running")) + require.True(t, acc.HasInt64Field("processes", "sleeping")) + require.True(t, acc.HasInt64Field("processes", "stopped")) + require.True(t, acc.HasInt64Field("processes", "total")) total, ok := acc.Get("processes") require.True(t, ok) - assert.True(t, total.Fields["total"].(int64) > 0) + require.True(t, total.Fields["total"].(int64) > 0) } func TestFromPS(t *testing.T) { diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index 0148fdedca933..f6068ac268e0e 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -2,11 +2,9 @@ package procstat import ( "fmt" - "testing" - "os/user" + "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -19,7 +17,7 @@ func TestGather_RealPatternIntegration(t *testing.T) { pids, err := pg.Pattern(`procstat`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } func TestGather_RealFullPatternIntegration(t *testing.T) { @@ -31,7 +29,7 @@ func TestGather_RealFullPatternIntegration(t *testing.T) { pids, err := pg.FullPattern(`%procstat%`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } func TestGather_RealUserIntegration(t *testing.T) { @@ -45,5 +43,5 @@ func TestGather_RealUserIntegration(t *testing.T) { pids, err := pg.UID(user.Username) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index a8d8f3f51bfbd..93c64882ae835 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -43,13 +43,13 @@ type Proc struct { } func NewProc(pid PID) (Process, error) { - process, err := process.NewProcess(int32(pid)) + p, err := process.NewProcess(int32(pid)) if err != nil { return nil, err } proc := &Proc{ - Process: process, + Process: p, hasCPUTimes: false, tags: make(map[string]string), } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index bc586fca4fa42..237087aa577dc 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/process" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func init() { @@ -51,11 +51,13 @@ MainPID=11408 ControlPID=0 ExecMainPID=11408 `) + //nolint:revive // error code is important for this "test" os.Exit(0) } 
//nolint:errcheck,revive fmt.Printf("command not found\n") + //nolint:revive // error code is important for this "test" os.Exit(1) } @@ -208,7 +210,7 @@ func TestGather_ProcessName(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) + require.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) } func TestGather_NoProcessNameUsesReal(t *testing.T) { @@ -222,7 +224,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasTag("procstat", "process_name")) + require.True(t, acc.HasTag("procstat", "process_name")) } func TestGather_NoPidTag(t *testing.T) { @@ -234,8 +236,8 @@ func TestGather_NoPidTag(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "pid")) - assert.False(t, acc.HasTag("procstat", "pid")) + require.True(t, acc.HasInt32Field("procstat", "pid")) + require.False(t, acc.HasTag("procstat", "pid")) } func TestGather_PidTag(t *testing.T) { @@ -248,8 +250,8 @@ func TestGather_PidTag(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "42", acc.TagValue("procstat", "pid")) - assert.False(t, acc.HasInt32Field("procstat", "pid")) + require.Equal(t, "42", acc.TagValue("procstat", "pid")) + require.False(t, acc.HasInt32Field("procstat", "pid")) } func TestGather_Prefix(t *testing.T) { @@ -262,7 +264,7 @@ func TestGather_Prefix(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) + require.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) } func TestGather_Exe(t *testing.T) { @@ -275,7 +277,7 @@ func TestGather_Exe(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, exe, acc.TagValue("procstat", "exe")) + require.Equal(t, exe, acc.TagValue("procstat", "exe")) } func TestGather_User(t *testing.T) { @@ -289,7 +291,7 @@ func TestGather_User(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, user, acc.TagValue("procstat", "user")) + require.Equal(t, user, acc.TagValue("procstat", "user")) } func TestGather_Pattern(t *testing.T) { @@ -303,7 +305,7 @@ func TestGather_Pattern(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pattern, acc.TagValue("procstat", "pattern")) + require.Equal(t, pattern, acc.TagValue("procstat", "pattern")) } func TestGather_MissingPidMethod(t *testing.T) { @@ -327,7 +329,7 @@ func TestGather_PidFile(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) + require.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) } func TestGather_PercentFirstPass(t *testing.T) { @@ -342,8 +344,8 @@ func TestGather_PercentFirstPass(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) - assert.False(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.False(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_PercentSecondPass(t *testing.T) { @@ -359,8 +361,8 @@ func TestGather_PercentSecondPass(t *testing.T) { require.NoError(t, acc.GatherError(p.Gather)) require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, 
acc.HasFloatField("procstat", "cpu_time_user")) - assert.True(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.True(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_systemdUnitPIDs(t *testing.T) { @@ -374,8 +376,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) { tags := pidsTag.Tags err := pidsTag.Err require.NoError(t, err) - assert.Equal(t, []PID{11408}, pids) - assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + require.Equal(t, []PID{11408}, pids) + require.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) } } @@ -400,8 +402,8 @@ func TestGather_cgroupPIDs(t *testing.T) { tags := pidsTag.Tags err := pidsTag.Err require.NoError(t, err) - assert.Equal(t, []PID{1234, 5678}, pids) - assert.Equal(t, td, tags["cgroup"]) + require.Equal(t, []PID{1234, 5678}, pids) + require.Equal(t, td, tags["cgroup"]) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 2f67607cd3cf3..b763cd14825b2 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -3,21 +3,21 @@ package prometheus import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" ) func TestScrapeURLNoAnnotations(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Annotations = map[string]string{} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Nil(t, url) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotationsNoScrape(t *testing.T) { @@ -25,56 +25,56 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) { p.Name = "myPod" p.Annotations = map[string]string{"prometheus.io/scrape": "false"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Nil(t, url) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} url, err := getScrapeURL(p) - 
assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) } func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { @@ -83,7 +83,7 @@ func TestAddPod(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultipleDuplicatePods(t *testing.T) { @@ -94,7 +94,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) { registerPod(p, prom) p.Name = "Pod2" registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultiplePods(t *testing.T) { @@ -106,7 +106,7 @@ func TestAddMultiplePods(t *testing.T) { p.Name = "Pod2" p.Status.PodIP = "127.0.0.2" registerPod(p, prom) - assert.Equal(t, 2, len(prom.kubernetesPods)) + require.Equal(t, 2, len(prom.kubernetesPods)) } func TestDeletePods(t *testing.T) { @@ -116,7 +116,7 @@ func TestDeletePods(t *testing.T) { p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) unregisterPod(p, prom) - assert.Equal(t, 0, len(prom.kubernetesPods)) + require.Equal(t, 0, len(prom.kubernetesPods)) } func TestPodHasMatchingNamespace(t *testing.T) { @@ -126,12 +126,12 @@ func TestPodHasMatchingNamespace(t *testing.T) { pod.Name = "Pod1" pod.Namespace = "default" shouldMatch := podHasMatchingNamespace(pod, prom) - assert.Equal(t, true, shouldMatch) + require.Equal(t, true, shouldMatch) pod.Name = "Pod2" pod.Namespace = "namespace" shouldNotMatch := podHasMatchingNamespace(pod, prom) - assert.Equal(t, false, shouldNotMatch) + require.Equal(t, false, shouldNotMatch) } func TestPodHasMatchingLabelSelector(t *testing.T) { @@ -148,8 +148,8 @@ func TestPodHasMatchingLabelSelector(t *testing.T) { pod.Labels["label5"] = "label5" labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) - assert.Equal(t, err, nil) - assert.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) } func TestPodHasMatchingFieldSelector(t *testing.T) { @@ -160,8 +160,8 @@ func TestPodHasMatchingFieldSelector(t *testing.T) { pod.Spec.NodeName = "node1000" fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) - assert.Equal(t, err, nil) - assert.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingFieldSelector(pod, 
fieldSelector)) } func TestInvalidFieldSelector(t *testing.T) { @@ -172,7 +172,7 @@ func TestInvalidFieldSelector(t *testing.T) { pod.Spec.NodeName = "node1000" _, err := fields.ParseSelector(prom.KubernetesFieldSelector) - assert.NotEqual(t, err, nil) + require.NotEqual(t, err, nil) } func pod() *corev1.Pod { diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index dfe5cc4749813..49bfa2afa4d27 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -10,13 +10,13 @@ import ( "net/http" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" - "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" ) func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { @@ -63,11 +63,13 @@ func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Met // summary metric fields = makeQuantiles(m) fields["count"] = float64(m.GetSummary().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetSummary().GetSampleSum()) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric fields = makeBuckets(m) fields["count"] = float64(m.GetHistogram().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetHistogram().GetSampleSum()) } else { // standard metric @@ -106,6 +108,7 @@ func makeQuantiles(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) for _, q := range m.GetSummary().Quantile { if !math.IsNaN(q.GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue()) } } @@ -126,14 +129,17 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) if m.Gauge != nil { if !math.IsNaN(m.GetGauge().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["gauge"] = float64(m.GetGauge().GetValue()) } } else if m.Counter != nil { if !math.IsNaN(m.GetCounter().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["counter"] = float64(m.GetCounter().GetValue()) } } else if m.Untyped != nil { if !math.IsNaN(m.GetUntyped().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["value"] = float64(m.GetUntyped().GetValue()) } } diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index ffd5967458c9f..24470a441a6b3 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, 
docker version, cadvisor version & cadvisor revision. @@ -45,13 +45,13 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 func TestParseValidPrometheus(t *testing.T) { // Gauge value metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "cadvisor_version_info", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "gauge": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "osVersion": "CentOS Linux 7 (Core)", "cadvisorRevision": "", "cadvisorVersion": "", @@ -61,35 +61,35 @@ func TestParseValidPrometheus(t *testing.T) { // Counter value metrics, err = Parse([]byte(validUniqueCounter), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "get_token_fail_count", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "get_token_fail_count", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(0), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Summary data //SetDefaultTags(map[string]string{}) metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "0.5": 552048.506, "0.9": 5.876804288e+06, "0.99": 5.876804288e+06, "count": 9.0, "sum": 1.8909097205e+07, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "apiserver_request_latencies", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "500000": 2000.0, "count": 2025.0, "sum": 1.02726334e+08, @@ -101,7 +101,7 @@ func TestParseValidPrometheus(t *testing.T) { "125000": 1994.0, "1e+06": 2005.0, }, metrics[0].Fields()) - assert.Equal(t, + require.Equal(t, map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) } @@ -116,27 +116,27 @@ test_counter{label="test"} 1 %d // IgnoreTimestamp is false metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "test_counter", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "label": "test", }, metrics[0].Tags()) - assert.Equal(t, 
testTime, metrics[0].Time().UTC()) + require.Equal(t, testTime, metrics[0].Time().UTC()) // IgnoreTimestamp is true metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "test_counter", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "label": "test", }, metrics[0].Tags()) - assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) + require.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 18cbf6c8b3d59..2f8e17f196b32 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -13,14 +13,15 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" + parserV2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` @@ -182,8 +183,7 @@ func (p *Prometheus) Description() string { } func (p *Prometheus) Init() error { - - // Config proccessing for node scrape scope for monitor_kubernetes_pods + // Config processing for node scrape scope for monitor_kubernetes_pods p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") if p.isNodeScrapeScope { // Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address @@ -222,8 +222,6 @@ func (p *Prometheus) Init() error { return nil } -var ErrProtocolError = errors.New("prometheus protocol error") - func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { @@ -253,12 +251,12 @@ type URLAndAddress struct { func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { allURLs := make(map[string]URLAndAddress) for _, u := range p.URLs { - URL, err := url.Parse(u) + address, err := url.Parse(u) if err != nil { p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error()) continue } - allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} + allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address} } p.lock.Lock() @@ -273,22 +271,22 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { } for _, service := range p.KubernetesServices { - URL, err := url.Parse(service) + address, err := url.Parse(service) if err != nil { return nil, err } - resolvedAddresses, err := net.LookupHost(URL.Hostname()) + resolvedAddresses, err := net.LookupHost(address.Hostname()) if err != nil { - p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error()) + p.Log.Errorf("Could not resolve %q, skipping it. 
Error: %s", address.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { - serviceURL := p.AddressToURL(URL, resolved) + serviceURL := p.AddressToURL(address, resolved) allURLs[serviceURL.String()] = URLAndAddress{ URL: serviceURL, Address: resolved, - OriginalURL: URL, + OriginalURL: address, } } } @@ -401,8 +399,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error var resp *http.Response if u.URL.Scheme != "unix" { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = p.client.Do(req) } else { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = uClient.Do(req) } if err != nil { @@ -420,7 +420,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - parser := parser_v2.Parser{ + parser := parserV2.Parser{ Header: resp.Header, IgnoreTimestamp: p.IgnoreTimestamp, } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 11117e05b45d9..f56cfef8f59da 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/fields" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. @@ -67,12 +67,12 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.False(t, acc.HasTag("test_metric", "address")) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.False(t, acc.HasTag("test_metric", "address")) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") } func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { @@ -95,12 +95,12 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.True(t, acc.TagValue("test_metric", "address") == tsAddress) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.TagValue("test_metric", "address") 
== tsAddress) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL) } func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) { @@ -125,10 +125,10 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) } func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { @@ -149,10 +149,10 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0") - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + require.True(t, acc.TagSetValue("prometheus", "quantile") == "0") + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") } func TestSummaryMayContainNaN(t *testing.T) { @@ -237,9 +237,9 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("prometheus", "go_goroutines")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("prometheus", "go_goroutines")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + require.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { @@ -262,7 +262,7 @@ func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { require.NoError(t, err) m, _ := acc.Get("test_metric") - assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second) + require.WithinDuration(t, time.Now(), m.Time, 5*time.Second) } func TestUnsupportedFieldSelector(t *testing.T) { @@ -271,8 +271,8 @@ func TestUnsupportedFieldSelector(t *testing.T) { fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector) isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector) - assert.Equal(t, false, isValid) - assert.Equal(t, "spec.containerName", invalidSelector) + require.Equal(t, false, isValid) + require.Equal(t, "spec.containerName", invalidSelector) } func TestInitConfigErrors(t *testing.T) { diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 101b458630eeb..c8234a6d8e75c 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -213,30 +213,30 @@ func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, erro } func getFields(vmStat VMStat) map[string]interface{} { - memTotal, memUsed, memFree, memUsedPercentage := 
getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) - swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) - diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) + memMetrics := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) + swapMetrics := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) + diskMetrics := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) return map[string]interface{}{ "status": vmStat.Status, "uptime": jsonNumberToInt64(vmStat.Uptime), "cpuload": jsonNumberToFloat64(vmStat.CPULoad), - "mem_used": memUsed, - "mem_total": memTotal, - "mem_free": memFree, - "mem_used_percentage": memUsedPercentage, - "swap_used": swapUsed, - "swap_total": swapTotal, - "swap_free": swapFree, - "swap_used_percentage": swapUsedPercentage, - "disk_used": diskUsed, - "disk_total": diskTotal, - "disk_free": diskFree, - "disk_used_percentage": diskUsedPercentage, + "mem_used": memMetrics.used, + "mem_total": memMetrics.total, + "mem_free": memMetrics.free, + "mem_used_percentage": memMetrics.usedPercentage, + "swap_used": swapMetrics.used, + "swap_total": swapMetrics.total, + "swap_free": swapMetrics.free, + "swap_used_percentage": swapMetrics.usedPercentage, + "disk_used": diskMetrics.used, + "disk_total": diskMetrics.total, + "disk_free": diskMetrics.free, + "disk_used_percentage": diskMetrics.usedPercentage, } } -func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) { +func getByteMetrics(total json.Number, used json.Number) metrics { int64Total := jsonNumberToInt64(total) int64Used := jsonNumberToInt64(used) int64Free := int64Total - int64Used @@ -245,7 +245,12 @@ func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, f usedPercentage = float64(int64Used) * 100 / float64(int64Total) } - return int64Total, int64Used, int64Free, usedPercentage + return metrics{ + total: int64Total, + used: int64Used, + free: int64Free, + usedPercentage: usedPercentage, + } } func jsonNumberToInt64(value json.Number) int64 { diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 741a272829474..b0916a5f3dd8e 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/bmizerany/assert" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` @@ -59,7 +59,7 @@ func TestGetNodeSearchDomain(t *testing.T) { err := getNodeSearchDomain(px) require.NoError(t, err) - assert.Equal(t, px.nodeSearchDomain, "test.example.com") + require.Equal(t, px.nodeSearchDomain, "test.example.com") } func TestGatherLxcData(t *testing.T) { @@ -69,7 +69,7 @@ func TestGatherLxcData(t *testing.T) { acc := &testutil.Accumulator{} gatherLxcData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2078164), @@ -103,7 +103,7 @@ func TestGatherQemuData(t *testing.T) { acc := &testutil.Accumulator{} gatherQemuData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2159739), @@ -139,5 +139,5 @@ func TestGather(t *testing.T) { require.NoError(t, err) // Results from both tests 
above - assert.Equal(t, acc.NFields(), 30) + require.Equal(t, acc.NFields(), 30) } diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index 2f16841b2ff8b..78d0010b501eb 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -67,3 +67,10 @@ type NodeDNS struct { Searchdomain string `json:"search"` } `json:"data"` } + +type metrics struct { + total int64 + used int64 + free int64 + usedPercentage float64 +} From db869047596ae9e78d1009bfcf3adf46a86b7032 Mon Sep 17 00:00:00 2001 From: Ehsan <57578566+etycomputer@users.noreply.github.com> Date: Wed, 17 Nov 2021 08:05:48 +1000 Subject: [PATCH 043/133] fix: directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 (#9865) --- .../directory_monitor/directory_monitor.go | 24 +- .../directory_monitor_test.go | 224 +++++++++++++++++- plugins/inputs/tail/tail.go | 25 +- plugins/inputs/tail/tail_test.go | 61 +++++ plugins/parsers/csv/parser.go | 101 ++++---- plugins/parsers/csv/parser_test.go | 156 +++++++++++- 6 files changed, 499 insertions(+), 92 deletions(-) diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index ee1163e7a51b1..6c115bdf9769b 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -261,15 +261,12 @@ func (monitor *DirectoryMonitor) ingestFile(filePath string) error { } func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader, fileName string) error { - // Read the file line-by-line and parse with the configured parse method. - firstLine := true scanner := bufio.NewScanner(reader) for scanner.Scan() { - metrics, err := monitor.parseLine(parser, scanner.Bytes(), firstLine) + metrics, err := monitor.parseLine(parser, scanner.Bytes()) if err != nil { return err } - firstLine = false if monitor.FileTag != "" { for _, m := range metrics { @@ -285,24 +282,17 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read return nil } -func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte, firstLine bool) ([]telegraf.Metric, error) { +func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte) ([]telegraf.Metric, error) { switch parser.(type) { case *csv.Parser: - // The CSV parser parses headers in Parse and skips them in ParseLine. 
- if firstLine { - return parser.Parse(line) - } - - m, err := parser.ParseLine(string(line)) + m, err := parser.Parse(line) if err != nil { + if errors.Is(err, io.EOF) { + return nil, nil + } return nil, err } - - if m != nil { - return []telegraf.Metric{m}, nil - } - - return []telegraf.Metric{}, nil + return m, err default: return parser.Parse(line) } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 3e954adb40320..3245074711fb2 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -3,12 +3,11 @@ package directory_monitor import ( "bytes" "compress/gzip" + "github.com/stretchr/testify/require" "os" "path/filepath" "testing" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" ) @@ -193,3 +192,224 @@ func TestFileTag(t *testing.T) { } } } + +func TestCSVNoSkipRows(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + CSVSkipRows: 0, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} + +func TestCSVSkipRows(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. 
+ r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + CSVSkipRows: 2, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `garbage nonsense 1 +garbage,nonsense,2 +line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} + +func TestCSVMultiHeader(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 2, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `line,line,line +1,2,3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. 
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index d5bda84732ad8..49b25924d5cc1 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -288,25 +288,17 @@ func (t *Tail) tailNewFiles(fromBeginning bool) error { } // ParseLine parses a line of text. -func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.Metric, error) { +func parseLine(parser parsers.Parser, line string) ([]telegraf.Metric, error) { switch parser.(type) { case *csv.Parser: - // The csv parser parses headers in Parse and skips them in ParseLine. - // As a temporary solution call Parse only when getting the first - // line from the file. - if firstLine { - return parser.Parse([]byte(line)) - } - - m, err := parser.ParseLine(line) + m, err := parser.Parse([]byte(line)) if err != nil { + if errors.Is(err, io.EOF) { + return nil, nil + } return nil, err } - - if m != nil { - return []telegraf.Metric{m}, nil - } - return []telegraf.Metric{}, nil + return m, err default: return parser.Parse([]byte(line)) } @@ -315,8 +307,6 @@ func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.M // Receiver is launched as a goroutine to continuously watch a tailed logfile // for changes, parse any incoming msgs, and add to the accumulator. func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { - var firstLine = true - // holds the individual lines of multi-line log entries. var buffer bytes.Buffer @@ -378,13 +368,12 @@ func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { continue } - metrics, err := parseLine(parser, text, firstLine) + metrics, err := parseLine(parser, text) if err != nil { t.Log.Errorf("Malformed log line in %q: [%q]: %s", tailer.Filename, text, err.Error()) continue } - firstLine = false if t.PathTag != "" { for _, metric := range metrics { diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 1098a10edbff5..908ce1087e872 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -342,6 +342,67 @@ cpu,42 testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } +func TestCSVMultiHeaderWithSkipRowANDColumn(t *testing.T) { + tmpfile, err := os.CreateTemp("", "") + require.NoError(t, err) + defer os.Remove(tmpfile.Name()) + + _, err = tmpfile.WriteString(`garbage nonsense +skip,measurement,value +row,1,2 +skip1,cpu,42 +skip2,mem,100 +`) + require.NoError(t, err) + require.NoError(t, tmpfile.Close()) + + plugin := NewTestTail() + plugin.Log = testutil.Logger{} + plugin.FromBeginning = true + plugin.Files = []string{tmpfile.Name()} + plugin.SetParserFunc(func() (parsers.Parser, error) { + return csv.NewParser(&csv.Config{ + MeasurementColumn: "measurement1", + HeaderRowCount: 2, + SkipRows: 1, + SkipColumns: 1, + TimeFunc: func() time.Time { return time.Unix(0, 0) }, + }) + }) + + err = plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Start(&acc) + require.NoError(t, err) + defer plugin.Stop() + err = plugin.Gather(&acc) + require.NoError(t, err) + acc.Wait(2) + plugin.Stop() + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + 
"value2": 42, + }, + time.Unix(0, 0)), + testutil.MustMetric("mem", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "value2": 100, + }, + time.Unix(0, 0)), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { tmpfile, err := os.CreateTemp("", "") diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go index 8f4969efb70bd..3f46c24b946a2 100644 --- a/plugins/parsers/csv/parser.go +++ b/plugins/parsers/csv/parser.go @@ -96,49 +96,69 @@ func (p *Parser) compile(r io.Reader) *csv.Reader { func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { r := bytes.NewReader(buf) + return parseCSV(p, r) +} + +// ParseLine does not use any information in header and assumes DataColumns is set +// it will also not skip any rows +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + r := bytes.NewReader([]byte(line)) + metrics, err := parseCSV(p, r) + if err != nil { + return nil, err + } + if len(metrics) == 1 { + return metrics[0], nil + } + if len(metrics) > 1 { + return nil, fmt.Errorf("expected 1 metric found %d", len(metrics)) + } + return nil, nil +} + +func parseCSV(p *Parser, r io.Reader) ([]telegraf.Metric, error) { csvReader := p.compile(r) // skip first rows - for i := 0; i < p.SkipRows; i++ { + for p.SkipRows > 0 { _, err := csvReader.Read() if err != nil { return nil, err } + p.SkipRows-- } - // if there is a header and we did not get DataColumns + // if there is a header, and we did not get DataColumns // set DataColumns to names extracted from the header // we always reread the header to avoid side effects // in cases where multiple files with different // headers are read - if !p.gotColumnNames { - headerNames := make([]string, 0) - for i := 0; i < p.HeaderRowCount; i++ { - header, err := csvReader.Read() - if err != nil { - return nil, err - } - //concatenate header names - for i := range header { - name := header[i] - if p.TrimSpace { - name = strings.Trim(name, " ") - } - if len(headerNames) <= i { - headerNames = append(headerNames, name) - } else { - headerNames[i] = headerNames[i] + name - } - } + for p.HeaderRowCount > 0 { + header, err := csvReader.Read() + if err != nil { + return nil, err } - p.ColumnNames = headerNames[p.SkipColumns:] - } else { - // if columns are named, just skip header rows - for i := 0; i < p.HeaderRowCount; i++ { - _, err := csvReader.Read() - if err != nil { - return nil, err + p.HeaderRowCount-- + if p.gotColumnNames { + // Ignore header lines if columns are named + continue + } + //concatenate header names + for i, h := range header { + name := h + if p.TrimSpace { + name = strings.Trim(name, " ") + } + if len(p.ColumnNames) <= i { + p.ColumnNames = append(p.ColumnNames, name) + } else { + p.ColumnNames[i] = p.ColumnNames[i] + name } } } + if !p.gotColumnNames { + // skip first rows + p.ColumnNames = p.ColumnNames[p.SkipColumns:] + p.gotColumnNames = true + } table, err := csvReader.ReadAll() if err != nil { @@ -156,27 +176,6 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } -// ParseLine does not use any information in header and assumes DataColumns is set -// it will also not skip any rows -func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - r := bytes.NewReader([]byte(line)) - csvReader := p.compile(r) - // if there is nothing in DataColumns, ParseLine will fail - if 
len(p.ColumnNames) == 0 { - return nil, fmt.Errorf("[parsers.csv] data columns must be specified") - } - - record, err := csvReader.Read() - if err != nil { - return nil, err - } - m, err := p.parseRecord(record) - if err != nil { - return nil, err - } - return m, nil -} - func (p *Parser) parseRecord(record []string) (telegraf.Metric, error) { recordFields := make(map[string]interface{}) tags := make(map[string]string) @@ -289,7 +288,7 @@ outer: // will be the current timestamp, else it will try to parse the time according // to the format. func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, - timestampColumn, timestampFormat string, Timezone string, + timestampColumn, timestampFormat string, timezone string, ) (time.Time, error) { if timestampColumn != "" { if recordFields[timestampColumn] == nil { @@ -300,7 +299,7 @@ func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface case "": return time.Time{}, fmt.Errorf("timestamp format must be specified") default: - metricTime, err := internal.ParseTimestamp(timestampFormat, recordFields[timestampColumn], Timezone) + metricTime, err := internal.ParseTimestamp(timestampFormat, recordFields[timestampColumn], timezone) if err != nil { return time.Time{}, err } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 8e4a5181c7969..7eb1d0d8dbed0 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -2,6 +2,7 @@ package csv import ( "fmt" + "io" "testing" "time" @@ -59,9 +60,33 @@ func TestHeaderOverride(t *testing.T) { require.NoError(t, err) testCSV := `line1,line2,line3 3.4,70,test_name` + expectedFields := map[string]interface{}{ + "first": 3.4, + "second": int64(70), + } metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "test_name", metrics[0].Name()) + require.Equal(t, expectedFields, metrics[0].Fields()) + + testCSVRows := []string{"line1,line2,line3\r\n", "3.4,70,test_name\r\n"} + + p, err = NewParser( + &Config{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + metrics, err = p.Parse([]byte(testCSVRows[0])) + require.NoError(t, err) + require.Equal(t, []telegraf.Metric{}, metrics) + m, err := p.ParseLine(testCSVRows[1]) + require.NoError(t, err) + require.Equal(t, "test_name", m.Name()) + require.Equal(t, expectedFields, m.Fields()) } func TestTimestamp(t *testing.T) { @@ -293,6 +318,22 @@ func TestTrimSpace(t *testing.T) { metrics, err := p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, expectedFields, metrics[0].Fields()) + + p, err = NewParser( + &Config{ + HeaderRowCount: 2, + TrimSpace: true, + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + testCSV = " col , col ,col\n" + + " 1 , 2 ,3\n" + + " test space , 80 ,test_name" + + metrics, err = p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, map[string]interface{}{"col1": "test space", "col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) } func TestTrimSpaceDelimitedBySpace(t *testing.T) { @@ -332,6 +373,7 @@ func TestSkipRows(t *testing.T) { TimeFunc: DefaultTime, }, ) + require.NoError(t, err) testCSV := `garbage nonsense line1,line2,line3 hello,80,test_name2` @@ -339,10 +381,39 @@ hello,80,test_name2` expectedFields := map[string]interface{}{ "line2": int64(80), } + expectedTags := map[string]string{ + "line1": "hello", + } metrics, err := 
p.Parse([]byte(testCSV)) require.NoError(t, err) require.Equal(t, "test_name2", metrics[0].Name()) require.Equal(t, expectedFields, metrics[0].Fields()) + require.Equal(t, expectedTags, metrics[0].Tags()) + + p, err = NewParser( + &Config{ + HeaderRowCount: 1, + SkipRows: 1, + TagColumns: []string{"line1"}, + MeasurementColumn: "line3", + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + testCSVRows := []string{"garbage nonsense\r\n", "line1,line2,line3\r\n", "hello,80,test_name2\r\n"} + + metrics, err = p.Parse([]byte(testCSVRows[0])) + require.Error(t, io.EOF, err) + require.Error(t, err) + require.Nil(t, metrics) + m, err := p.ParseLine(testCSVRows[1]) + require.NoError(t, err) + require.Nil(t, m) + m, err = p.ParseLine(testCSVRows[2]) + require.NoError(t, err) + require.Equal(t, "test_name2", m.Name()) + require.Equal(t, expectedFields, m.Fields()) + require.Equal(t, expectedTags, m.Tags()) } func TestSkipColumns(t *testing.T) { @@ -375,8 +446,8 @@ func TestSkipColumnsWithHeader(t *testing.T) { ) require.NoError(t, err) testCSV := `col,col,col - 1,2,3 - trash,80,test_name` +1,2,3 +trash,80,test_name` // we should expect an error if we try to get col1 metrics, err := p.Parse([]byte(testCSV)) @@ -384,6 +455,44 @@ func TestSkipColumnsWithHeader(t *testing.T) { require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) } +func TestMultiHeader(t *testing.T) { + p, err := NewParser( + &Config{ + HeaderRowCount: 2, + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + testCSV := `col,col +1,2 +80,test_name` + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, metrics[0].Fields()) + + testCSVRows := []string{"col,col\r\n", "1,2\r\n", "80,test_name\r\n"} + + p, err = NewParser( + &Config{ + HeaderRowCount: 2, + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + + metrics, err = p.Parse([]byte(testCSVRows[0])) + require.Error(t, io.EOF, err) + require.Error(t, err) + require.Nil(t, metrics) + m, err := p.ParseLine(testCSVRows[1]) + require.NoError(t, err) + require.Nil(t, m) + m, err = p.ParseLine(testCSVRows[2]) + require.NoError(t, err) + require.Equal(t, map[string]interface{}{"col1": int64(80), "col2": "test_name"}, m.Fields()) +} + func TestParseStream(t *testing.T) { p, err := NewParser( &Config{ @@ -400,7 +509,8 @@ func TestParseStream(t *testing.T) { metrics, err := p.Parse([]byte(csvHeader)) require.NoError(t, err) require.Len(t, metrics, 0) - metric, err := p.ParseLine(csvBody) + m, err := p.ParseLine(csvBody) + require.NoError(t, err) testutil.RequireMetricEqual(t, testutil.MustMetric( "csv", @@ -411,7 +521,45 @@ func TestParseStream(t *testing.T) { "c": int64(3), }, DefaultTime(), - ), metric) + ), m) +} + +func TestParseLineMultiMetricErrorMessage(t *testing.T) { + p, err := NewParser( + &Config{ + MetricName: "csv", + HeaderRowCount: 1, + TimeFunc: DefaultTime, + }, + ) + require.NoError(t, err) + + csvHeader := "a,b,c" + csvOneRow := "1,2,3" + csvTwoRows := "4,5,6\n7,8,9" + + metrics, err := p.Parse([]byte(csvHeader)) + require.NoError(t, err) + require.Len(t, metrics, 0) + m, err := p.ParseLine(csvOneRow) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + "c": int64(3), + }, + DefaultTime(), + ), m) + m, err = p.ParseLine(csvTwoRows) + require.Errorf(t, err, "expected 1 metric found 2") + 
require.Nil(t, m) + metrics, err = p.Parse([]byte(csvTwoRows)) + require.NoError(t, err) + require.Len(t, metrics, 2) } func TestTimestampUnixFloatPrecision(t *testing.T) { From f71695bc98c2cabb1272b79e71a82ec69423dfc4 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Tue, 16 Nov 2021 14:11:24 -0800 Subject: [PATCH 044/133] docs: update deprecated plugin readmes (#10100) --- plugins/inputs/httpjson/README.md | 4 ++-- plugins/inputs/jolokia/README.md | 2 +- plugins/inputs/kafka_consumer_legacy/README.md | 2 ++ plugins/inputs/logparser/README.md | 4 ++-- plugins/inputs/snmp_legacy/README.md | 4 ++++ 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 19fe014457734..3f7efb10a4098 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,8 +1,8 @@ # HTTP JSON Input Plugin -The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. +### DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement. -Deprecated (1.6): use the [http](../http) input. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. ### Configuration: diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 96ee48701b464..9f2a658f16247 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,6 +1,6 @@ # Jolokia Input Plugin -**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. +### Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. #### Configuration diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 2f0c219ea8647..86ccaa4c1dc09 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,5 +1,7 @@ # Kafka Consumer Legacy Input Plugin +### Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer). + The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 0abdba2c972df..8cc513e98cb70 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,11 +1,11 @@ # Logparser Input Plugin +### Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. - The `tail` plugin now provides all the functionality of the `logparser` plugin. 
Most options can be translated directly to the `tail` plugin: - For options in the `[inputs.logparser.grok]` section, the equivalent option diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 06bebbcad6176..8e639900ffe0f 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,5 +1,7 @@ # SNMP Legacy Input Plugin +### Deprecated in version 1.0. Use [SNMP input plugin][]. + The SNMP input plugin gathers metrics from SNMP agents ### Configuration: @@ -547,3 +549,5 @@ ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 ``` + +[SNMP input plugin]: /plugins/inputs/snmp From 4e4a33003bfe6607cfa9f1f7315aa6c5e5c3cf6a Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 16 Nov 2021 16:17:23 -0600 Subject: [PATCH 045/133] fix(outputs/graylog): fix failing test due to port already in use (#10074) --- go.mod | 2 ++ go.sum | 3 +++ plugins/outputs/graylog/graylog_test.go | 9 ++++----- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index c0f59b6da7b73..1ae30029ffadd 100644 --- a/go.mod +++ b/go.mod @@ -329,6 +329,8 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) +require github.com/libp2p/go-reuseport v0.1.0 + require ( github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect diff --git a/go.sum b/go.sum index cd8ea8078be8b..1101853e2efa8 100644 --- a/go.sum +++ b/go.sum @@ -1423,6 +1423,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -2421,6 +2423,7 @@ golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git 
a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index 3932c736c2aff..a270f279b631f 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -13,6 +13,7 @@ import ( tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" + reuse "github.com/libp2p/go-reuseport" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -161,9 +162,7 @@ func TestWriteTCP(t *testing.T) { type GelfObject map[string]interface{} func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Graylog) { - serverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:12201") - require.NoError(t, err) - udpServer, err := net.ListenUDP("udp", serverAddr) + udpServer, err := reuse.ListenPacket("udp", "127.0.0.1:12201") require.NoError(t, err) defer udpServer.Close() defer wg.Done() @@ -171,7 +170,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Gr recv := func() { bufR := make([]byte, 1024) - n, _, err := udpServer.ReadFromUDP(bufR) + n, _, err := udpServer.ReadFrom(bufR) require.NoError(t, err) b := bytes.NewReader(bufR[0:n]) @@ -203,7 +202,7 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Gr } func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup, tlsConfig *tls.Config) { - tcpServer, err := net.Listen("tcp", "127.0.0.1:12201") + tcpServer, err := reuse.Listen("tcp", "127.0.0.1:12201") require.NoError(t, err) defer tcpServer.Close() defer wg.Done() From 5549bf0f59713c8e34e6bd62afdb9c47be3bb09e Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Wed, 17 Nov 2021 08:24:31 -0600 Subject: [PATCH 046/133] fix(inputs/zfs): resolve README.md linter issues (#10109) --- plugins/inputs/zfs/README.md | 72 +++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 34 deletions(-) diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index 1f3f125d391ec..77b101915bbe6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -4,7 +4,7 @@ This ZFS plugin provides metrics from your ZFS filesystems. It supports ZFS on Linux and FreeBSD. It gets ZFS stat from `/proc/spl/kstat/zfs` on Linux and from `sysctl`, 'zfs' and `zpool` on FreeBSD. -### Configuration: +## Configuration ```toml [[inputs.zfs]] @@ -27,7 +27,7 @@ from `sysctl`, 'zfs' and `zpool` on FreeBSD. # datasetMetrics = false ``` -### Measurements & Fields: +### Measurements & Fields By default this plugin collects metrics about ZFS internals pool and dataset. These metrics are either counters or measure sizes @@ -189,53 +189,53 @@ each dataset. 
On Linux (reference: kstat accumulated time and queue length statistics): - zfs_pool - - nread (integer, bytes) - - nwritten (integer, bytes) - - reads (integer, count) - - writes (integer, count) - - wtime (integer, nanoseconds) - - wlentime (integer, queuelength * nanoseconds) - - wupdate (integer, timestamp) - - rtime (integer, nanoseconds) - - rlentime (integer, queuelength * nanoseconds) - - rupdate (integer, timestamp) - - wcnt (integer, count) - - rcnt (integer, count) + - nread (integer, bytes) + - nwritten (integer, bytes) + - reads (integer, count) + - writes (integer, count) + - wtime (integer, nanoseconds) + - wlentime (integer, queuelength * nanoseconds) + - wupdate (integer, timestamp) + - rtime (integer, nanoseconds) + - rlentime (integer, queuelength * nanoseconds) + - rupdate (integer, timestamp) + - wcnt (integer, count) + - rcnt (integer, count) On FreeBSD: - zfs_pool - - allocated (integer, bytes) - - capacity (integer, bytes) - - dedupratio (float, ratio) - - free (integer, bytes) - - size (integer, bytes) - - fragmentation (integer, percent) + - allocated (integer, bytes) + - capacity (integer, bytes) + - dedupratio (float, ratio) + - free (integer, bytes) + - size (integer, bytes) + - fragmentation (integer, percent) #### Dataset Metrics (optional, only on FreeBSD) - zfs_dataset - - avail (integer, bytes) - - used (integer, bytes) - - usedsnap (integer, bytes - - usedds (integer, bytes) + - avail (integer, bytes) + - used (integer, bytes) + - usedsnap (integer, bytes + - usedds (integer, bytes) -### Tags: +### Tags - ZFS stats (`zfs`) will have the following tag: - - pools - A `::` concatenated list of all ZFS pools on the machine. - - datasets - A `::` concatenated list of all ZFS datasets on the machine. + - pools - A `::` concatenated list of all ZFS pools on the machine. + - datasets - A `::` concatenated list of all ZFS datasets on the machine. - Pool metrics (`zfs_pool`) will have the following tag: - - pool - with the name of the pool which the metrics are for. - - health - the health status of the pool. (FreeBSD only) + - pool - with the name of the pool which the metrics are for. + - health - the health status of the pool. (FreeBSD only) - Dataset metrics (`zfs_dataset`) will have the following tag: - - dataset - with the name of the dataset which the metrics are for. + - dataset - with the name of the dataset which the metrics are for. -### Example Output: +### Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter zfs --test * Plugin: zfs, Collection 1 > zfs_pool,health=ONLINE,pool=zroot allocated=1578590208i,capacity=2i,dedupratio=1,fragmentation=1i,free=64456531968i,size=66035122176i 1464473103625653908 @@ -287,8 +287,9 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool without an l2 device. - - The zfs property secondary cache. + +- We have multiple pools, we evicted something from a pool without an l2 device. +- The zfs property secondary cache. `arcstats_c` Arc target size, this is the size the system thinks the arc should have. @@ -313,6 +314,7 @@ A short description for some of the metrics. 
`zfetchstats_stride_hits` Counts the number of cache hits, to items which are in the cache because of the prefetcher (prefetched stride reads) #### Vdev Cache Stats (FreeBSD only) + note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_hits` Hits to the vdev (device level) cache. @@ -320,6 +322,7 @@ note: the vdev cache is deprecated in some ZFS implementations `vdev_cache_stats_misses` Misses to the vdev (device level) cache. #### ABD Stats (Linux Only) + ABD is a linear/scatter dual typed buffer for ARC `abdstats_linear_cnt` number of linear ABDs which are currently allocated @@ -343,6 +346,7 @@ ABD is a linear/scatter dual typed buffer for ARC `fm_erpt-dropped` counts when an error report cannot be created (eg available memory is too low) #### ZIL (Linux Only) + note: ZIL measurements are system-wide, neither per-pool nor per-dataset `zil_commit_count` counts when ZFS transactions are committed to a ZIL From f82528dd8a60f168418db2e1515f2f93096ee721 Mon Sep 17 00:00:00 2001 From: Josh Powers Date: Wed, 17 Nov 2021 12:57:57 -0700 Subject: [PATCH 047/133] Update changelog (cherry picked from commit 2fd588f09e2b4ee6da31158b16deb2cf09ad70ef) --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ etc/telegraf.conf | 8 +++++--- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d03253afbcff7..ed54e1ff44f17 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,38 @@ +## v1.20.4 [2021-11-17] + +#### Release Notes + + - [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 + - [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! 
+ + - [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* + - [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* + - [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* + - [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +#### Bugfixes + + - [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 + - [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI + - [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 + - [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up + - [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int + - [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd + - [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library + - [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" + - [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly + - [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 + - [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver + - [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs + - [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling + +#### Features +#### New Input Plugins +#### New Output Plugins +#### New External Plugins + ## v1.20.3 [2021-10-27] #### Release Notes diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 49aa4c327c287..91a887bdb3927 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -5030,7 +5030,7 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017"] +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] # # ## When true, collect cluster status # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -5469,7 +5469,9 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # # bin_path = "/usr/bin/nvidia-smi" # # ## Optional: timeout for GPU polling @@ -7857,7 +7859,7 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## Broker URLs for the MQTT server or cluster. To connect to multiple -# ## clusters or standalone servers, use a seperate plugin instance. +# ## clusters or standalone servers, use a separate plugin instance. 
# ## example: servers = ["tcp://localhost:1883"] # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] From 146fff31832e74e0db31ed6405400a3b18e9ccdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 18 Nov 2021 15:22:43 +0100 Subject: [PATCH 048/133] fix: Linter fixes for plugins/inputs/[t-z]* (#10105) --- plugins/inputs/tail/multiline.go | 8 +- plugins/inputs/tail/multiline_test.go | 119 +++++++++--------- plugins/inputs/tail/tail_test.go | 16 ++- plugins/inputs/tcp_listener/tcp_listener.go | 3 +- .../inputs/tcp_listener/tcp_listener_test.go | 17 ++- plugins/inputs/tengine/tengine_test.go | 6 +- plugins/inputs/twemproxy/twemproxy.go | 9 +- plugins/inputs/twemproxy/twemproxy_test.go | 18 ++- plugins/inputs/udp_listener/udp_listener.go | 6 +- plugins/inputs/unbound/unbound_test.go | 21 ++-- plugins/inputs/uwsgi/uwsgi.go | 18 +-- plugins/inputs/varnish/varnish_test.go | 20 +-- plugins/inputs/vsphere/endpoint.go | 49 ++++---- plugins/inputs/vsphere/finder.go | 14 +-- plugins/inputs/vsphere/tscache.go | 13 +- plugins/inputs/vsphere/vsphere_test.go | 12 +- .../win_perf_counters_integration_test.go | 35 +++--- .../win_perf_counters_test.go | 58 ++++----- .../inputs/win_services/win_services_test.go | 14 +-- plugins/inputs/wireguard/wireguard.go | 11 +- plugins/inputs/wireguard/wireguard_test.go | 9 +- plugins/inputs/wireless/wireless_linux.go | 17 ++- plugins/inputs/wireless/wireless_test.go | 14 ++- plugins/inputs/x509_cert/x509_cert_test.go | 50 ++++---- plugins/inputs/zipkin/zipkin.go | 5 +- plugins/inputs/zipkin/zipkin_test.go | 5 +- plugins/inputs/zookeeper/zookeeper_test.go | 6 +- 27 files changed, 285 insertions(+), 288 deletions(-) diff --git a/plugins/inputs/tail/multiline.go b/plugins/inputs/tail/multiline.go index 7ea2e460b88d6..c8d5d4f6d57d0 100644 --- a/plugins/inputs/tail/multiline.go +++ b/plugins/inputs/tail/multiline.go @@ -122,14 +122,14 @@ func (w *MultilineMatchWhichLine) UnmarshalText(data []byte) (err error) { switch strings.ToUpper(s) { case `PREVIOUS`, `"PREVIOUS"`, `'PREVIOUS'`: *w = Previous - return + return nil case `NEXT`, `"NEXT"`, `'NEXT'`: *w = Next - return + return nil } *w = -1 - return fmt.Errorf("E! [inputs.tail] unknown multiline MatchWhichLine") + return fmt.Errorf("unknown multiline MatchWhichLine") } // MarshalText implements encoding.TextMarshaler @@ -138,5 +138,5 @@ func (w MultilineMatchWhichLine) MarshalText() ([]byte, error) { if s != "" { return []byte(s), nil } - return nil, fmt.Errorf("E! 
[inputs.tail] unknown multiline MatchWhichLine") + return nil, fmt.Errorf("unknown multiline MatchWhichLine") } diff --git a/plugins/inputs/tail/multiline_test.go b/plugins/inputs/tail/multiline_test.go index 26a7e80292772..70111f2389031 100644 --- a/plugins/inputs/tail/multiline_test.go +++ b/plugins/inputs/tail/multiline_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" - "github.com/stretchr/testify/assert" ) func TestMultilineConfigOK(t *testing.T) { @@ -17,7 +18,7 @@ func TestMultilineConfigOK(t *testing.T) { _, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") } func TestMultilineConfigError(t *testing.T) { @@ -28,7 +29,7 @@ func TestMultilineConfigError(t *testing.T) { _, err := c.NewMultiline() - assert.Error(t, err, "The pattern was invalid") + require.Error(t, err, "The pattern was invalid") } func TestMultilineConfigTimeoutSpecified(t *testing.T) { @@ -39,9 +40,9 @@ func TestMultilineConfigTimeoutSpecified(t *testing.T) { Timeout: &duration, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, *m.config.Timeout) + require.Equal(t, duration, *m.config.Timeout) } func TestMultilineConfigDefaultTimeout(t *testing.T) { @@ -51,9 +52,9 @@ func TestMultilineConfigDefaultTimeout(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") - assert.Equal(t, duration, *m.config.Timeout) + require.Equal(t, duration, *m.config.Timeout) } func TestMultilineIsEnabled(t *testing.T) { @@ -62,11 +63,11 @@ func TestMultilineIsEnabled(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") isEnabled := m.IsEnabled() - assert.True(t, isEnabled, "Should have been enabled") + require.True(t, isEnabled, "Should have been enabled") } func TestMultilineIsDisabled(t *testing.T) { @@ -74,11 +75,11 @@ func TestMultilineIsDisabled(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") isEnabled := m.IsEnabled() - assert.False(t, isEnabled, "Should have been disabled") + require.False(t, isEnabled, "Should have been disabled") } func TestMultilineFlushEmpty(t *testing.T) { @@ -87,12 +88,12 @@ func TestMultilineFlushEmpty(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer text := m.Flush(&buffer) - assert.Empty(t, text) + require.Empty(t, text) } func TestMultilineFlush(t *testing.T) { @@ -101,15 +102,15 @@ func TestMultilineFlush(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer _, err = buffer.WriteString("foo") - assert.NoError(t, err) + require.NoError(t, err) text := m.Flush(&buffer) - assert.Equal(t, "foo", text) - assert.Zero(t, buffer.Len()) + require.Equal(t, "foo", text) + require.Zero(t, buffer.Len()) } func TestMultiLineProcessLinePrevious(t *testing.T) { @@ -118,28 +119,28 @@ func 
TestMultiLineProcessLinePrevious(t *testing.T) { MatchWhichLine: Previous, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer text := m.ProcessLine("1", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("=>2", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("=>3", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("4", &buffer) - assert.Equal(t, "1=>2=>3", text) - assert.NotZero(t, buffer.Len()) + require.Equal(t, "1=>2=>3", text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("5", &buffer) - assert.Equal(t, "4", text) - assert.Equal(t, "5", buffer.String()) + require.Equal(t, "4", text) + require.Equal(t, "5", buffer.String()) } func TestMultiLineProcessLineNext(t *testing.T) { @@ -148,28 +149,28 @@ func TestMultiLineProcessLineNext(t *testing.T) { MatchWhichLine: Next, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") var buffer bytes.Buffer text := m.ProcessLine("1=>", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("2=>", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("3=>", &buffer) - assert.Empty(t, text) - assert.NotZero(t, buffer.Len()) + require.Empty(t, text) + require.NotZero(t, buffer.Len()) text = m.ProcessLine("4", &buffer) - assert.Equal(t, "1=>2=>3=>4", text) - assert.Zero(t, buffer.Len()) + require.Equal(t, "1=>2=>3=>4", text) + require.Zero(t, buffer.Len()) text = m.ProcessLine("5", &buffer) - assert.Equal(t, "5", text) - assert.Zero(t, buffer.Len()) + require.Equal(t, "5", text) + require.Zero(t, buffer.Len()) } func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) { @@ -179,13 +180,13 @@ func TestMultiLineMatchStringWithInvertMatchFalse(t *testing.T) { InvertMatch: false, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") matches1 := m.matchString("t=>") matches2 := m.matchString("t") - assert.True(t, matches1) - assert.False(t, matches2) + require.True(t, matches1) + require.False(t, matches2) } func TestMultiLineMatchStringWithInvertTrue(t *testing.T) { @@ -195,41 +196,41 @@ func TestMultiLineMatchStringWithInvertTrue(t *testing.T) { InvertMatch: true, } m, err := c.NewMultiline() - assert.NoError(t, err, "Configuration was OK.") + require.NoError(t, err, "Configuration was OK.") matches1 := m.matchString("t=>") matches2 := m.matchString("t") - assert.False(t, matches1) - assert.True(t, matches2) + require.False(t, matches1) + require.True(t, matches2) } func TestMultilineWhat(t *testing.T) { var w1 MultilineMatchWhichLine - assert.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`))) - assert.Equal(t, Previous, w1) + require.NoError(t, w1.UnmarshalTOML([]byte(`"previous"`))) + require.Equal(t, Previous, w1) var w2 MultilineMatchWhichLine - assert.NoError(t, w2.UnmarshalTOML([]byte(`previous`))) - assert.Equal(t, Previous, w2) + require.NoError(t, w2.UnmarshalTOML([]byte(`previous`))) + require.Equal(t, 
Previous, w2) var w3 MultilineMatchWhichLine - assert.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`))) - assert.Equal(t, Previous, w3) + require.NoError(t, w3.UnmarshalTOML([]byte(`'previous'`))) + require.Equal(t, Previous, w3) var w4 MultilineMatchWhichLine - assert.NoError(t, w4.UnmarshalTOML([]byte(`"next"`))) - assert.Equal(t, Next, w4) + require.NoError(t, w4.UnmarshalTOML([]byte(`"next"`))) + require.Equal(t, Next, w4) var w5 MultilineMatchWhichLine - assert.NoError(t, w5.UnmarshalTOML([]byte(`next`))) - assert.Equal(t, Next, w5) + require.NoError(t, w5.UnmarshalTOML([]byte(`next`))) + require.Equal(t, Next, w5) var w6 MultilineMatchWhichLine - assert.NoError(t, w6.UnmarshalTOML([]byte(`'next'`))) - assert.Equal(t, Next, w6) + require.NoError(t, w6.UnmarshalTOML([]byte(`'next'`))) + require.Equal(t, Next, w6) var w7 MultilineMatchWhichLine - assert.Error(t, w7.UnmarshalTOML([]byte(`nope`))) - assert.Equal(t, MultilineMatchWhichLine(-1), w7) + require.Error(t, w7.UnmarshalTOML([]byte(`nope`))) + require.Equal(t, MultilineMatchWhichLine(-1), w7) } diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index 908ce1087e872..a6ff6a7ebbc9c 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -81,7 +80,7 @@ func TestTailBadLine(t *testing.T) { acc.Wait(1) tt.Stop() - assert.Contains(t, buf.String(), "Malformed log line") + require.Contains(t, buf.String(), "Malformed log line") } func TestTailDosLineEndings(t *testing.T) { @@ -137,7 +136,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { require.NoError(t, err) acc := testutil.Accumulator{} - assert.NoError(t, tt.Start(&acc)) + require.NoError(t, tt.Start(&acc)) defer tt.Stop() acc.Wait(3) @@ -168,7 +167,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { "loglevel": "ERROR", }) - assert.Equal(t, uint64(3), acc.NMetrics()) + require.Equal(t, uint64(3), acc.NMetrics()) } func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { @@ -201,7 +200,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { require.NoError(t, err) acc := testutil.Accumulator{} - assert.NoError(t, tt.Start(&acc)) + require.NoError(t, tt.Start(&acc)) time.Sleep(11 * time.Millisecond) // will force timeout _, err = tmpfile.WriteString("[04/Jun/2016:12:41:48 +0100] INFO HelloExample: This is info\r\n") require.NoError(t, err) @@ -213,7 +212,7 @@ func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { require.NoError(t, tmpfile.Sync()) acc.Wait(3) tt.Stop() - assert.Equal(t, uint64(3), acc.NMetrics()) + require.Equal(t, uint64(3), acc.NMetrics()) expectedPath := tmpfile.Name() acc.AssertContainsTaggedFields(t, "tail_grok", @@ -254,9 +253,9 @@ func TestGrokParseLogFilesWithMultilineTailerCloseFlushesMultilineBuffer(t *test require.NoError(t, err) acc := testutil.Accumulator{} - assert.NoError(t, tt.Start(&acc)) + require.NoError(t, tt.Start(&acc)) acc.Wait(3) - assert.Equal(t, uint64(3), acc.NMetrics()) + require.Equal(t, uint64(3), acc.NMetrics()) // Close tailer, so multiline buffer is flushed tt.Stop() acc.Wait(4) @@ -561,7 +560,6 @@ func TestCharacterEncoding(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - plugin := &Tail{ Files: []string{filepath.Join(testdataDir, tt.testfiles)}, FromBeginning: tt.fromBeginning, diff --git a/plugins/inputs/tcp_listener/tcp_listener.go 
b/plugins/inputs/tcp_listener/tcp_listener.go index aedaa7276b41e..8eeaa9cff8091 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -3,7 +3,6 @@ package tcp_listener import ( "bufio" "fmt" - "log" "net" "sync" @@ -88,7 +87,7 @@ func (t *TCPListener) Start(acc telegraf.Accumulator) error { t.Lock() defer t.Unlock() - log.Println("W! DEPRECATED: the TCP listener plugin has been deprecated " + + t.Log.Warn("DEPRECATED: the TCP listener plugin has been deprecated " + "in favor of the socket_listener plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)") diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 9203318aff73e..e59115e6738b7 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -11,11 +11,10 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const ( @@ -96,10 +95,10 @@ func TestHighTrafficTCP(t *testing.T) { require.NoError(t, conn.(*net.TCPConn).CloseWrite()) buf := []byte{0} _, err = conn.Read(buf) - assert.Equal(t, err, io.EOF) + require.Equal(t, err, io.EOF) listener.Stop() - assert.Equal(t, 100000, int(acc.NMetrics())) + require.Equal(t, 100000, int(acc.NMetrics())) } func TestConnectTCP(t *testing.T) { @@ -168,14 +167,14 @@ func TestConcurrentConns(t *testing.T) { buf := make([]byte, 1500) n, err := conn.Read(buf) require.NoError(t, err) - assert.Equal(t, + require.Equal(t, "Telegraf maximum concurrent TCP connections (2) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", string(buf[:n])) _, err = conn.Read(buf) - assert.Equal(t, io.EOF, err) + require.Equal(t, io.EOF, err) } // Test that MaxTCPConnections is respected when max==1 @@ -203,14 +202,14 @@ func TestConcurrentConns1(t *testing.T) { buf := make([]byte, 1500) n, err := conn.Read(buf) require.NoError(t, err) - assert.Equal(t, + require.Equal(t, "Telegraf maximum concurrent TCP connections (1) reached, closing.\n"+ "You may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", string(buf[:n])) _, err = conn.Read(buf) - assert.Equal(t, io.EOF, err) + require.Equal(t, io.EOF, err) } // Test that MaxTCPConnections is respected diff --git a/plugins/inputs/tengine/tengine_test.go b/plugins/inputs/tengine/tengine_test.go index d91c97465aff1..595b8e3ba21f8 100644 --- a/plugins/inputs/tengine/tengine_test.go +++ b/plugins/inputs/tengine/tengine_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const tengineSampleResponse = `127.0.0.1,784,1511,2,2,1,0,1,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0` @@ -22,7 +22,7 @@ func TestTengineTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr, "127.0.0.1") - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index b4c4b52f85b6c..c629c37ff809b 100644 --- 
a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -44,7 +44,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { var stats map[string]interface{} if err = json.Unmarshal(body, &stats); err != nil { - return errors.New("Error decoding JSON response") + return errors.New("error decoding JSON response") } tags := make(map[string]string) @@ -124,11 +124,8 @@ func (t *Twemproxy) processServer( ) { fields := make(map[string]interface{}) for key, value := range data { - switch key { - default: - if val, ok := value.(float64); ok { - fields[key] = val - } + if val, ok := value.(float64); ok { + fields[key] = val } } acc.AddFields("twemproxy_pool_server", fields, tags) diff --git a/plugins/inputs/twemproxy/twemproxy_test.go b/plugins/inputs/twemproxy/twemproxy_test.go index 0da1694d557d8..8d5c5eef1be3c 100644 --- a/plugins/inputs/twemproxy/twemproxy_test.go +++ b/plugins/inputs/twemproxy/twemproxy_test.go @@ -5,8 +5,9 @@ import ( "net" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const sampleAddr = "127.0.0.1:22222" @@ -65,15 +66,12 @@ func mockTwemproxyServer() (net.Listener, error) { return nil, err } go func(l net.Listener) { - for { - conn, _ := l.Accept() - if _, err := conn.Write([]byte(sampleStats)); err != nil { - return - } - if err := conn.Close(); err != nil { - return - } - break + conn, _ := l.Accept() + if _, err := conn.Write([]byte(sampleStats)); err != nil { + return + } + if err := conn.Close(); err != nil { + return } }(listener) diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index 07cd79cb2a610..39fef79ce1b98 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -2,7 +2,6 @@ package udp_listener import ( "fmt" - "log" "net" "sync" "time" @@ -96,7 +95,7 @@ func (u *UDPListener) Start(acc telegraf.Accumulator) error { u.Lock() defer u.Unlock() - log.Println("W! 
DEPRECATED: the UDP listener plugin has been deprecated " + + u.Log.Warn("DEPRECATED: the UDP listener plugin has been deprecated " + "in favor of the socket_listener plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener)") @@ -172,8 +171,7 @@ func (u *UDPListener) udpListenLoop() { n, _, err := u.listener.ReadFromUDP(buf) if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - } else { + if err, ok := err.(net.Error); !ok || !err.Timeout() { u.Log.Error(err.Error()) } continue diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index d3900602441f1..e9994d7ebe4d6 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -4,8 +4,9 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func UnboundControl(output string) func(unbound Unbound) (*bytes.Buffer, error) { @@ -21,12 +22,12 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("unbound")) + require.True(t, acc.HasMeasurement("unbound")) - assert.Len(t, acc.Metrics, 1) - assert.Equal(t, acc.NFields(), 63) + require.Len(t, acc.Metrics, 1) + require.Equal(t, acc.NFields(), 63) acc.AssertContainsFields(t, "unbound", parsedFullOutput) } @@ -39,13 +40,13 @@ func TestParseFullOutputThreadAsTag(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("unbound")) - assert.True(t, acc.HasMeasurement("unbound_threads")) + require.True(t, acc.HasMeasurement("unbound")) + require.True(t, acc.HasMeasurement("unbound_threads")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, acc.NFields(), 63) + require.Len(t, acc.Metrics, 2) + require.Equal(t, acc.NFields(), 63) acc.AssertContainsFields(t, "unbound", parsedFullOutputThreadAsTagMeasurementUnbound) acc.AssertContainsFields(t, "unbound_threads", parsedFullOutputThreadAsTagMeasurementUnboundThreads) diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index f536e4b27c44f..9e260a5fe6974 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -78,20 +78,20 @@ func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { return nil } -func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { +func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, address *url.URL) error { var err error var r io.ReadCloser var s StatsServer - switch url.Scheme { + switch address.Scheme { case "tcp": - r, err = net.DialTimeout(url.Scheme, url.Host, time.Duration(u.Timeout)) + r, err = net.DialTimeout(address.Scheme, address.Host, time.Duration(u.Timeout)) if err != nil { return err } - s.source = url.Host + s.source = address.Host case "unix": - r, err = net.DialTimeout(url.Scheme, url.Path, time.Duration(u.Timeout)) + r, err = net.DialTimeout(address.Scheme, address.Path, time.Duration(u.Timeout)) if err != nil { return err } @@ -100,20 +100,20 @@ func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { s.source = "" } case "http": - resp, err := u.client.Get(url.String()) + resp, err := u.client.Get(address.String()) //nolint:bodyclose // response body is closed after switch if err != nil { return err } r = resp.Body - s.source = url.Host + s.source = address.Host default: - return fmt.Errorf("'%s' is not a supported 
scheme", url.Scheme) + return fmt.Errorf("'%s' is not a supported scheme", address.Scheme) } defer r.Close() if err := json.NewDecoder(r).Decode(&s); err != nil { - return fmt.Errorf("failed to decode json payload from '%s': %s", url.String(), err.Error()) + return fmt.Errorf("failed to decode json payload from '%s': %s", address.String(), err.Error()) } u.gatherStatServer(acc, &s) diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 088c08378c1ef..a5676e9d3789b 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" @@ -27,7 +27,7 @@ func TestGather(t *testing.T) { run: fakeVarnishStat(smOutput), Stats: []string{"*"}, } - assert.NoError(t, v.Gather(acc)) + require.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") for tag, fields := range parsedSmOutput { @@ -43,12 +43,12 @@ func TestParseFullOutput(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: []string{"*"}, } - assert.NoError(t, v.Gather(acc)) + require.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) - assert.Len(t, acc.Metrics, 6) - assert.Equal(t, 293, len(flat)) + require.Len(t, acc.Metrics, 6) + require.Equal(t, 293, len(flat)) } func TestFilterSomeStats(t *testing.T) { @@ -57,12 +57,12 @@ func TestFilterSomeStats(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: []string{"MGT.*", "VBE.*"}, } - assert.NoError(t, v.Gather(acc)) + require.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 16, len(flat)) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 16, len(flat)) } func TestFieldConfig(t *testing.T) { @@ -79,11 +79,11 @@ func TestFieldConfig(t *testing.T) { run: fakeVarnishStat(fullOutput), Stats: strings.Split(fieldCfg, ","), } - assert.NoError(t, v.Gather(acc)) + require.NoError(t, v.Gather(acc)) acc.HasMeasurement("varnish") flat := flatten(acc.Metrics) - assert.Equal(t, expected, len(flat)) + require.Equal(t, expected, len(flat)) } } diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index 9903647f8d4ee..b0aab7dfdd637 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -14,20 +14,20 @@ import ( "sync/atomic" "time" - "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/performance" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + + "github.com/influxdata/telegraf/filter" ) -var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$") +var isolateLUN = regexp.MustCompile(`.*/([^/]+)/?$`) -var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$") +var isIPv4 = regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`) -var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") +var isIPv6 = regexp.MustCompile(`^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$`) const maxSampleConst = 10 // Absolute maximum number of samples regardless of period @@ -115,14 +115,14 @@ func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, boo // NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed // as parameters. 
-func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegraf.Logger) (*Endpoint, error) { +func NewEndpoint(ctx context.Context, parent *VSphere, address *url.URL, log telegraf.Logger) (*Endpoint, error) { e := Endpoint{ - URL: url, + URL: address, Parent: parent, - hwMarks: NewTSCache(hwMarkTTL), + hwMarks: NewTSCache(hwMarkTTL, log), lun2ds: make(map[string]string), initialized: false, - clientFactory: NewClientFactory(url, parent), + clientFactory: NewClientFactory(address, parent), customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), log: log, @@ -457,9 +457,6 @@ func (e *Endpoint) discover(ctx context.Context) error { SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) numRes += int64(len(objects)) } - if err != nil { - e.log.Error(err) - } } // Build lun2ds map @@ -584,11 +581,11 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, te.Wait() } -func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { +func getDatacenters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { var resources []mo.Datacenter ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() - err := filter.FindAll(ctx1, &resources) + err := resourceFilter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -605,11 +602,11 @@ func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (o return m, nil } -func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { +func getClusters(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { var resources []mo.ClusterComputeResource ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() - err := filter.FindAll(ctx1, &resources) + err := resourceFilter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -657,9 +654,9 @@ func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (obje } //noinspection GoUnusedParameter -func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { +func getHosts(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { var resources []mo.HostSystem - err := filter.FindAll(ctx, &resources) + err := resourceFilter.FindAll(ctx, &resources) if err != nil { return nil, err } @@ -675,11 +672,11 @@ func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectM return m, nil } -func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { +func getVMs(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { var resources []mo.VirtualMachine ctx1, cancel1 := context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() - err := filter.FindAll(ctx1, &resources) + err := resourceFilter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -765,11 +762,11 @@ func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap return m, nil } -func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { +func getDatastores(ctx context.Context, e *Endpoint, resourceFilter *ResourceFilter) (objectMap, error) { var resources []mo.Datastore ctx1, cancel1 := 
context.WithTimeout(ctx, time.Duration(e.Parent.Timeout)) defer cancel1() - err := filter.FindAll(ctx1, &resources) + err := resourceFilter.FindAll(ctx1, &resources) if err != nil { return nil, err } @@ -888,7 +885,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects) numQs := 0 - for _, object := range res.objects { + for _, obj := range res.objects { timeBuckets := make(map[int64]*types.PerfQuerySpec) for metricIdx, metric := range res.metrics { // Determine time of last successful collection @@ -897,7 +894,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim e.log.Infof("Unable to find metric name for id %d. Skipping!", metric.CounterId) continue } - start, ok := e.hwMarks.Get(object.ref.Value, metricName) + start, ok := e.hwMarks.Get(obj.ref.Value, metricName) if !ok { start = latest.Add(time.Duration(-res.sampling) * time.Second * (time.Duration(e.Parent.MetricLookback) - 1)) } @@ -907,7 +904,7 @@ func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Tim bucket, ok := timeBuckets[start.Unix()] if !ok { bucket = &types.PerfQuerySpec{ - Entity: object.ref, + Entity: obj.ref, MaxSample: maxSampleConst, MetricId: make([]types.PerfMetricId, 0), IntervalId: res.sampling, @@ -1272,7 +1269,7 @@ func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resou } } -func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) { +func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (metricName string, fieldName string) { parts := strings.Split(metric, ".") if len(parts) == 1 { return prefix, parts[0] diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go index 8414ad8d81285..0af9ef91268de 100644 --- a/plugins/inputs/vsphere/finder.go +++ b/plugins/inputs/vsphere/finder.go @@ -35,14 +35,14 @@ type ResourceFilter struct { func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error { objs := make(map[string]types.ObjectContent) for _, p := range paths { - if err := f.find(ctx, resType, p, objs); err != nil { + if err := f.findResources(ctx, resType, p, objs); err != nil { return err } } if len(excludePaths) > 0 { excludes := make(map[string]types.ObjectContent) for _, p := range excludePaths { - if err := f.find(ctx, resType, p, excludes); err != nil { + if err := f.findResources(ctx, resType, p, excludes); err != nil { return err } } @@ -56,14 +56,14 @@ func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePath // Find returns the resources matching the specified path. 
func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error { objs := make(map[string]types.ObjectContent) - err := f.find(ctx, resType, path, objs) + err := f.findResources(ctx, resType, path, objs) if err != nil { return err } return objectContentToTypedArray(objs, dst) } -func (f *Finder) find(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error { +func (f *Finder) findResources(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error { p := strings.Split(path, "/") flt := make([]property.Filter, len(p)-1) for i := 1; i < len(p); i++ { @@ -107,7 +107,7 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, fields := []string{"name"} recurse := tokens[pos]["name"] == "**" - types := ct + objectTypes := ct if isLeaf { if af, ok := addFields[resType]; ok { fields = append(fields, af...) @@ -131,9 +131,9 @@ func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, } return nil } - types = []string{resType} // Only load wanted object type at leaf level + objectTypes = []string{resType} // Only load wanted object type at leaf level } - err = v.Retrieve(ctx, types, fields, &content) + err = v.Retrieve(ctx, objectTypes, fields, &content) if err != nil { return err } diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go index c312260c85b9b..78303b45df3c0 100644 --- a/plugins/inputs/vsphere/tscache.go +++ b/plugins/inputs/vsphere/tscache.go @@ -1,9 +1,10 @@ package vsphere import ( - "log" "sync" "time" + + "github.com/influxdata/telegraf" ) // TSCache is a cache of timestamps used to determine the validity of datapoints @@ -11,13 +12,15 @@ type TSCache struct { ttl time.Duration table map[string]time.Time mux sync.RWMutex + log telegraf.Logger } // NewTSCache creates a new TSCache with a specified time-to-live after which timestamps are discarded. -func NewTSCache(ttl time.Duration) *TSCache { +func NewTSCache(ttl time.Duration, log telegraf.Logger) *TSCache { return &TSCache{ ttl: ttl, table: make(map[string]time.Time), + log: log, } } @@ -32,7 +35,7 @@ func (t *TSCache) Purge() { n++ } } - log.Printf("D! [inputs.vsphere] purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) + t.log.Debugf("purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) } // IsNew returns true if the supplied timestamp for the supplied key is more recent than the @@ -56,10 +59,10 @@ func (t *TSCache) Get(key string, metricName string) (time.Time, bool) { } // Put updates the latest timestamp for the supplied key. 
-func (t *TSCache) Put(key string, metricName string, time time.Time) { +func (t *TSCache) Put(key string, metricName string, timestamp time.Time) { t.mux.Lock() defer t.mux.Unlock() - t.table[makeKey(key, metricName)] = time + t.table[makeKey(key, metricName)] = timestamp } func makeKey(resource string, metric string) string { diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go index 31bb0fdf08844..2e8654efcd6c6 100644 --- a/plugins/inputs/vsphere/vsphere_test.go +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -11,15 +11,16 @@ import ( "time" "unsafe" - "github.com/influxdata/telegraf/config" - itls "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/stretchr/testify/require" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/simulator" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + + "github.com/influxdata/telegraf/config" + itls "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/testutil" ) var configHeader = ` @@ -229,7 +230,6 @@ func TestParseConfig(t *testing.T) { tab, err := toml.Parse([]byte(c)) require.NoError(t, err) require.NotNil(t, tab) - } func TestConfigDurationParsing(t *testing.T) { @@ -313,6 +313,7 @@ func TestFinder(t *testing.T) { ctx := context.Background() c, err := NewClient(ctx, s.URL, v) + require.NoError(t, err) f := Finder{c} @@ -429,6 +430,7 @@ func TestFolders(t *testing.T) { v := defaultVSphere() c, err := NewClient(ctx, s.URL, v) + require.NoError(t, err) f := Finder{c} @@ -449,7 +451,7 @@ func TestFolders(t *testing.T) { testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/**", 4, "") } -func TestCollection(t *testing.T) { +func TestCollectionWithClusterMetrics(t *testing.T) { testCollection(t, false) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index a5ae58370ab4a..c7ceec815f0f8 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -6,14 +6,13 @@ package win_perf_counters import ( "errors" "fmt" + "strings" "testing" "time" - "strings" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestWinPerformanceQueryImplIntegration(t *testing.T) { @@ -30,15 +29,15 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { _, err = query.AddCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialized")) + require.True(t, strings.Contains(err.Error(), "uninitialized")) _, err = query.AddEnglishCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialized")) + require.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.CollectData() require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialized")) + require.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.Open() require.NoError(t, err) @@ -47,7 +46,7 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { hCounter, err = query.AddCounterToQuery(counterPath) require.NoError(t, 
err) - assert.NotEqual(t, 0, hCounter) + require.NotEqual(t, 0, hCounter) err = query.Close() require.NoError(t, err) @@ -57,11 +56,11 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { hCounter, err = query.AddEnglishCounterToQuery(counterPath) require.NoError(t, err) - assert.NotEqual(t, 0, hCounter) + require.NotEqual(t, 0, hCounter) cp, err := query.GetCounterPath(hCounter) require.NoError(t, err) - assert.True(t, strings.HasSuffix(cp, counterPath)) + require.True(t, strings.HasSuffix(cp, counterPath)) err = query.CollectData() require.NoError(t, err) @@ -76,19 +75,19 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { now := time.Now() mtime, err := query.CollectDataWithTime() require.NoError(t, err) - assert.True(t, mtime.Sub(now) < time.Second) + require.True(t, mtime.Sub(now) < time.Second) counterPath = "\\Process(*)\\% Processor Time" paths, err := query.ExpandWildCardPath(counterPath) require.NoError(t, err) require.NotNil(t, paths) - assert.True(t, len(paths) > 1) + require.True(t, len(paths) > 1) counterPath = "\\Process(_Total)\\*" paths, err = query.ExpandWildCardPath(counterPath) require.NoError(t, err) require.NotNil(t, paths) - assert.True(t, len(paths) > 1) + require.True(t, len(paths) > 1) err = query.Open() require.NoError(t, err) @@ -96,7 +95,7 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { counterPath = "\\Process(*)\\% Processor Time" hCounter, err = query.AddEnglishCounterToQuery(counterPath) require.NoError(t, err) - assert.NotEqual(t, 0, hCounter) + require.NotEqual(t, 0, hCounter) err = query.CollectData() require.NoError(t, err) @@ -111,7 +110,7 @@ func TestWinPerformanceQueryImplIntegration(t *testing.T) { arr, err = query.GetFormattedCounterArrayDouble(hCounter) } require.NoError(t, err) - assert.True(t, len(arr) > 0, "Too") + require.True(t, len(arr) > 0, "Too") err = query.Close() require.NoError(t, err) @@ -566,11 +565,11 @@ func TestWinPerfcountersCollect1Integration(t *testing.T) { time.Sleep(2000 * time.Millisecond) err = m.Gather(&acc) require.NoError(t, err) - assert.Len(t, acc.Metrics, 2) + require.Len(t, acc.Metrics, 2) for _, metric := range acc.Metrics { _, ok := metric.Fields[expectedCounter] - assert.True(t, ok) + require.True(t, ok) } } @@ -613,11 +612,11 @@ func TestWinPerfcountersCollect2Integration(t *testing.T) { err = m.Gather(&acc) require.NoError(t, err) - assert.Len(t, acc.Metrics, 4) + require.Len(t, acc.Metrics, 4) for _, metric := range acc.Metrics { _, ok := metric.Fields[expectedCounter] - assert.True(t, ok) + require.True(t, ok) } } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 969b518d0f2b0..998423e792db9 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type testCounter struct { @@ -237,7 +237,7 @@ func TestCounterPathParsing(t *testing.T) { for path, vals := range counterPathsAndRes { o, i, c, err := extractCounterInfoFromCounterPath(path) require.NoError(t, err) - require.True(t, assert.ObjectsAreEqual(vals, []string{o, i, c}), "arrays: %#v and %#v are not equal", vals, []string{o, i, c}) + require.Equalf(t, vals, []string{o, i, c}, "arrays: %#v and %#v 
are not equal", vals, []string{o, i, c}) } for _, path := range invalidCounterPaths { _, _, _, err := extractCounterInfoFromCounterPath(path) @@ -312,7 +312,7 @@ func TestParseConfigBasic(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 4) + require.Len(t, m.counters, 4) err = m.query.Close() require.NoError(t, err) @@ -323,7 +323,7 @@ func TestParseConfigBasic(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 4) + require.Len(t, m.counters, 4) err = m.query.Close() require.NoError(t, err) } @@ -349,7 +349,7 @@ func TestParseConfigNoInstance(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 2) + require.Len(t, m.counters, 2) err = m.query.Close() require.NoError(t, err) @@ -360,7 +360,7 @@ func TestParseConfigNoInstance(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 2) + require.Len(t, m.counters, 2) err = m.query.Close() require.NoError(t, err) } @@ -456,7 +456,7 @@ func TestParseConfigTotalExpansion(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 4) + require.Len(t, m.counters, 4) err = m.query.Close() require.NoError(t, err) @@ -478,7 +478,7 @@ func TestParseConfigTotalExpansion(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 2) + require.Len(t, m.counters, 2) err = m.query.Close() require.NoError(t, err) } @@ -503,7 +503,7 @@ func TestParseConfigExpand(t *testing.T) { require.NoError(t, err) err = m.ParseConfig() require.NoError(t, err) - assert.Len(t, m.counters, 4) + require.Len(t, m.counters, 4) err = m.query.Close() require.NoError(t, err) } @@ -629,7 +629,7 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { "objectname": "O", } acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) - assert.True(t, acc1.HasTimestamp(measurement, MetricTime)) + require.True(t, acc1.HasTimestamp(measurement, MetricTime)) } func TestGatherError(t *testing.T) { @@ -739,9 +739,9 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { } var acc1 testutil.Accumulator err = m.Gather(&acc1) - assert.Len(t, m.counters, 4) + require.Len(t, m.counters, 4) require.NoError(t, err) - assert.Len(t, acc1.Metrics, 2) + require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ "C1": float32(1.1), @@ -786,8 +786,8 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { //test before elapsing CounterRefreshRate counters are not refreshed err = m.Gather(&acc2) require.NoError(t, err) - assert.Len(t, m.counters, 4) - assert.Len(t, acc2.Metrics, 2) + require.Len(t, m.counters, 4) + require.Len(t, acc2.Metrics, 2) acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) @@ -797,7 +797,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { var acc3 testutil.Accumulator err = m.Gather(&acc3) require.NoError(t, err) - assert.Len(t, acc3.Metrics, 3) + require.Len(t, acc3.Metrics, 3) acc3.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc3.AssertContainsTaggedFields(t, measurement, fields2, tags2) @@ -831,9 +831,9 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { CountersRefreshInterval: config.Duration(time.Second * 10)} var acc1 testutil.Accumulator err = m.Gather(&acc1) - assert.Len(t, m.counters, 2) + require.Len(t, 
m.counters, 2) require.NoError(t, err) - assert.Len(t, acc1.Metrics, 2) + require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ "C1": float32(1.1), @@ -880,8 +880,8 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { //test before elapsing CounterRefreshRate counters are not refreshed err = m.Gather(&acc2) require.NoError(t, err) - assert.Len(t, m.counters, 2) - assert.Len(t, acc2.Metrics, 3) + require.Len(t, m.counters, 2) + require.Len(t, acc2.Metrics, 3) acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) acc2.AssertContainsTaggedFields(t, measurement, fields2, tags2) @@ -908,7 +908,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { var acc3 testutil.Accumulator err = m.Gather(&acc3) require.NoError(t, err) - assert.Len(t, acc3.Metrics, 2) + require.Len(t, acc3.Metrics, 2) fields4 := map[string]interface{}{ "C1": float32(1.1), "C2": float32(1.2), @@ -954,8 +954,8 @@ func TestGatherTotalNoExpansion(t *testing.T) { var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) - assert.Len(t, m.counters, 2) - assert.Len(t, acc1.Metrics, 2) + require.Len(t, m.counters, 2) + require.Len(t, acc1.Metrics, 2) fields1 := map[string]interface{}{ "C1": float32(1.1), "C2": float32(1.2), @@ -984,8 +984,8 @@ func TestGatherTotalNoExpansion(t *testing.T) { var acc2 testutil.Accumulator err = m.Gather(&acc2) require.NoError(t, err) - assert.Len(t, m.counters, 2) - assert.Len(t, acc2.Metrics, 1) + require.Len(t, m.counters, 2) + require.Len(t, acc2.Metrics, 1) acc2.AssertContainsTaggedFields(t, measurement, fields1, tags1) @@ -1013,14 +1013,14 @@ var stringArraySingleItem = []string{ func TestUTF16ToStringArray(t *testing.T) { singleItem := UTF16ToStringArray(unicodeStringListSingleItem) - assert.True(t, assert.ObjectsAreEqual(singleItem, stringArraySingleItem), "Not equal single arrays") + require.Equal(t, singleItem, stringArraySingleItem, "Not equal single arrays") noItem := UTF16ToStringArray(unicodeStringListNoItem) - assert.Nil(t, noItem) + require.Nil(t, noItem) engStrings := UTF16ToStringArray(unicodeStringListWithEnglishChars) - assert.True(t, assert.ObjectsAreEqual(engStrings, stringArrayWithEnglishChars), "Not equal eng arrays") + require.Equal(t, engStrings, stringArrayWithEnglishChars, "Not equal eng arrays") czechStrings := UTF16ToStringArray(unicodeStringListWithCzechChars) - assert.True(t, assert.ObjectsAreEqual(czechStrings, stringArrayWithCzechChars), "Not equal czech arrays") + require.Equal(t, czechStrings, stringArrayWithCzechChars, "Not equal czech arrays") } diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 69a75372dd086..153c8dfdd8a10 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -10,11 +10,11 @@ import ( "log" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" + + "github.com/influxdata/telegraf/testutil" ) //testData is DD wrapper for unit testing of WinServices @@ -136,8 +136,8 @@ func TestBasicInfo(t *testing.T) { mgrProvider: &FakeMgProvider{testErrors[0]}, } winServices.Init() - assert.NotEmpty(t, winServices.SampleConfig()) - assert.NotEmpty(t, winServices.Description()) + require.NotEmpty(t, winServices.SampleConfig()) + require.NotEmpty(t, winServices.Description()) } func TestMgrErrors(t *testing.T) { @@ -149,7 
+149,7 @@ func TestMgrErrors(t *testing.T) { var acc1 testutil.Accumulator err := winServices.Gather(&acc1) require.Error(t, err) - assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) + require.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) ////mgr.listServices error winServices = &WinServices{ @@ -159,7 +159,7 @@ func TestMgrErrors(t *testing.T) { var acc2 testutil.Accumulator err = winServices.Gather(&acc2) require.Error(t, err) - assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) + require.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) ////mgr.listServices error 2 winServices = &WinServices{ @@ -213,7 +213,7 @@ func TestGatherContainsTag(t *testing.T) { winServices.Init() var acc1 testutil.Accumulator require.NoError(t, winServices.Gather(&acc1)) - assert.Len(t, acc1.Errors, 0, "There should be no errors after gather") + require.Len(t, acc1.Errors, 0, "There should be no errors after gather") for _, s := range testSimpleData[0].services { fields := make(map[string]interface{}) diff --git a/plugins/inputs/wireguard/wireguard.go b/plugins/inputs/wireguard/wireguard.go index ded3328378230..068fe8c53a75f 100644 --- a/plugins/inputs/wireguard/wireguard.go +++ b/plugins/inputs/wireguard/wireguard.go @@ -2,12 +2,12 @@ package wireguard import ( "fmt" - "log" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "golang.zx2c4.com/wireguard/wgctrl" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) const ( @@ -26,7 +26,8 @@ var ( // Wireguard is an input that enumerates all Wireguard interfaces/devices on // the host, and reports gauge metrics for the device itself and its peers. type Wireguard struct { - Devices []string `toml:"devices"` + Devices []string `toml:"devices"` + Log telegraf.Logger `toml:"-"` client *wgctrl.Client } @@ -81,7 +82,7 @@ func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) { for _, name := range wg.Devices { dev, err := wg.client.Device(name) if err != nil { - log.Printf("W! 
[inputs.wireguard] No Wireguard device found with name %s", name) + wg.Log.Warnf("No Wireguard device found with name %s", name) continue } diff --git a/plugins/inputs/wireguard/wireguard_test.go b/plugins/inputs/wireguard/wireguard_test.go index 0cfdba75df50c..1eb8c308c4495 100644 --- a/plugins/inputs/wireguard/wireguard_test.go +++ b/plugins/inputs/wireguard/wireguard_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.zx2c4.com/wireguard/wgctrl/wgtypes" + + "github.com/influxdata/telegraf/testutil" ) func TestWireguard_gatherDeviceMetrics(t *testing.T) { @@ -36,7 +37,7 @@ func TestWireguard_gatherDeviceMetrics(t *testing.T) { wg.gatherDeviceMetrics(&acc, device) - assert.Equal(t, 3, acc.NFields()) + require.Equal(t, 3, acc.NFields()) acc.AssertDoesNotContainMeasurement(t, measurementPeer) acc.AssertContainsTaggedFields(t, measurementDevice, expectFields, expectTags) acc.AssertContainsTaggedFields(t, measurementDevice, expectGauges, expectTags) @@ -77,7 +78,7 @@ func TestWireguard_gatherDevicePeerMetrics(t *testing.T) { wg.gatherDevicePeerMetrics(&acc, device, peer) - assert.Equal(t, 6, acc.NFields()) + require.Equal(t, 6, acc.NFields()) acc.AssertDoesNotContainMeasurement(t, measurementDevice) acc.AssertContainsTaggedFields(t, measurementPeer, expectFields, expectTags) acc.AssertContainsTaggedFields(t, measurementPeer, expectGauges, expectTags) diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 29a0250d92b7f..dcca07c95c8b4 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -5,7 +5,6 @@ package wireless import ( "bytes" - "log" "os" "path" "strconv" @@ -51,7 +50,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { return err } - interfaces, err := loadWirelessTable(table) + interfaces, err := w.loadWirelessTable(table) if err != nil { return err } @@ -80,8 +79,8 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { return nil } -func loadWirelessTable(table []byte) ([]*wirelessInterface, error) { - var w []*wirelessInterface +func (w *Wireless) loadWirelessTable(table []byte) ([]*wirelessInterface, error) { + var wi []*wirelessInterface lines := bytes.Split(table, newLineByte) // iterate over interfaces @@ -99,10 +98,10 @@ func loadWirelessTable(table []byte) ([]*wirelessInterface, error) { values = append(values, v) } if len(values) != interfaceFieldLength { - log.Printf("E! 
[input.wireless] invalid length of interface values") + w.Log.Error("invalid length of interface values") continue } - w = append(w, &wirelessInterface{ + wi = append(wi, &wirelessInterface{ Interface: strings.Trim(fields[0], ":"), Status: values[0], Link: values[1], @@ -116,7 +115,7 @@ func loadWirelessTable(table []byte) ([]*wirelessInterface, error) { Beacon: values[9], }) } - return w, nil + return wi, nil } // loadPath can be used to read path firstly from config @@ -128,13 +127,13 @@ func (w *Wireless) loadPath() { } // proc can be used to read file paths from env -func proc(env, path string) string { +func proc(env, defaultPath string) string { // try to read full file path if p := os.Getenv(env); p != "" { return p } // return default path - return path + return defaultPath } func init() { diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index 20c10de88a347..dbf2f7aead031 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -6,7 +6,9 @@ package wireless import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var testInput = []byte(`Inter-| sta-| Quality | Discarded packets | Missed | WE @@ -43,11 +45,13 @@ func TestLoadWirelessTable(t *testing.T) { Beacon: int64(0), }, } - metrics, err := loadWirelessTable(testInput) - if err != nil { - t.Fatal(err) + + w := Wireless{ + Log: testutil.Logger{}, } + metrics, err := w.loadWirelessTable(testInput) + require.NoError(t, err) - as := assert.New(t) + as := require.New(t) as.Equal(metrics, expectedMetrics) } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index f0b0379109749..8e6ece05f45bf 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -14,8 +14,6 @@ import ( "time" "github.com/pion/dtls/v2" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -199,30 +197,28 @@ func TestTags(t *testing.T) { acc := testutil.Accumulator{} require.NoError(t, sc.Gather(&acc)) - assert.True(t, acc.HasMeasurement("x509_cert")) + require.True(t, acc.HasMeasurement("x509_cert")) - assert.True(t, acc.HasTag("x509_cert", "common_name")) - assert.Equal(t, "server.localdomain", acc.TagValue("x509_cert", "common_name")) + require.True(t, acc.HasTag("x509_cert", "common_name")) + require.Equal(t, "server.localdomain", acc.TagValue("x509_cert", "common_name")) - assert.True(t, acc.HasTag("x509_cert", "signature_algorithm")) - assert.Equal(t, "SHA256-RSA", acc.TagValue("x509_cert", "signature_algorithm")) + require.True(t, acc.HasTag("x509_cert", "signature_algorithm")) + require.Equal(t, "SHA256-RSA", acc.TagValue("x509_cert", "signature_algorithm")) - assert.True(t, acc.HasTag("x509_cert", "public_key_algorithm")) - assert.Equal(t, "RSA", acc.TagValue("x509_cert", "public_key_algorithm")) + require.True(t, acc.HasTag("x509_cert", "public_key_algorithm")) + require.Equal(t, "RSA", acc.TagValue("x509_cert", "public_key_algorithm")) - assert.True(t, acc.HasTag("x509_cert", "issuer_common_name")) - assert.Equal(t, "Telegraf Test CA", acc.TagValue("x509_cert", "issuer_common_name")) + require.True(t, acc.HasTag("x509_cert", "issuer_common_name")) + require.Equal(t, "Telegraf Test CA", acc.TagValue("x509_cert", "issuer_common_name")) - assert.True(t, acc.HasTag("x509_cert", "san")) - 
assert.Equal(t, "localhost,127.0.0.1", acc.TagValue("x509_cert", "san")) + require.True(t, acc.HasTag("x509_cert", "san")) + require.Equal(t, "localhost,127.0.0.1", acc.TagValue("x509_cert", "san")) - assert.True(t, acc.HasTag("x509_cert", "serial_number")) + require.True(t, acc.HasTag("x509_cert", "serial_number")) serialNumber := new(big.Int) _, validSerialNumber := serialNumber.SetString(acc.TagValue("x509_cert", "serial_number"), 16) - if !validSerialNumber { - t.Errorf("Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number")) - } - assert.Equal(t, big.NewInt(1), serialNumber) + require.Truef(t, validSerialNumber, "Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number")) + require.Equal(t, big.NewInt(1), serialNumber) } func TestGatherChain(t *testing.T) { @@ -288,8 +284,8 @@ func TestGatherUDPCert(t *testing.T) { var acc testutil.Accumulator require.NoError(t, m.Gather(&acc)) - assert.Len(t, acc.Errors, 0) - assert.True(t, acc.HasMeasurement("x509_cert")) + require.Len(t, acc.Errors, 0) + require.True(t, acc.HasMeasurement("x509_cert")) } func TestStrings(t *testing.T) { @@ -328,7 +324,7 @@ func TestGatherCertIntegration(t *testing.T) { var acc testutil.Accumulator require.NoError(t, m.Gather(&acc)) - assert.True(t, acc.HasMeasurement("x509_cert")) + require.True(t, acc.HasMeasurement("x509_cert")) } func TestGatherCertMustNotTimeout(t *testing.T) { @@ -345,7 +341,7 @@ func TestGatherCertMustNotTimeout(t *testing.T) { var acc testutil.Accumulator require.NoError(t, m.Gather(&acc)) require.Empty(t, acc.Errors) - assert.True(t, acc.HasMeasurement("x509_cert")) + require.True(t, acc.HasMeasurement("x509_cert")) } func TestSourcesToURLs(t *testing.T) { @@ -354,8 +350,8 @@ func TestSourcesToURLs(t *testing.T) { } require.NoError(t, m.Init()) - assert.Equal(t, len(m.globpaths), 2) - assert.Equal(t, len(m.locations), 2) + require.Equal(t, len(m.globpaths), 2) + require.Equal(t, len(m.locations), 2) } func TestServerName(t *testing.T) { @@ -385,11 +381,11 @@ func TestServerName(t *testing.T) { require.NoError(t, err) actual, err := sc.serverName(u) if test.err { - assert.Error(t, err) + require.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) } - assert.Equal(t, test.expected, actual) + require.Equal(t, test.expected, actual) }) } } diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index e679de5c47223..c230736ad195f 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace" @@ -108,8 +109,8 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error { z.address = ln.Addr().String() z.Log.Infof("Started the zipkin listener on %s", z.address) + wg.Add(1) go func() { - wg.Add(1) defer wg.Done() z.Listen(ln, acc) @@ -140,7 +141,7 @@ func (z *Zipkin) Listen(ln net.Listener, acc telegraf.Accumulator) { // This interferes with telegraf's internal data collection, // by making it appear as if a serious error occurred. if err != http.ErrServerClosed { - acc.AddError(fmt.Errorf("E! 
Error listening: %v", err)) + acc.AddError(fmt.Errorf("error listening: %v", err)) } } } diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 0c0bab279cc7f..f2f07c5723db8 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -649,10 +650,12 @@ func postThriftData(datafile, address, contentType string) error { req.Header.Set("Content-Type", contentType) client := &http.Client{} - _, err = client.Do(req) + resp, err := client.Do(req) if err != nil { return fmt.Errorf("HTTP POST request to zipkin endpoint %s failed %v", address, err) } + defer resp.Body.Close() + return nil } diff --git a/plugins/inputs/zookeeper/zookeeper_test.go b/plugins/inputs/zookeeper/zookeeper_test.go index bbc2a37cb5cb4..f2e3edcf7e3c8 100644 --- a/plugins/inputs/zookeeper/zookeeper_test.go +++ b/plugins/inputs/zookeeper/zookeeper_test.go @@ -3,9 +3,9 @@ package zookeeper import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestZookeeperGeneratesMetricsIntegration(t *testing.T) { @@ -37,6 +37,6 @@ func TestZookeeperGeneratesMetricsIntegration(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("zookeeper", metric), metric) + require.True(t, acc.HasInt64Field("zookeeper", metric), metric) } } From 2a0c3059a1c6b1ab6c027b44d7c456f250ad6f95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 18 Nov 2021 17:04:52 +0100 Subject: [PATCH 049/133] fix: Linter fixes for plugins/inputs/s* (#10104) Co-authored-by: Pawel Zak --- plugins/inputs/sensors/sensors_test.go | 4 +- plugins/inputs/snmp/snmp.go | 10 ++-- plugins/inputs/snmp/snmp_mocks_test.go | 3 + plugins/inputs/snmp/snmp_test.go | 3 +- plugins/inputs/snmp_legacy/snmp_legacy.go | 30 +++++----- plugins/inputs/snmp_trap/snmp_trap_test.go | 6 +- .../socket_listener/socket_listener_test.go | 59 +++++++++++-------- plugins/inputs/solr/solr.go | 20 +++---- plugins/inputs/sqlserver/sqlserver.go | 48 +++++++-------- plugins/inputs/sqlserver/sqlserver_test.go | 20 +++++-- plugins/inputs/stackdriver/stackdriver.go | 44 +++++++------- plugins/inputs/statsd/datadog.go | 5 +- plugins/inputs/statsd/statsd.go | 16 +++-- plugins/inputs/statsd/statsd_test.go | 39 ++++++------ plugins/inputs/suricata/suricata_test.go | 7 ++- plugins/inputs/synproxy/synproxy_test.go | 16 ++--- plugins/inputs/syslog/syslog.go | 11 ++-- plugins/inputs/sysstat/sysstat.go | 8 +-- plugins/inputs/sysstat/sysstat_test.go | 4 +- plugins/inputs/system/ps.go | 16 ++--- 20 files changed, 199 insertions(+), 170 deletions(-) diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index be4cace6eab79..fe1d62ceceeb0 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -367,7 +367,7 @@ Vcore Voltage: // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd, _ := args[3], args[4:] if cmd == "sensors" { //nolint:errcheck,revive @@ -375,7 +375,9 @@ Vcore Voltage: } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for 
this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index a2259e88179c2..c4a2b80b28174 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -16,6 +16,7 @@ import ( "time" "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" @@ -679,7 +680,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case float32: v = float64(vt) / math.Pow10(d) case float64: - v = float64(vt) / math.Pow10(d) + v = vt / math.Pow10(d) case int: v = float64(vt) / math.Pow10(d) case int8: @@ -766,7 +767,8 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return v, nil } - if endian == "LittleEndian" { + switch endian { + case "LittleEndian": switch bit { case "uint64": v = binary.LittleEndian.Uint64(bv) @@ -777,7 +779,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { default: return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) } - } else if endian == "BigEndian" { + case "BigEndian": switch bit { case "uint64": v = binary.BigEndian.Uint64(bv) @@ -788,7 +790,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { default: return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) } - } else { + default: return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) } diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 1927db23246b4..850f6b83830bc 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -46,6 +46,7 @@ func TestMockExecCommand(_ *testing.T) { cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "Unmocked command. 
Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) + //nolint:revive // error code is important for this "test" os.Exit(1) } //nolint:errcheck,revive @@ -53,8 +54,10 @@ func TestMockExecCommand(_ *testing.T) { //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "%s", mcr.stderr) if mcr.exitError { + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 49c9bf381b107..b5345248441ad 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -850,11 +850,12 @@ func TestFieldConvert(t *testing.T) { conv string expected interface{} }{ - {[]byte("foo"), "", string("foo")}, + {[]byte("foo"), "", "foo"}, {"0.123", "float", float64(0.123)}, {[]byte("0.123"), "float", float64(0.123)}, {float32(0.123), "float", float64(float32(0.123))}, {float64(0.123), "float", float64(0.123)}, + {float64(0.123123123123), "float", float64(0.123123123123)}, {123, "float", float64(123)}, {123, "float(0)", float64(123)}, {123, "float(4)", float64(0.0123)}, diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 604a2205c0d2c..ce454cbfbad36 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -8,10 +8,10 @@ import ( "strings" "time" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/gosnmp/gosnmp" ) // Snmp is a snmp plugin @@ -46,9 +46,9 @@ type Host struct { // Table Table []HostTable // Oids - getOids []Data - bulkOids []Data - tables []HostTable + internalGetOids []Data + bulkOids []Data + tables []HostTable // array of processed oids // to skip oid duplication processedOids []string @@ -250,7 +250,7 @@ func fillnode(parentNode Node, oidName string, ids []string) { } } -func findnodename(node Node, ids []string) (string, string) { +func findNodeName(node Node, ids []string) (oidName string, instance string) { // ids = ["1", "3", "6", ...] 
if len(ids) == 1 { return node.name, ids[0] @@ -259,7 +259,7 @@ func findnodename(node Node, ids []string) (string, string) { // Get node subnode, ok := node.subnodes[id] if ok { - return findnodename(subnode, ids) + return findNodeName(subnode, ids) } // We got a node // Get node name @@ -345,7 +345,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { oid.rawOid = oidstring } } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } for _, oidName := range host.Collect { @@ -362,7 +362,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.rawOid = oid.Oid } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } } // Get GETBULK oids @@ -463,7 +463,7 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list + // Add the new oid to bulkOids list h.bulkOids = append(h.bulkOids, oid) } } @@ -569,8 +569,8 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list - h.getOids = append(h.getOids, oid) + // Add the new oid to internalGetOids list + h.internalGetOids = append(h.internalGetOids, oid) } } default: @@ -606,7 +606,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { defer snmpClient.Conn.Close() // Prepare OIDs oidsList := make(map[string]Data) - for _, oid := range h.getOids { + for _, oid := range h.internalGetOids { oidsList[oid.rawOid] = oid } oidsNameList := make([]string, 0, len(oidsList)) @@ -701,7 +701,7 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { // Prepare host and port host, portStr, err := net.SplitHostPort(h.Address) if err != nil { - portStr = string("161") + portStr = "161" } // convert port_str to port in uint16 port64, err := strconv.ParseUint(portStr, 10, 16) @@ -763,7 +763,7 @@ func (h *Host) HandleResponse( var oidName string var instance string // Get oidname and instance from translate file - oidName, instance = findnodename(initNode, + oidName, instance = findNodeName(initNode, strings.Split(variable.Name[1:], ".")) // Set instance tag // From mapping table diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index f917a7bbff918..19e9f99bda899 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -10,11 +10,10 @@ import ( "time" "github.com/gosnmp/gosnmp" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/require" ) func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { @@ -1267,7 +1266,7 @@ func TestReceiveTrap(t *testing.T) { return mibEntry{entry.e.mibName, entry.e.oidText}, nil } } - return mibEntry{}, fmt.Errorf("Unexpected oid") + return mibEntry{}, fmt.Errorf("unexpected oid") }, //if cold start be answer otherwise err Log: testutil.Logger{}, @@ -1311,7 +1310,6 @@ func TestReceiveTrap(t *testing.T) { testutil.SortMetrics()) }) } - } func TestGosmiSingleMib(t *testing.T) { diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index a3ccacae1ceb2..1d363d8504669 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -12,19 +12,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" 
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/wlog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") -// testEmptyLog is a helper function to ensure no data is written to log. +// prepareLog is a helper function to ensure no data is written to log. // Should be called at the start of the test, and returns a function which should run at the end. -func testEmptyLog(t *testing.T) func() { +func prepareLog(t *testing.T) func() { buf := bytes.NewBuffer(nil) log.SetOutput(wlog.NewWriter(buf)) @@ -37,16 +37,17 @@ func testEmptyLog(t *testing.T) func() { for { line, err := buf.ReadBytes('\n') if err != nil { - assert.Equal(t, io.EOF, err) + require.Equal(t, io.EOF, err) break } - assert.Empty(t, string(line), "log not empty") + require.Empty(t, string(line), "log not empty") } } } func TestSocketListener_tcp_tls(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -84,8 +85,8 @@ func TestSocketListener_unix_tls(t *testing.T) { defer sl.Stop() tlsCfg, err := pki.TLSClientConfig().TLSConfig() - tlsCfg.InsecureSkipVerify = true require.NoError(t, err) + tlsCfg.InsecureSkipVerify = true secureClient, err := tls.Dial("unix", sock, tlsCfg) require.NoError(t, err) @@ -94,7 +95,8 @@ func TestSocketListener_unix_tls(t *testing.T) { } func TestSocketListener_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -113,7 +115,8 @@ func TestSocketListener_tcp(t *testing.T) { } func TestSocketListener_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -137,7 +140,8 @@ func TestSocketListener_unix(t *testing.T) { defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() f, _ := os.Create(sock) require.NoError(t, f.Close()) @@ -167,7 +171,8 @@ func TestSocketListener_unixgram(t *testing.T) { defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() _, err = os.Create(sock) require.NoError(t, err) @@ -188,7 +193,8 @@ func TestSocketListener_unixgram(t *testing.T) { } func TestSocketListenerDecode_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -208,7 +214,8 @@ func TestSocketListenerDecode_tcp(t *testing.T) { } func TestSocketListenerDecode_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -256,18 +263,18 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { m3 := acc.Metrics[2] acc.Unlock() - assert.Equal(t, "test", m1.Measurement) - assert.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) - assert.True(t, time.Unix(0, 123456789).Equal(m1.Time)) + require.Equal(t, "test", m1.Measurement) + require.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) + require.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) + 
require.True(t, time.Unix(0, 123456789).Equal(m1.Time)) - assert.Equal(t, "test", m2.Measurement) - assert.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) - assert.True(t, time.Unix(0, 123456790).Equal(m2.Time)) + require.Equal(t, "test", m2.Measurement) + require.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) + require.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) + require.True(t, time.Unix(0, 123456790).Equal(m2.Time)) - assert.Equal(t, "test", m3.Measurement) - assert.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) - assert.True(t, time.Unix(0, 123456791).Equal(m3.Time)) + require.Equal(t, "test", m3.Measurement) + require.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) + require.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) + require.True(t, time.Unix(0, 123456791).Equal(m3.Time)) } diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 08531e7433b34..c74c3bcf6b09e 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -202,7 +202,7 @@ func getCoresFromStatus(adminCoresStatus *AdminCoresStatus) []string { // Add core metrics from admin to accumulator // This is the only point where size_in_bytes is available (as far as I checked) -func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, time time.Time) { +func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, measurementTime time.Time) { for core, metrics := range adminCoreStatus.Status { coreFields := map[string]interface{}{ "deleted_docs": metrics.Index.DeletedDocs, @@ -214,13 +214,13 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo "solr_admin", coreFields, map[string]string{"core": core}, - time, + measurementTime, ) } } // Add core metrics section to accumulator -func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var coreMetrics map[string]Core if len(mBeansData.SolrMbeans) < 2 { return fmt.Errorf("no core metric data to unmarshal") @@ -243,14 +243,14 @@ func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBea map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil } // Add query metrics section to accumulator -func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var queryMetrics map[string]QueryHandler if len(mBeansData.SolrMbeans) < 4 { @@ -284,7 +284,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -324,7 +324,7 @@ func convertQueryHandlerMap(value map[string]interface{}) map[string]interface{} } // Add update metrics section to accumulator -func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var updateMetrics map[string]UpdateHandler if 
len(mBeansData.SolrMbeans) < 6 { @@ -363,7 +363,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -404,7 +404,7 @@ func getInt(unk interface{}) int64 { } // Add cache metrics section to accumulator -func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { if len(mBeansData.SolrMbeans) < 8 { return fmt.Errorf("no cache metric data to unmarshal") } @@ -444,7 +444,7 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 86418a2e65054..def051836c024 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -4,13 +4,13 @@ import ( "database/sql" "errors" "fmt" - "log" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/adal" mssql "github.com/denisenkom/go-mssqldb" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" @@ -18,18 +18,20 @@ import ( // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - AuthMethod string `toml:"auth_method"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - DatabaseType string `toml:"database_type"` - IncludeQuery []string `toml:"include_query"` - ExcludeQuery []string `toml:"exclude_query"` - HealthMetric bool `toml:"health_metric"` - pools []*sql.DB - queries MapQuery - adalToken *adal.Token - muCacheLock sync.RWMutex + Servers []string `toml:"servers"` + AuthMethod string `toml:"auth_method"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + DatabaseType string `toml:"database_type"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` + Log telegraf.Logger `toml:"-"` + + pools []*sql.DB + queries MapQuery + adalToken *adal.Token + muCacheLock sync.RWMutex } // Query struct @@ -142,10 +144,10 @@ type scanner interface { Scan(dest ...interface{}) error } -func initQueries(s *SQLServer) error { +func (s *SQLServer) initQueries() error { s.queries = make(MapQuery) queries := s.queries - log.Printf("I! [inputs.sqlserver] Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) + s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) // New config option database_type // To prevent query definition conflicts @@ -202,7 +204,7 @@ func initQueries(s *SQLServer) error { } // Decide if we want to run version 1 or version 2 queries if s.QueryVersion == 2 { - log.Println("W! 
DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.") + s.Log.Warn("DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false} queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false} @@ -213,7 +215,7 @@ func initQueries(s *SQLServer) error { queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} } else { - log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.") + s.Log.Warn("DEPRECATED: query_version=1 has been deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false} queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false} @@ -242,7 +244,7 @@ func initQueries(s *SQLServer) error { for query := range queries { querylist = append(querylist, query) } - log.Printf("I! [inputs.sqlserver] Config: Effective Queries: %#v\n", querylist) + s.Log.Infof("Config: Effective Queries: %#v\n", querylist) return nil } @@ -283,7 +285,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { // Start initialize a list of connection pools func (s *SQLServer) Start(acc telegraf.Accumulator) error { - if err := initQueries(s); err != nil { + if err := s.initQueries(); err != nil { acc.AddError(err) return err } @@ -355,11 +357,11 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul // Error msg based on the format in SSMS. SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx if sqlerr, ok := err.(mssql.Error); ok { - return fmt.Errorf("Query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, + return fmt.Errorf("query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err) } - return fmt.Errorf("Query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) + return fmt.Errorf("query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) } defer rows.Close() @@ -425,7 +427,7 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e // values for header, val := range columnMap { if _, ok := (*val).(string); !ok { - fields[header] = (*val) + fields[header] = *val } } // add fields to Accumulator @@ -476,7 +478,7 @@ func (s *SQLServer) getDatabaseTypeToLog() string { func (s *SQLServer) Init() error { if len(s.Servers) == 0 { - log.Println("W! 
Warning: Server list is empty.") + s.Log.Warn("Warning: Server list is empty.") } return nil diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index a9a022bd23fa7..9d1ee29187e22 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -32,8 +32,9 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { QueryVersion: 2, IncludeQuery: test["IncludeQuery"].([]string), ExcludeQuery: test["ExcludeQuery"].([]string), + Log: testutil.Logger{}, } - require.NoError(t, initQueries(&s)) + require.NoError(t, s.initQueries()) require.Equal(t, len(s.queries), test["queriesTotal"].(int)) for _, query := range test["queries"].([]string) { require.Contains(t, s.queries, query) @@ -116,10 +117,12 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { s := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -151,11 +154,13 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { s := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"DatabaseSize"}, HealthMetric: true, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -192,12 +197,14 @@ func TestSqlServer_HealthMetric(t *testing.T) { IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, HealthMetric: true, AuthMethod: "connection_string", + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{fakeServer1}, IncludeQuery: []string{"DatabaseSize"}, AuthMethod: "connection_string", + Log: testutil.Logger{}, } // acc1 should have the health metric because it is specified in the config @@ -225,16 +232,17 @@ func TestSqlServer_HealthMetric(t *testing.T) { } func TestSqlServer_MultipleInit(t *testing.T) { - s := &SQLServer{} + s := &SQLServer{Log: testutil.Logger{}} s2 := &SQLServer{ ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } - require.NoError(t, initQueries(s)) + require.NoError(t, s.initQueries()) _, ok := s.queries["DatabaseSize"] require.True(t, ok) - require.NoError(t, initQueries(s2)) + require.NoError(t, s.initQueries()) _, ok = s2.queries["DatabaseSize"] require.False(t, ok) s.Stop() @@ -335,11 +343,13 @@ func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { Servers: []string{testServer}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, DatabaseType: "AzureSQLDB", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -376,11 +386,13 @@ func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { Servers: []string{testServer2019}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } s2012 := &SQLServer{ Servers: []string{testServer2012}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } var acc2019, acc2012 testutil.Accumulator diff --git 
a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index b1d6ea59d2f3b..648e82624a1ea 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -10,18 +10,19 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/internal/limiter" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. - "github.com/influxdata/telegraf/selfstat" "google.golang.org/api/iterator" distributionpb "google.golang.org/genproto/googleapis/api/distribution" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. + "github.com/influxdata/telegraf/selfstat" ) const ( @@ -312,8 +313,8 @@ func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { } wg.Wait() - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } return nil @@ -643,35 +644,34 @@ func (s *Stackdriver) gatherTimeSeries( } // AddDistribution adds metrics from a distribution value type. -func (s *Stackdriver) addDistribution( - metric *distributionpb.Distribution, - tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +func (s *Stackdriver) addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time, + grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, ) error { field := tsConf.fieldKey name := tsConf.measurement - if err := grouper.Add(name, tags, ts, field+"_count", metric.Count); err != nil { + if err := grouper.Add(name, tags, ts, field+"_count", dist.Count); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_mean", metric.Mean); err != nil { + if err := grouper.Add(name, tags, ts, field+"_mean", dist.Mean); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation); err != nil { + if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", dist.SumOfSquaredDeviation); err != nil { return err } - if metric.Range != nil { - if err := grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min); err != nil { + if dist.Range != nil { + if err := grouper.Add(name, tags, ts, field+"_range_min", dist.Range.Min); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max); err != nil { + if err := grouper.Add(name, tags, ts, field+"_range_max", dist.Range.Max); err != nil { return err } } - linearBuckets := metric.BucketOptions.GetLinearBuckets() - exponentialBuckets := metric.BucketOptions.GetExponentialBuckets() - explicitBuckets := metric.BucketOptions.GetExplicitBuckets() + linearBuckets := dist.BucketOptions.GetLinearBuckets() + exponentialBuckets := dist.BucketOptions.GetExponentialBuckets() + explicitBuckets := dist.BucketOptions.GetExplicitBuckets() 
var numBuckets int32 if linearBuckets != nil { @@ -704,8 +704,8 @@ func (s *Stackdriver) addDistribution( // Add to the cumulative count; trailing buckets with value 0 are // omitted from the response. - if i < int32(len(metric.BucketCounts)) { - count += metric.BucketCounts[i] + if i < int32(len(dist.BucketCounts)) { + count += dist.BucketCounts[i] } if err := grouper.Add(name, tags, ts, field+"_bucket", count); err != nil { return err diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index 77a01f5586a7b..df35198b129d3 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -120,11 +120,10 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam case "s:": fields["source_type_name"] = rawMetadataFields[i][2:] default: - if rawMetadataFields[i][0] == '#' { - parseDataDogTags(tags, rawMetadataFields[i][1:]) - } else { + if rawMetadataFields[i][0] != '#' { return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) } + parseDataDogTags(tags, rawMetadataFields[i][1:]) } } // Use source tag because host is reserved tag key in Telegraf. diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index fbbfef251adf9..d23a79225c392 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -11,13 +11,14 @@ import ( "sync" "time" + "github.com/pkg/errors" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" - "github.com/pkg/errors" ) const ( @@ -745,10 +746,10 @@ func (s *Statsd) parseStatsdLine(line string) error { // config file. If there is a match, it will parse the name of the metric and // map of tags. 
// Return values are (, , ) -func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { +func (s *Statsd) parseName(bucket string) (name string, field string, tags map[string]string) { s.Lock() defer s.Unlock() - tags := make(map[string]string) + tags = make(map[string]string) bucketparts := strings.Split(bucket, ",") // Parse out any tags in the bucket @@ -761,8 +762,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } } - var field string - name := bucketparts[0] + name = bucketparts[0] p := s.graphiteParser var err error @@ -789,10 +789,8 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } // Parse the key,value out of a string that looks like "key=value" -func parseKeyValue(keyvalue string) (string, string) { - var key, val string - - split := strings.Split(keyvalue, "=") +func parseKeyValue(keyValue string) (key string, val string) { + split := strings.Split(keyValue, "=") // Must be exactly 2 to get anything meaningful out of them if len(split) == 2 { key = split[0] diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index a236d638ba330..48889aa43bf67 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -7,11 +7,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -53,19 +52,19 @@ func TestConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected when max==1 @@ -84,17 +83,17 @@ func TestConcurrentConns1(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected @@ -112,9 +111,9 @@ func TestCloseConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) listener.Stop() } @@ -156,7 +155,7 @@ func sendRequests(conn net.Conn, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 25000; i++ { //nolint:errcheck,revive - fmt.Fprintf(conn, testMsg) + fmt.Fprint(conn, testMsg) } } @@ 
-476,7 +475,7 @@ func TestParse_Distributions(t *testing.T) { parseMetrics() for key, value := range validMeasurementMap { field := map[string]interface{}{ - "value": float64(value), + "value": value, } acc.AssertContainsFields(t, key, field) } @@ -1570,7 +1569,7 @@ func testValidateGauge( } if valueExpected != valueActual { - return fmt.Errorf("Measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) + return fmt.Errorf("measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) } return nil } @@ -1590,6 +1589,8 @@ func TestTCP(t *testing.T) { addr := statsd.TCPlistener.Addr().String() conn, err := net.Dial("tcp", addr) + require.NoError(t, err) + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) require.NoError(t, conn.Close()) diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index f3fc5f14eb394..cd13676cf6fae 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -11,9 +11,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` @@ -388,11 +389,13 @@ func TestSuricataParse(t *testing.T) { for _, tc := range tests { data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) + s := Suricata{ Delimiter: "_", } acc := testutil.Accumulator{} - s.parse(&acc, data) + err = s.parse(&acc, data) + require.NoError(t, err) testutil.RequireMetricsEqual(t, tc.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index e8fbe62989055..0f50322666fd7 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -7,9 +7,9 @@ import ( "os" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestSynproxyFileNormal(t *testing.T) { @@ -38,8 +38,8 @@ func TestSynproxyFileHeaderMismatch(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid number of columns in data") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid number of columns in data") } func TestSynproxyFileInvalidHex(t *testing.T) { @@ -52,8 +52,8 @@ func TestSynproxyFileInvalidHex(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid value") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid value") } func TestNoSynproxyFile(t *testing.T) { @@ -69,7 +69,7 @@ func TestNoSynproxyFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } // Valid Synproxy file @@ -149,7 +149,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) acc.AssertContainsFields(t, "synproxy", telegrafData) } diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go 
index fc7eab1fa0828..bfc6f9283990c 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -13,7 +13,7 @@ import ( "time" "unicode" - syslog "github.com/influxdata/go-syslog/v3" + "github.com/influxdata/go-syslog/v3" "github.com/influxdata/go-syslog/v3/nontransparent" "github.com/influxdata/go-syslog/v3/octetcounting" "github.com/influxdata/go-syslog/v3/rfc3164" @@ -205,7 +205,7 @@ func (s *Syslog) Stop() { // getAddressParts returns the address scheme and host // it also sets defaults for them when missing // when the input address does not specify the protocol it returns an error -func getAddressParts(a string) (string, string, error) { +func getAddressParts(a string) (scheme string, host string, err error) { parts := strings.SplitN(a, "://", 2) if len(parts) != 2 { return "", "", fmt.Errorf("missing protocol within address '%s'", a) @@ -220,7 +220,6 @@ func getAddressParts(a string) (string, string, error) { return parts[0], parts[1], nil } - var host string if u.Hostname() != "" { host = u.Hostname() } @@ -259,7 +258,7 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { message, err := p.Parse(b[:n]) if message != nil { - acc.AddFields("syslog", fields(message, s), tags(message), s.time()) + acc.AddFields("syslog", fields(message, s), tags(message), s.currentTime()) } if err != nil { acc.AddError(err) @@ -383,7 +382,7 @@ func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { acc.AddError(res.Error) } if res.Message != nil { - acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.time()) + acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.currentTime()) } } @@ -473,7 +472,7 @@ func (uc unixCloser) Close() error { return err } -func (s *Syslog) time() time.Time { +func (s *Syslog) currentTime() time.Time { t := s.now() if t == s.lastTime { t = t.Add(time.Nanosecond) diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 7e69ff41ccdf2..c9ac67afcffef 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -241,9 +241,9 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e } r := bufio.NewReader(stdout) - csv := csv.NewReader(r) - csv.Comma = '\t' - csv.FieldsPerRecord = 6 + csvReader := csv.NewReader(r) + csvReader.Comma = '\t' + csvReader.FieldsPerRecord = 6 var measurement string // groupData to accumulate data when Group=true type groupData struct { @@ -252,7 +252,7 @@ func (s *Sysstat) parse(acc telegraf.Accumulator, option string, ts time.Time) e } m := make(map[string]groupData) for { - record, err := csv.Read() + record, err := csvReader.Read() if err == io.EOF { break } diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 64b596bb329ba..f4f96823a887d 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -10,8 +10,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var s = Sysstat{ @@ -310,5 +311,6 @@ dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30 default: } // some code here to check arguments perhaps? 
+ //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index d835d02633d02..70605cf4fc10a 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,13 +5,13 @@ import ( "path/filepath" "strings" - "github.com/influxdata/telegraf/internal" - "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" + + "github.com/influxdata/telegraf/internal" ) type PS interface { @@ -46,18 +46,18 @@ type SystemPSDisk struct{} func (s *SystemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { var cpuTimes []cpu.TimesStat if perCPU { - if perCPUTimes, err := cpu.Times(true); err == nil { - cpuTimes = append(cpuTimes, perCPUTimes...) - } else { + perCPUTimes, err := cpu.Times(true) + if err != nil { return nil, err } + cpuTimes = append(cpuTimes, perCPUTimes...) } if totalCPU { - if totalCPUTimes, err := cpu.Times(false); err == nil { - cpuTimes = append(cpuTimes, totalCPUTimes...) - } else { + totalCPUTimes, err := cpu.Times(false) + if err != nil { return nil, err } + cpuTimes = append(cpuTimes, totalCPUTimes...) } return cpuTimes, nil } From 3dc5281632e30c0636d2bcf884d0c6e0f93d939a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Thu, 18 Nov 2021 17:26:24 +0100 Subject: [PATCH 050/133] fix: Linter fixes for plugins/outputs/[a-f]* (#10124) --- plugins/outputs/amon/amon.go | 2 +- plugins/outputs/amqp/amqp.go | 52 ++++---- plugins/outputs/amqp/client.go | 10 +- .../application_insights_test.go | 121 +++++++++--------- .../outputs/azure_monitor/azure_monitor.go | 27 ++-- plugins/outputs/cloud_pubsub/pubsub_test.go | 17 ++- plugins/outputs/cloudwatch/cloudwatch.go | 23 ++-- plugins/outputs/cloudwatch/cloudwatch_test.go | 57 ++++----- .../cloudwatch_logs/cloudwatch_logs_test.go | 7 +- plugins/outputs/cratedb/cratedb.go | 15 ++- plugins/outputs/datadog/datadog.go | 2 +- plugins/outputs/datadog/datadog_test.go | 12 +- .../outputs/elasticsearch/elasticsearch.go | 46 +++---- .../elasticsearch/elasticsearch_test.go | 8 ++ plugins/outputs/exec/exec.go | 14 +- plugins/outputs/exec/exec_test.go | 6 +- plugins/outputs/execd/execd_test.go | 13 +- plugins/outputs/file/file_test.go | 114 ++++++++--------- 18 files changed, 279 insertions(+), 267 deletions(-) diff --git a/plugins/outputs/amon/amon.go b/plugins/outputs/amon/amon.go index 5bbbba9814e38..952d3b0235e38 100644 --- a/plugins/outputs/amon/amon.go +++ b/plugins/outputs/amon/amon.go @@ -142,7 +142,7 @@ func (p *Point) setValue(v interface{}) error { case float32: p[1] = float64(d) case float64: - p[1] = float64(d) + p[1] = d default: return fmt.Errorf("undeterminable type") } diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index 95da1f99b0f9f..5224928f786d5 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -5,13 +5,14 @@ import ( "strings" "time" + "github.com/streadway/amqp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/streadway/amqp" ) const ( @@ -180,11 +181,11 @@ func (q *AMQP) SetSerializer(serializer serializers.Serializer) { func (q *AMQP) Connect() error { if q.config == nil 
{ - config, err := q.makeClientConfig() + clientConfig, err := q.makeClientConfig() if err != nil { return err } - q.config = config + q.config = clientConfig } var err error @@ -251,8 +252,8 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { if err != nil { // If this is the first attempt to publish and the connection is // closed, try to reconnect and retry once. + //nolint: revive // Simplifying if-else with early return will reduce clarity if aerr, ok := err.(*amqp.Error); first && ok && aerr == amqp.ErrClosed { - first = false q.client = nil err := q.publish(key, body) if err != nil { @@ -268,7 +269,9 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 { q.Log.Debug("Sent MaxMessages; closing connection") - q.client.Close() + if err := q.client.Close(); err != nil { + q.Log.Errorf("Closing connection failed: %v", err) + } q.client = nil } @@ -315,52 +318,53 @@ func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) { } func (q *AMQP) makeClientConfig() (*ClientConfig, error) { - config := &ClientConfig{ + clientConfig := &ClientConfig{ exchange: q.Exchange, exchangeType: q.ExchangeType, exchangePassive: q.ExchangePassive, encoding: q.ContentEncoding, timeout: time.Duration(q.Timeout), + log: q.Log, } switch q.ExchangeDurability { case "transient": - config.exchangeDurable = false + clientConfig.exchangeDurable = false default: - config.exchangeDurable = true + clientConfig.exchangeDurable = true } - config.brokers = q.Brokers - if len(config.brokers) == 0 { - config.brokers = []string{q.URL} + clientConfig.brokers = q.Brokers + if len(clientConfig.brokers) == 0 { + clientConfig.brokers = []string{q.URL} } switch q.DeliveryMode { case "transient": - config.deliveryMode = amqp.Transient + clientConfig.deliveryMode = amqp.Transient case "persistent": - config.deliveryMode = amqp.Persistent + clientConfig.deliveryMode = amqp.Persistent default: - config.deliveryMode = amqp.Transient + clientConfig.deliveryMode = amqp.Transient } if len(q.Headers) > 0 { - config.headers = make(amqp.Table, len(q.Headers)) + clientConfig.headers = make(amqp.Table, len(q.Headers)) for k, v := range q.Headers { - config.headers[k] = v + clientConfig.headers[k] = v } } else { // Copy deprecated fields into message header - config.headers = amqp.Table{ + clientConfig.headers = amqp.Table{ "database": q.Database, "retention_policy": q.RetentionPolicy, } } if len(q.ExchangeArguments) > 0 { - config.exchangeArguments = make(amqp.Table, len(q.ExchangeArguments)) + clientConfig.exchangeArguments = make(amqp.Table, len(q.ExchangeArguments)) for k, v := range q.ExchangeArguments { - config.exchangeArguments[k] = v + clientConfig.exchangeArguments[k] = v } } @@ -368,7 +372,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { if err != nil { return nil, err } - config.tlsConfig = tlsConfig + clientConfig.tlsConfig = tlsConfig var auth []amqp.Authentication if strings.ToUpper(q.AuthMethod) == "EXTERNAL" { @@ -381,13 +385,13 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { }, } } - config.auth = auth + clientConfig.auth = auth - return config, nil + return clientConfig, nil } -func connect(config *ClientConfig) (Client, error) { - return Connect(config) +func connect(clientConfig *ClientConfig) (Client, error) { + return Connect(clientConfig) } func init() { diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go index 8c230b706b09a..af0ef5470e8ed 100644 --- a/plugins/outputs/amqp/client.go +++ 
b/plugins/outputs/amqp/client.go @@ -4,12 +4,13 @@ import ( "crypto/tls" "errors" "fmt" - "log" "math/rand" "net" "time" "github.com/streadway/amqp" + + "github.com/influxdata/telegraf" ) type ClientConfig struct { @@ -25,6 +26,7 @@ type ClientConfig struct { tlsConfig *tls.Config timeout time.Duration auth []amqp.Authentication + log telegraf.Logger } type client struct { @@ -42,7 +44,7 @@ func Connect(config *ClientConfig) (*client, error) { p := rand.Perm(len(config.brokers)) for _, n := range p { broker := config.brokers[n] - log.Printf("D! Output [amqp] connecting to %q", broker) + config.log.Debugf("Connecting to %q", broker) conn, err := amqp.DialConfig( broker, amqp.Config{ TLSClientConfig: config.tlsConfig, @@ -53,10 +55,10 @@ func Connect(config *ClientConfig) (*client, error) { }) if err == nil { client.conn = conn - log.Printf("D! Output [amqp] connected to %q", broker) + config.log.Debugf("Connected to %q", broker) break } - log.Printf("D! Output [amqp] error connecting to %q - %s", broker, err.Error()) + config.log.Debugf("Error connecting to %q - %v", broker, err.Error()) } if client.conn == nil { diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index b685f6c318d05..fd0759343645a 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -5,21 +5,18 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/microsoft/ApplicationInsights-Go/appinsights" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/application_insights/mocks" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" + "github.com/influxdata/telegraf/testutil" ) func TestConnectFailsIfNoIkey(t *testing.T) { - assert := assert.New(t) - transmitter := new(mocks.Transmitter) transmitter.On("Close").Return(closed) @@ -31,12 +28,10 @@ func TestConnectFailsIfNoIkey(t *testing.T) { } err := ai.Connect() - assert.Error(err) + require.Error(t, err) } func TestOutputCloseTimesOut(t *testing.T) { - assert := assert.New(t) - transmitter := new(mocks.Transmitter) transmitter.On("Close").Return(unfinished) @@ -47,13 +42,11 @@ func TestOutputCloseTimesOut(t *testing.T) { } err := ai.Close() - assert.NoError(err) + require.NoError(t, err) transmitter.AssertCalled(t, "Close") } func TestCloseRemovesDiagMsgListener(t *testing.T) { - assert := assert.New(t) - transmitter := new(mocks.Transmitter) transmitter.On("Close").Return(closed) @@ -75,11 +68,11 @@ func TestCloseRemovesDiagMsgListener(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) diagMsgSubscriber.AssertCalled(t, "Subscribe", mock.AnythingOfType("appinsights.DiagnosticsMessageHandler")) err = ai.Close() - assert.NoError(err) + require.NoError(t, err) transmitter.AssertCalled(t, "Close") diagMsgListener.AssertCalled(t, "Remove") } @@ -137,7 +130,6 @@ func TestAggregateMetricCreated(t *testing.T) { for _, tt := range tests { tf := func(t *testing.T) { - assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) @@ -158,17 +150,18 @@ func TestAggregateMetricCreated(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) 
mSet := []telegraf.Metric{m} - ai.Write(mSet) + err = ai.Write(mSet) + require.NoError(t, err) transmitter.AssertNumberOfCalls(t, "Track", 1+len(tt.additionalMetricValueFields)) var pAggregateTelemetry *appinsights.AggregateMetricTelemetry - assert.IsType(pAggregateTelemetry, transmitter.Calls[len(transmitter.Calls)-1].Arguments.Get(0), "Expected last telemetry to be AggregateMetricTelemetry") + require.IsType(t, pAggregateTelemetry, transmitter.Calls[len(transmitter.Calls)-1].Arguments.Get(0), "Expected last telemetry to be AggregateMetricTelemetry") aggregateTelemetry := transmitter.Calls[len(transmitter.Calls)-1].Arguments.Get(0).(*appinsights.AggregateMetricTelemetry) - verifyAggregateTelemetry(assert, m, tt.valueField, tt.countField, aggregateTelemetry) + verifyAggregateTelemetry(t, m, tt.valueField, tt.countField, aggregateTelemetry) - verifyAdditionalTelemetry(assert, m, transmitter, tt.additionalMetricValueFields, metricName) + verifyAdditionalTelemetry(t, m, transmitter, tt.additionalMetricValueFields, metricName) } t.Run(tt.name, tf) @@ -195,7 +188,6 @@ func TestSimpleMetricCreated(t *testing.T) { for _, tt := range tests { tf := func(t *testing.T) { - assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) @@ -216,10 +208,11 @@ func TestSimpleMetricCreated(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) mSet := []telegraf.Metric{m} - ai.Write(mSet) + err = ai.Write(mSet) + require.NoError(t, err) expectedNumberOfCalls := len(tt.additionalMetricValueFields) if tt.primaryMetricValueField != "" { @@ -229,7 +222,7 @@ func TestSimpleMetricCreated(t *testing.T) { transmitter.AssertNumberOfCalls(t, "Track", expectedNumberOfCalls) if tt.primaryMetricValueField != "" { var pMetricTelemetry *appinsights.MetricTelemetry - assert.IsType(pMetricTelemetry, transmitter.Calls[0].Arguments.Get(0), "First created telemetry should be simple MetricTelemetry") + require.IsType(t, pMetricTelemetry, transmitter.Calls[0].Arguments.Get(0), "First created telemetry should be simple MetricTelemetry") metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.MetricTelemetry) var expectedTelemetryName string @@ -238,10 +231,10 @@ func TestSimpleMetricCreated(t *testing.T) { } else { expectedTelemetryName = m.Name() + "_" + tt.primaryMetricValueField } - verifySimpleTelemetry(assert, m, tt.primaryMetricValueField, expectedTelemetryName, metricTelemetry) + verifySimpleTelemetry(t, m, tt.primaryMetricValueField, expectedTelemetryName, metricTelemetry) } - verifyAdditionalTelemetry(assert, m, transmitter, tt.additionalMetricValueFields, metricName) + verifyAdditionalTelemetry(t, m, transmitter, tt.additionalMetricValueFields, metricName) } t.Run(tt.name, tf) @@ -265,7 +258,6 @@ func TestTagsAppliedToTelemetry(t *testing.T) { for _, tt := range tests { tf := func(t *testing.T) { - assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) @@ -286,15 +278,16 @@ func TestTagsAppliedToTelemetry(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) mSet := []telegraf.Metric{m} - ai.Write(mSet) + err = ai.Write(mSet) + require.NoError(t, err) transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields)) transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry")) // Will verify that all original tags are present in telemetry.Properties map - verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName) + 
verifyAdditionalTelemetry(t, m, transmitter, tt.metricValueFields, metricName) } t.Run(tt.name, tf) @@ -302,7 +295,6 @@ func TestTagsAppliedToTelemetry(t *testing.T) { } func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { - assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) @@ -327,19 +319,19 @@ func TestContextTagsSetOnSimpleTelemetry(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) mSet := []telegraf.Metric{m} - ai.Write(mSet) + err = ai.Write(mSet) + require.NoError(t, err) transmitter.AssertNumberOfCalls(t, "Track", 1) metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.MetricTelemetry) cloudTags := metricTelemetry.Tags.Cloud() - assert.Equal("atcsvc", cloudTags.GetRole()) - assert.Equal("bunkie17554", cloudTags.GetRoleInstance()) + require.Equal(t, "atcsvc", cloudTags.GetRole()) + require.Equal(t, "bunkie17554", cloudTags.GetRoleInstance()) } func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { - assert := assert.New(t) now := time.Now().UTC() transmitter := new(mocks.Transmitter) @@ -364,15 +356,16 @@ func TestContextTagsSetOnAggregateTelemetry(t *testing.T) { } err := ai.Connect() - assert.NoError(err) + require.NoError(t, err) mSet := []telegraf.Metric{m} - ai.Write(mSet) + err = ai.Write(mSet) + require.NoError(t, err) transmitter.AssertNumberOfCalls(t, "Track", 1) metricTelemetry := transmitter.Calls[0].Arguments.Get(0).(*appinsights.AggregateMetricTelemetry) cloudTags := metricTelemetry.Tags.Cloud() - assert.Equal("atcsvc", cloudTags.GetRole()) - assert.Equal("bunkie17554", cloudTags.GetRoleInstance()) + require.Equal(t, "atcsvc", cloudTags.GetRole()) + require.Equal(t, "bunkie17554", cloudTags.GetRoleInstance()) } func closed() <-chan struct{} { @@ -387,49 +380,49 @@ func unfinished() <-chan struct{} { } func verifyAggregateTelemetry( - assert *assert.Assertions, - metric telegraf.Metric, + t *testing.T, + m telegraf.Metric, valueField string, countField string, telemetry *appinsights.AggregateMetricTelemetry, ) { verifyAggregateField := func(fieldName string, telemetryValue float64) { - metricRawFieldValue, found := metric.Fields()[fieldName] + metricRawFieldValue, found := m.Fields()[fieldName] if !found { return } if _, err := toFloat64(metricRawFieldValue); err == nil { - assert.EqualValues(metricRawFieldValue, telemetryValue, "Telemetry property %s does not match the metric field", fieldName) + require.EqualValues(t, metricRawFieldValue, telemetryValue, "Telemetry property %s does not match the metric field", fieldName) } } - assert.Equal(metric.Name(), telemetry.Name, "Telemetry name should be the same as metric name") - assert.EqualValues(metric.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") - assert.EqualValues(metric.Fields()[countField], telemetry.Count, "Telemetry sample count does not mach metric sample count field") + require.Equal(t, m.Name(), telemetry.Name, "Telemetry name should be the same as metric name") + require.EqualValues(t, m.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") + require.EqualValues(t, m.Fields()[countField], telemetry.Count, "Telemetry sample count does not mach metric sample count field") verifyAggregateField("min", telemetry.Min) verifyAggregateField("max", telemetry.Max) verifyAggregateField("stdev", telemetry.StdDev) verifyAggregateField("variance", telemetry.Variance) - assert.Equal(metric.Time(), telemetry.Timestamp, "Telemetry and metric 
timestamps do not match") - assertMapContains(assert, metric.Tags(), telemetry.Properties) + require.Equal(t, m.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match") + assertMapContains(t, m.Tags(), telemetry.Properties) } func verifySimpleTelemetry( - assert *assert.Assertions, - metric telegraf.Metric, + t *testing.T, + m telegraf.Metric, valueField string, expectedTelemetryName string, telemetry *appinsights.MetricTelemetry, ) { - assert.Equal(expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected") - assert.EqualValues(metric.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") - assert.Equal(metric.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match") - assertMapContains(assert, metric.Tags(), telemetry.Properties) + require.Equal(t, expectedTelemetryName, telemetry.Name, "Telemetry name is not what was expected") + require.EqualValues(t, m.Fields()[valueField], telemetry.Value, "Telemetry value does not match metric value field") + require.Equal(t, m.Time(), telemetry.Timestamp, "Telemetry and metric timestamps do not match") + assertMapContains(t, m.Tags(), telemetry.Properties) } func verifyAdditionalTelemetry( - assert *assert.Assertions, - metric telegraf.Metric, + t *testing.T, + m telegraf.Metric, transmitter *mocks.Transmitter, additionalMetricValueFields []string, telemetryNamePrefix string, @@ -437,9 +430,9 @@ func verifyAdditionalTelemetry( for _, fieldName := range additionalMetricValueFields { expectedTelemetryName := telemetryNamePrefix + "_" + fieldName telemetry := findTransmittedTelemetry(transmitter, expectedTelemetryName) - assert.NotNil(telemetry, "Expected telemetry named %s to be created, but could not find it", expectedTelemetryName) + require.NotNil(t, telemetry, "Expected telemetry named %s to be created, but could not find it", expectedTelemetryName) if telemetry != nil { - verifySimpleTelemetry(assert, metric, fieldName, expectedTelemetryName, telemetry) + verifySimpleTelemetry(t, m, fieldName, expectedTelemetryName, telemetry) } } } @@ -455,17 +448,17 @@ func findTransmittedTelemetry(transmitter *mocks.Transmitter, telemetryName stri return nil } -func assertMapContains(assert *assert.Assertions, expected, actual map[string]string) { +func assertMapContains(t *testing.T, expected, actual map[string]string) { if expected == nil && actual == nil { return } - assert.NotNil(expected, "Maps not equal: expected is nil but actual is not") - assert.NotNil(actual, "Maps not equal: actual is nil but expected is not") + require.NotNil(t, expected, "Maps not equal: expected is nil but actual is not") + require.NotNil(t, actual, "Maps not equal: actual is nil but expected is not") for k, v := range expected { av, ok := actual[k] - assert.True(ok, "Actual map does not contain a value for key '%s'", k) - assert.Equal(v, av, "The expected value for key '%s' is '%s' but the actual value is '%s", k, v, av) + require.True(t, ok, "Actual map does not contain a value for key '%s'", k) + require.Equal(t, v, av, "The expected value for key '%s' is '%s' but the actual value is '%s", k, v, av) } } diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go index ca511a5211860..398be55cd1cfc 100644 --- a/plugins/outputs/azure_monitor/azure_monitor.go +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure/auth" + 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" @@ -208,7 +209,7 @@ func (a *AzureMonitor) Connect() error { } // vmMetadata retrieves metadata about the current Azure VM -func vmInstanceMetadata(c *http.Client) (string, string, error) { +func vmInstanceMetadata(c *http.Client) (region string, resourceID string, err error) { req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil) if err != nil { return "", "", fmt.Errorf("error creating request: %v", err) @@ -235,8 +236,8 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) { return "", "", err } - region := metadata.Compute.Location - resourceID := metadata.ResourceID() + region = metadata.Compute.Location + resourceID = metadata.ResourceID() return region, resourceID, nil } @@ -366,20 +367,20 @@ func (a *AzureMonitor) send(body []byte) error { func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 { h := fnv.New64a() - h.Write([]byte(m.Name())) - h.Write([]byte("\n")) + h.Write([]byte(m.Name())) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" for _, tag := range m.TagList() { if tag.Key == "" || tag.Value == "" { continue } - h.Write([]byte(tag.Key)) - h.Write([]byte("\n")) + h.Write([]byte(tag.Key)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" } b := make([]byte, binary.MaxVarintLen64) n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) - h.Write(b[:n]) - h.Write([]byte("\n")) + h.Write(b[:n]) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" return h.Sum64() } @@ -573,10 +574,10 @@ func hashIDWithField(id uint64, fk string) uint64 { h := fnv.New64a() b := make([]byte, binary.MaxVarintLen64) n := binary.PutUvarint(b, id) - h.Write(b[:n]) - h.Write([]byte("\n")) - h.Write([]byte(fk)) - h.Write([]byte("\n")) + h.Write(b[:n]) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(fk)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" return h.Sum64() } diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go index 967a33d742c3c..e342acac44b50 100644 --- a/plugins/outputs/cloud_pubsub/pubsub_test.go +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -1,16 +1,15 @@ package cloud_pubsub import ( - "testing" - "encoding/base64" + "testing" "cloud.google.com/go/pubsub" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestPubSub_WriteSingle(t *testing.T) { @@ -51,8 +50,8 @@ func TestPubSub_WriteWithAttribute(t *testing.T) { for _, testM := range testMetrics { msg := verifyRawMetricPublished(t, testM.m, topic.published) - assert.Equalf(t, "bar1", msg.Attributes["foo1"], "expected attribute foo1=bar1") - assert.Equalf(t, "bar2", msg.Attributes["foo2"], "expected attribute foo2=bar2") + require.Equalf(t, "bar1", msg.Attributes["foo1"], "expected attribute foo1=bar1") + require.Equalf(t, "bar2", msg.Attributes["foo2"], "expected attribute 
foo2=bar2") } } @@ -74,7 +73,7 @@ func TestPubSub_WriteMultiple(t *testing.T) { for _, testM := range testMetrics { verifyRawMetricPublished(t, testM.m, topic.published) } - assert.Equalf(t, 1, topic.getBundleCount(), "unexpected bundle count") + require.Equalf(t, 1, topic.getBundleCount(), "unexpected bundle count") } func TestPubSub_WriteOverCountThreshold(t *testing.T) { @@ -98,7 +97,7 @@ func TestPubSub_WriteOverCountThreshold(t *testing.T) { for _, testM := range testMetrics { verifyRawMetricPublished(t, testM.m, topic.published) } - assert.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count") + require.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count") } func TestPubSub_WriteOverByteThreshold(t *testing.T) { @@ -121,7 +120,7 @@ func TestPubSub_WriteOverByteThreshold(t *testing.T) { for _, testM := range testMetrics { verifyRawMetricPublished(t, testM.m, topic.published) } - assert.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count") + require.Equalf(t, 2, topic.getBundleCount(), "unexpected bundle count") } func TestPubSub_WriteBase64Single(t *testing.T) { @@ -198,7 +197,7 @@ func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string if !ok { t.Fatalf("expected published metric to have a value") } - assert.Equal(t, v, publishedV, "incorrect published value") + require.Equal(t, v, publishedV, "incorrect published value") return psMsg } diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 129f014bfb548..a48a3ee547920 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -247,7 +247,7 @@ func (c *CloudWatch) WriteToCloudWatch(datums []types.MetricDatum) error { return err } -// Partition the MetricDatums into smaller slices of a max size so that are under the limit +// PartitionDatums partitions the MetricDatums into smaller slices of a max size so that are under the limit // for the AWS API calls. func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum { numberOfPartitions := len(datums) / size @@ -270,7 +270,7 @@ func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum return partitions } -// Make a MetricDatum from telegraf.Metric. It would check if all required fields of +// BuildMetricDatum makes a MetricDatum from telegraf.Metric. It would check if all required fields of // cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. // Otherwise, fields would still been built independently. func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum { @@ -332,14 +332,14 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel return datums } -// Make a list of Dimensions by using a Point's tags. CloudWatch supports up to -// 10 dimensions per metric so we only keep up to the first 10 alphabetically. +// BuildDimensions makes a list of Dimensions by using a Point's tags. CloudWatch supports up to +// 10 dimensions per metric, so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. func BuildDimensions(mTags map[string]string) []types.Dimension { - const MaxDimensions = 10 - dimensions := make([]types.Dimension, 0, MaxDimensions) + const maxDimensions = 10 + dimensions := make([]types.Dimension, 0, maxDimensions) - // This is pretty ugly but we always want to include the "host" tag if it exists. 
+ // This is pretty ugly, but we always want to include the "host" tag if it exists. if host, ok := mTags["host"]; ok { dimensions = append(dimensions, types.Dimension{ Name: aws.String("host"), @@ -356,7 +356,7 @@ func BuildDimensions(mTags map[string]string) []types.Dimension { sort.Strings(keys) for _, k := range keys { - if len(dimensions) >= MaxDimensions { + if len(dimensions) >= maxDimensions { break } @@ -392,7 +392,8 @@ func getStatisticType(name string) (sType statisticType, fieldName string) { sType = statisticTypeNone fieldName = name } - return + + return sType, fieldName } func convert(v interface{}) (value float64, ok bool) { @@ -420,7 +421,7 @@ func convert(v interface{}) (value float64, ok bool) { default: // Skip unsupported type. ok = false - return + return value, ok } // Do CloudWatch boundary checking @@ -436,7 +437,7 @@ func convert(v interface{}) (value float64, ok bool) { return 0, false } - return + return value, ok } func init() { diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index df98381cf3f90..b0f277c447ba8 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -2,26 +2,23 @@ package cloudwatch import ( "fmt" - "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "math" "sort" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Test that each tag becomes one dimension func TestBuildDimensions(t *testing.T) { - const MaxDimensions = 10 - - assert := assert.New(t) + const maxDimensions = 10 testPoint := testutil.TestMetric(1) dimensions := BuildDimensions(testPoint.Tags()) @@ -35,26 +32,24 @@ func TestBuildDimensions(t *testing.T) { sort.Strings(tagKeys) - if len(testPoint.Tags()) >= MaxDimensions { - assert.Equal(MaxDimensions, len(dimensions), "Number of dimensions should be less than MaxDimensions") + if len(testPoint.Tags()) >= maxDimensions { + require.Equal(t, maxDimensions, len(dimensions), "Number of dimensions should be less than MaxDimensions") } else { - assert.Equal(len(testPoint.Tags()), len(dimensions), "Number of dimensions should be equal to number of tags") + require.Equal(t, len(testPoint.Tags()), len(dimensions), "Number of dimensions should be equal to number of tags") } for i, key := range tagKeys { if i >= 10 { break } - assert.Equal(key, *dimensions[i].Name, "Key should be equal") - assert.Equal(testPoint.Tags()[key], *dimensions[i].Value, "Value should be equal") + require.Equal(t, key, *dimensions[i].Name, "Key should be equal") + require.Equal(t, testPoint.Tags()[key], *dimensions[i].Value, "Value should be equal") } } // Test that metrics with valid values have a MetricDatum created where as non valid do not. // Skips "time.Time" type as something is converting the value to string. 
func TestBuildMetricDatums(t *testing.T) { - assert := assert.New(t) - zero := 0.0 validMetrics := []telegraf.Metric{ testutil.TestMetric(1), @@ -75,11 +70,11 @@ func TestBuildMetricDatums(t *testing.T) { } for _, point := range validMetrics { datums := BuildMetricDatum(false, false, point) - assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) + require.Equal(t, 1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) } for _, point := range invalidMetrics { datums := BuildMetricDatum(false, false, point) - assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) + require.Equal(t, 0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } statisticMetric := metric.New( @@ -89,7 +84,7 @@ func TestBuildMetricDatums(t *testing.T) { time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) datums := BuildMetricDatum(true, false, statisticMetric) - assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) + require.Equal(t, 1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) multiFieldsMetric := metric.New( "test1", @@ -98,7 +93,7 @@ func TestBuildMetricDatums(t *testing.T) { time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) datums = BuildMetricDatum(true, false, multiFieldsMetric) - assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) + require.Equal(t, 4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) multiStatisticMetric := metric.New( "test1", @@ -112,24 +107,22 @@ func TestBuildMetricDatums(t *testing.T) { time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), ) datums = BuildMetricDatum(true, false, multiStatisticMetric) - assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) + require.Equal(t, 7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) } func TestMetricDatumResolution(t *testing.T) { const expectedStandardResolutionValue = int32(60) const expectedHighResolutionValue = int32(1) - assert := assert.New(t) + m := testutil.TestMetric(1) - metric := testutil.TestMetric(1) - - standardResolutionDatum := BuildMetricDatum(false, false, metric) + standardResolutionDatum := BuildMetricDatum(false, false, m) actualStandardResolutionValue := *standardResolutionDatum[0].StorageResolution - assert.Equal(expectedStandardResolutionValue, actualStandardResolutionValue) + require.Equal(t, expectedStandardResolutionValue, actualStandardResolutionValue) - highResolutionDatum := BuildMetricDatum(false, true, metric) + highResolutionDatum := BuildMetricDatum(false, true, m) actualHighResolutionValue := *highResolutionDatum[0].StorageResolution - assert.Equal(expectedHighResolutionValue, actualHighResolutionValue) + require.Equal(t, expectedHighResolutionValue, actualHighResolutionValue) } func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { @@ -150,8 +143,6 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { } func TestPartitionDatums(t *testing.T) { - assert := assert.New(t) - testDatum := types.MetricDatum{ MetricName: aws.String("Foo"), Value: aws.Float64(1), @@ -162,9 +153,9 @@ func TestPartitionDatums(t *testing.T) { twoDatum := []types.MetricDatum{testDatum, testDatum} threeDatum := []types.MetricDatum{testDatum, 
testDatum, testDatum} - assert.Equal([][]types.MetricDatum{}, PartitionDatums(2, zeroDatum)) - assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]types.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) - assert.Equal([][]types.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) + require.Equal(t, [][]types.MetricDatum{}, PartitionDatums(2, zeroDatum)) + require.Equal(t, [][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + require.Equal(t, [][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + require.Equal(t, [][]types.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + require.Equal(t, [][]types.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go index e103eb53d24e6..1263d665cea21 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -11,10 +11,11 @@ import ( cloudwatchlogsV2 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type mockCloudWatchLogs struct { @@ -57,9 +58,7 @@ func (c *mockCloudWatchLogs) PutLogEvents(_ context.Context, input *cloudwatchlo sequenceToken := "arbitraryToken" output := &cloudwatchlogsV2.PutLogEventsOutput{NextSequenceToken: &sequenceToken} //Saving messages - for _, event := range input.LogEvents { - c.pushedLogEvents = append(c.pushedLogEvents, event) - } + c.pushedLogEvents = append(c.pushedLogEvents, input.LogEvents...) 
return output, nil } diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index b56787114d709..40c8c2728048d 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -11,10 +11,11 @@ import ( "strings" "time" + _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - _ "github.com/jackc/pgx/v4/stdlib" //to register stdlib from PostgreSQL Driver and Toolkit ) const MaxInt64 = int64(^uint64(0) >> 1) @@ -47,7 +48,7 @@ func (c *CrateDB) Connect() error { if err != nil { return err } else if c.TableCreate { - sql := ` + query := ` CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( "hash_id" LONG INDEX OFF, "timestamp" TIMESTAMP, @@ -60,7 +61,7 @@ CREATE TABLE IF NOT EXISTS ` + c.Table + ` ( ` ctx, cancel := context.WithTimeout(context.Background(), time.Duration(c.Timeout)) defer cancel() - if _, err := db.ExecContext(ctx, sql); err != nil { + if _, err := db.ExecContext(ctx, query); err != nil { return err } } @@ -106,10 +107,10 @@ func insertSQL(table string, keyReplacement string, metrics []telegraf.Metric) ( } rows[i] = `(` + strings.Join(escapedCols, ", ") + `)` } - sql := `INSERT INTO ` + table + ` ("hash_id", "timestamp", "name", "tags", "fields") + query := `INSERT INTO ` + table + ` ("hash_id", "timestamp", "name", "tags", "fields") VALUES ` + strings.Join(rows, " ,\n") + `;` - return sql, nil + return query, nil } // escapeValue returns a string version of val that is suitable for being used @@ -206,7 +207,7 @@ func escapeString(s string, quote string) string { // [1] https://github.com/influxdata/telegraf/pull/3210#discussion_r148411201 func hashID(m telegraf.Metric) int64 { h := sha512.New() - h.Write([]byte(m.Name())) + h.Write([]byte(m.Name())) //nolint:revive // from hash.go: "It never returns an error" tags := m.Tags() tmp := make([]string, len(tags)) i := 0 @@ -217,7 +218,7 @@ func hashID(m telegraf.Metric) int64 { sort.Strings(tmp) for _, s := range tmp { - h.Write([]byte(s)) + h.Write([]byte(s)) //nolint:revive // from hash.go: "It never returns an error" } sum := h.Sum(nil) diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 47d8a4e91a43b..6c89ab1e32746 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -200,7 +200,7 @@ func (p *Point) setValue(v interface{}) error { case uint64: p[1] = float64(d) case float64: - p[1] = float64(d) + p[1] = d case bool: p[1] = float64(0) if d { diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index c893833b44398..4c149bf600cc9 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -10,10 +10,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var ( @@ -36,6 +36,7 @@ func fakeDatadog() *Datadog { func TestUriOverride(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) + //nolint:errcheck,revive // Ignore the returned error as the test will fail anyway json.NewEncoder(w).Encode(`{"status":"ok"}`) })) defer ts.Close() @@ -51,6 +52,7 @@ func TestUriOverride(t *testing.T) { 
func TestBadStatusCode(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) + //nolint:errcheck,revive // Ignore the returned error as the test will fail anyway json.NewEncoder(w).Encode(`{ 'errors': [ 'Something bad happened to the server.', 'Your query made the server very sad.' @@ -75,7 +77,7 @@ func TestAuthenticatedUrl(t *testing.T) { d := fakeDatadog() authURL := d.authenticatedURL() - assert.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeURL, fakeAPIKey), authURL) + require.EqualValues(t, fmt.Sprintf("%s?api_key=%s", fakeURL, fakeAPIKey), authURL) } func TestBuildTags(t *testing.T) { @@ -173,7 +175,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestMetric(bool(true), "test7"), + testutil.TestMetric(true, "test7"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 1.0, @@ -181,7 +183,7 @@ func TestBuildPoint(t *testing.T) { nil, }, { - testutil.TestMetric(bool(false), "test8"), + testutil.TestMetric(false, "test8"), Point{ float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), 0.0, diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 8f57f4e12ebf5..235a8ee088ec3 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "log" "net/http" "strconv" "strings" @@ -12,12 +11,12 @@ import ( "time" "crypto/sha256" + "gopkg.in/olivere/elastic.v5" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "gopkg.in/olivere/elastic.v5" ) type Elasticsearch struct { @@ -36,6 +35,7 @@ type Elasticsearch struct { OverwriteTemplate bool ForceDocumentID bool `toml:"force_document_id"` MajorReleaseNumber int + Log telegraf.Logger `toml:"-"` tls.ClientConfig Client *elastic.Client @@ -174,7 +174,7 @@ type templatePart struct { func (a *Elasticsearch) Connect() error { if a.URLs == nil || a.IndexName == "" { - return fmt.Errorf("Elasticsearch urls or index_name is not defined") + return fmt.Errorf("elasticsearch urls or index_name is not defined") } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) @@ -213,7 +213,7 @@ func (a *Elasticsearch) Connect() error { clientOptions = append(clientOptions, elastic.SetHealthcheck(false), ) - log.Printf("D! Elasticsearch output: disabling health check") + a.Log.Debugf("Disabling health check") } client, err := elastic.NewClient(clientOptions...) @@ -226,16 +226,16 @@ func (a *Elasticsearch) Connect() error { esVersion, err := client.ElasticsearchVersion(a.URLs[0]) if err != nil { - return fmt.Errorf("Elasticsearch version check failed: %s", err) + return fmt.Errorf("elasticsearch version check failed: %s", err) } // quit if ES version is not supported majorReleaseNumber, err := strconv.Atoi(strings.Split(esVersion, ".")[0]) if err != nil || majorReleaseNumber < 5 { - return fmt.Errorf("Elasticsearch version not supported: %s", esVersion) + return fmt.Errorf("elasticsearch version not supported: %s", esVersion) } - log.Println("I! 
Elasticsearch version: " + esVersion) + a.Log.Infof("Elasticsearch version: %q", esVersion) a.Client = client a.MajorReleaseNumber = majorReleaseNumber @@ -257,9 +257,9 @@ func GetPointID(m telegraf.Metric) string { var buffer bytes.Buffer //Timestamp(ns),measurement name and Series Hash for compute the final SHA256 based hash ID - buffer.WriteString(strconv.FormatInt(m.Time().Local().UnixNano(), 10)) - buffer.WriteString(m.Name()) - buffer.WriteString(strconv.FormatUint(m.HashID(), 10)) + buffer.WriteString(strconv.FormatInt(m.Time().Local().UnixNano(), 10)) //nolint:revive // from buffer.go: "err is always nil" + buffer.WriteString(m.Name()) //nolint:revive // from buffer.go: "err is always nil" + buffer.WriteString(strconv.FormatUint(m.HashID(), 10)) //nolint:revive // from buffer.go: "err is always nil" return fmt.Sprintf("%x", sha256.Sum256(buffer.Bytes())) } @@ -305,15 +305,15 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { res, err := bulkRequest.Do(ctx) if err != nil { - return fmt.Errorf("Error sending bulk request to Elasticsearch: %s", err) + return fmt.Errorf("error sending bulk request to Elasticsearch: %s", err) } if res.Errors { for id, err := range res.Failed() { - log.Printf("E! Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"]) + a.Log.Errorf("Elasticsearch indexing failure, id: %d, error: %s, caused by: %s, %s", id, err.Error.Reason, err.Error.CausedBy["reason"], err.Error.CausedBy["type"]) break } - return fmt.Errorf("W! Elasticsearch failed to index %d metrics", len(res.Failed())) + return fmt.Errorf("elasticsearch failed to index %d metrics", len(res.Failed())) } return nil @@ -321,13 +321,13 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { func (a *Elasticsearch) manageTemplate(ctx context.Context) error { if a.TemplateName == "" { - return fmt.Errorf("Elasticsearch template_name configuration not defined") + return fmt.Errorf("elasticsearch template_name configuration not defined") } templateExists, errExists := a.Client.IndexTemplateExists(a.TemplateName).Do(ctx) if errExists != nil { - return fmt.Errorf("Elasticsearch template check failed, template name: %s, error: %s", a.TemplateName, errExists) + return fmt.Errorf("elasticsearch template check failed, template name: %s, error: %s", a.TemplateName, errExists) } templatePattern := a.IndexName @@ -341,7 +341,7 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { } if templatePattern == "" { - return fmt.Errorf("Template cannot be created for dynamic index names without an index prefix") + return fmt.Errorf("template cannot be created for dynamic index names without an index prefix") } if (a.OverwriteTemplate) || (!templateExists) || (templatePattern != "") { @@ -353,16 +353,18 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { t := template.Must(template.New("template").Parse(telegrafTemplate)) var tmpl bytes.Buffer - t.Execute(&tmpl, tp) + if err := t.Execute(&tmpl, tp); err != nil { + return err + } _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl.String()).Do(ctx) if errCreateTemplate != nil { - return fmt.Errorf("Elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate) + return fmt.Errorf("elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate) } - log.Printf("D! 
Elasticsearch template %s created or updated\n", a.TemplateName) + a.Log.Debugf("Template %s created or updated\n", a.TemplateName) } else { - log.Println("D! Found existing Elasticsearch template. Skipping template management") + a.Log.Debug("Found existing Elasticsearch template. Skipping template management") } return nil } @@ -384,7 +386,7 @@ func (a *Elasticsearch) GetTagKeys(indexName string) (string, []string) { ) indexName = tagReplacer.Replace(indexName) - tagKeys = append(tagKeys, (strings.TrimSpace(tagName))) + tagKeys = append(tagKeys, strings.TrimSpace(tagName)) startTag = strings.Index(indexName, "{{") } @@ -413,7 +415,7 @@ func (a *Elasticsearch) GetIndexName(indexName string, eventTime time.Time, tagK if value, ok := metricTags[key]; ok { tagValues = append(tagValues, value) } else { - log.Printf("D! Tag '%s' not found, using '%s' on index name instead\n", key, a.DefaultTagValue) + a.Log.Debugf("Tag '%s' not found, using '%s' on index name instead\n", key, a.DefaultTagValue) tagValues = append(tagValues, a.DefaultTagValue) } } diff --git a/plugins/outputs/elasticsearch/elasticsearch_test.go b/plugins/outputs/elasticsearch/elasticsearch_test.go index 7ad1e632c6d20..ecfe03f2e0a82 100644 --- a/plugins/outputs/elasticsearch/elasticsearch_test.go +++ b/plugins/outputs/elasticsearch/elasticsearch_test.go @@ -29,6 +29,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { TemplateName: "telegraf", OverwriteTemplate: false, HealthCheckInterval: config.Duration(time.Second * 10), + Log: testutil.Logger{}, } // Verify that we can connect to Elasticsearch @@ -57,6 +58,7 @@ func TestTemplateManagementEmptyTemplateIntegration(t *testing.T) { ManageTemplate: true, TemplateName: "", OverwriteTemplate: true, + Log: testutil.Logger{}, } err := e.manageTemplate(ctx) @@ -78,6 +80,7 @@ func TestTemplateManagementIntegration(t *testing.T) { ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, + Log: testutil.Logger{}, } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) @@ -105,6 +108,7 @@ func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { ManageTemplate: true, TemplateName: "telegraf", OverwriteTemplate: true, + Log: testutil.Logger{}, } err := e.Connect() @@ -114,6 +118,7 @@ func TestTemplateInvalidIndexPatternIntegration(t *testing.T) { func TestGetTagKeys(t *testing.T) { e := &Elasticsearch{ DefaultTagValue: "none", + Log: testutil.Logger{}, } var tests = []struct { @@ -173,6 +178,7 @@ func TestGetTagKeys(t *testing.T) { func TestGetIndexName(t *testing.T) { e := &Elasticsearch{ DefaultTagValue: "none", + Log: testutil.Logger{}, } var tests = []struct { @@ -286,6 +292,7 @@ func TestRequestHeaderWhenGzipIsEnabled(t *testing.T) { Timeout: config.Duration(time.Second * 5), EnableGzip: true, ManageTemplate: false, + Log: testutil.Logger{}, } err := e.Connect() @@ -319,6 +326,7 @@ func TestRequestHeaderWhenGzipIsDisabled(t *testing.T) { Timeout: config.Duration(time.Second * 5), EnableGzip: false, ManageTemplate: false, + Log: testutil.Logger{}, } err := e.Connect() diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index b0313a382045a..68c61e1ca4581 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "log" "os/exec" "runtime" "time" @@ -22,6 +21,7 @@ const maxStderrBytes = 512 type Exec struct { Command []string `toml:"command"` Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` runner Runner serializer 
serializers.Serializer @@ -42,6 +42,8 @@ var sampleConfig = ` ` func (e *Exec) Init() error { + e.runner = &CommandRunner{log: e.Log} + return nil } @@ -77,7 +79,7 @@ func (e *Exec) Write(metrics []telegraf.Metric) error { if err != nil { return err } - buffer.Write(serializedMetrics) + buffer.Write(serializedMetrics) //nolint:revive // from buffer.go: "err is always nil" if buffer.Len() <= 0 { return nil @@ -94,6 +96,7 @@ type Runner interface { // CommandRunner runs a command with the ability to kill the process before the timeout. type CommandRunner struct { cmd *exec.Cmd + log telegraf.Logger } // Run runs the command. @@ -114,9 +117,9 @@ func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.R s = removeWindowsCarriageReturns(s) if s.Len() > 0 { if !telegraf.Debug { - log.Printf("E! [outputs.exec] Command error: %q", c.truncate(s)) + c.log.Errorf("Command error: %q", c.truncate(s)) } else { - log.Printf("D! [outputs.exec] Command error: %q", s) + c.log.Debugf("Command error: %q", s) } } @@ -147,7 +150,7 @@ func (c *CommandRunner) truncate(buf bytes.Buffer) string { buf.Truncate(i) } if didTruncate { - buf.WriteString("...") + buf.WriteString("...") //nolint:revive // from buffer.go: "err is always nil" } return buf.String() } @@ -155,7 +158,6 @@ func (c *CommandRunner) truncate(buf bytes.Buffer) string { func init() { outputs.Add("exec", func() telegraf.Output { return &Exec{ - runner: &CommandRunner{}, Timeout: config.Duration(time.Second * 5), } }) diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go index e75e1829d3894..40fac6327fee9 100644 --- a/plugins/outputs/exec/exec_test.go +++ b/plugins/outputs/exec/exec_test.go @@ -6,11 +6,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestExec(t *testing.T) { @@ -59,8 +60,7 @@ func TestExec(t *testing.T) { s, _ := serializers.NewInfluxSerializer() e.SetSerializer(s) - e.Connect() - + require.NoError(t, e.Connect()) require.Equal(t, tt.err, e.Write(tt.metrics) != nil) }) } diff --git a/plugins/outputs/execd/execd_test.go b/plugins/outputs/execd/execd_test.go index c14339d31a85a..66bc28561a625 100644 --- a/plugins/outputs/execd/execd_test.go +++ b/plugins/outputs/execd/execd_test.go @@ -11,13 +11,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var now = time.Date(2020, 6, 30, 16, 16, 0, 0, time.UTC) @@ -85,16 +86,20 @@ func runOutputConsumerProgram() { parser := influx.NewStreamParser(os.Stdin) for { - metric, err := parser.Next() + m, err := parser.Next() if err != nil { if err == influx.EOF { return // stream ended } if parseErr, isParseError := err.(*influx.ParseError); isParseError { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr) + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) + //nolint:revive // error code 
is important for this "test" os.Exit(1) } @@ -104,8 +109,10 @@ func runOutputConsumerProgram() { now, ) - if !testutil.MetricEqual(expected, metric) { + if !testutil.MetricEqual(expected, m) { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "metric doesn't match expected\n") + //nolint:revive // error code is important for this "test" os.Exit(1) } } diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index 5fcdc511972ac..36706c4dc815e 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/serializers" @@ -20,7 +20,7 @@ const ( ) func TestFileExistingFile(t *testing.T) { - fh := createFile() + fh := createFile(t) defer os.Remove(fh.Name()) s, _ := serializers.NewInfluxSerializer() f := File{ @@ -29,20 +29,20 @@ func TestFileExistingFile(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) - validateFile(fh.Name(), expExistFile, t) + validateFile(t, fh.Name(), expExistFile) err = f.Close() - assert.NoError(t, err) + require.NoError(t, err) } func TestFileNewFile(t *testing.T) { s, _ := serializers.NewInfluxSerializer() - fh := tmpFile() + fh := tmpFile(t) defer os.Remove(fh) f := File{ Files: []string{fh}, @@ -50,23 +50,23 @@ func TestFileNewFile(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) - validateFile(fh, expNewFile, t) + validateFile(t, fh, expNewFile) err = f.Close() - assert.NoError(t, err) + require.NoError(t, err) } func TestFileExistingFiles(t *testing.T) { - fh1 := createFile() + fh1 := createFile(t) defer os.Remove(fh1.Name()) - fh2 := createFile() + fh2 := createFile(t) defer os.Remove(fh2.Name()) - fh3 := createFile() + fh3 := createFile(t) defer os.Remove(fh3.Name()) s, _ := serializers.NewInfluxSerializer() @@ -76,26 +76,26 @@ func TestFileExistingFiles(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) - validateFile(fh1.Name(), expExistFile, t) - validateFile(fh2.Name(), expExistFile, t) - validateFile(fh3.Name(), expExistFile, t) + validateFile(t, fh1.Name(), expExistFile) + validateFile(t, fh2.Name(), expExistFile) + validateFile(t, fh3.Name(), expExistFile) err = f.Close() - assert.NoError(t, err) + require.NoError(t, err) } func TestFileNewFiles(t *testing.T) { s, _ := serializers.NewInfluxSerializer() - fh1 := tmpFile() + fh1 := tmpFile(t) defer os.Remove(fh1) - fh2 := tmpFile() + fh2 := tmpFile(t) defer os.Remove(fh2) - fh3 := tmpFile() + fh3 := tmpFile(t) defer os.Remove(fh3) f := File{ Files: []string{fh1, fh2, fh3}, @@ -103,23 +103,23 @@ func TestFileNewFiles(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) - validateFile(fh1, expNewFile, t) - validateFile(fh2, expNewFile, t) - validateFile(fh3, expNewFile, t) + validateFile(t, fh1, expNewFile) + validateFile(t, fh2, expNewFile) + validateFile(t, fh3, expNewFile) err = f.Close() - assert.NoError(t, err) + 
require.NoError(t, err) } func TestFileBoth(t *testing.T) { - fh1 := createFile() + fh1 := createFile(t) defer os.Remove(fh1.Name()) - fh2 := tmpFile() + fh2 := tmpFile(t) defer os.Remove(fh2) s, _ := serializers.NewInfluxSerializer() @@ -129,16 +129,16 @@ func TestFileBoth(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) - validateFile(fh1.Name(), expExistFile, t) - validateFile(fh2, expNewFile, t) + validateFile(t, fh1.Name(), expExistFile) + validateFile(t, fh2, expNewFile) err = f.Close() - assert.NoError(t, err) + require.NoError(t, err) } func TestFileStdout(t *testing.T) { @@ -154,52 +154,52 @@ func TestFileStdout(t *testing.T) { } err := f.Connect() - assert.NoError(t, err) + require.NoError(t, err) err = f.Write(testutil.MockMetrics()) - assert.NoError(t, err) + require.NoError(t, err) err = f.Close() - assert.NoError(t, err) + require.NoError(t, err) outC := make(chan string) // copy the output in a separate goroutine so printing can't block indefinitely go func() { var buf bytes.Buffer - io.Copy(&buf, r) + _, err := io.Copy(&buf, r) + require.NoError(t, err) outC <- buf.String() }() // back to normal state - w.Close() + err = w.Close() + require.NoError(t, err) + // restoring the real stdout os.Stdout = old out := <-outC - assert.Equal(t, expNewFile, out) + require.Equal(t, expNewFile, out) } -func createFile() *os.File { +func createFile(t *testing.T) *os.File { f, err := os.CreateTemp("", "") - if err != nil { - panic(err) - } - f.WriteString("cpu,cpu=cpu0 value=100 1455312810012459582\n") + require.NoError(t, err) + + _, err = f.WriteString("cpu,cpu=cpu0 value=100 1455312810012459582\n") + require.NoError(t, err) return f } -func tmpFile() string { +func tmpFile(t *testing.T) string { d, err := os.MkdirTemp("", "") - if err != nil { - panic(err) - } + require.NoError(t, err) + return d + internal.RandomString(10) } -func validateFile(fname, expS string, t *testing.T) { - buf, err := os.ReadFile(fname) - if err != nil { - panic(err) - } - assert.Equal(t, expS, string(buf)) +func validateFile(t *testing.T, fileName, expS string) { + buf, err := os.ReadFile(fileName) + require.NoError(t, err) + require.Equal(t, expS, string(buf)) } From 4a0397342adf4deb01c7b4b348d94ef62868a3b5 Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Thu, 18 Nov 2021 17:33:47 +0100 Subject: [PATCH 051/133] fix: update shirou/gopsutil to v3 (#10119) --- go.mod | 7 ++++--- go.sum | 14 +++++++++----- plugins/inputs/cpu/cpu.go | 2 +- plugins/inputs/cpu/cpu_test.go | 2 +- plugins/inputs/disk/disk.go | 6 +----- plugins/inputs/disk/disk_test.go | 18 +++++++++--------- plugins/inputs/diskio/diskio_test.go | 2 +- plugins/inputs/mem/memory.go | 14 +++++++------- plugins/inputs/mem/memory_test.go | 16 ++++++++-------- plugins/inputs/net/net_test.go | 2 +- plugins/inputs/procstat/native_finder.go | 2 +- plugins/inputs/procstat/process.go | 4 ++-- plugins/inputs/procstat/procstat.go | 2 +- plugins/inputs/procstat/procstat_test.go | 4 ++-- plugins/inputs/swap/swap_test.go | 2 +- plugins/inputs/system/mock_PS.go | 12 ++++++------ plugins/inputs/system/ps.go | 12 ++++++------ plugins/inputs/system/system.go | 6 +++--- plugins/inputs/temp/temp_test.go | 2 +- 19 files changed, 65 insertions(+), 64 deletions(-) diff --git a/go.mod b/go.mod index 1ae30029ffadd..cdbeb33266c42 100644 --- a/go.mod +++ b/go.mod @@ -103,7 +103,7 @@ require ( github.com/ghodss/yaml 
v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-logr/logr v0.4.0 // indirect - github.com/go-ole/go-ole v1.2.5 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 @@ -232,7 +232,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 - github.com/shirou/gopsutil v3.21.8+incompatible + github.com/shirou/gopsutil/v3 v3.21.10 github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect @@ -283,7 +283,7 @@ require ( golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef + golang.org/x/sys v0.0.0-20211013075003-97ac67df715c golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect @@ -343,6 +343,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect go.opentelemetry.io/otel v1.0.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect diff --git a/go.sum b/go.sum index 1101853e2efa8..2423bce22dc89 100644 --- a/go.sum +++ b/go.sum @@ -795,8 +795,9 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -1430,6 +1431,8 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod 
h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1888,10 +1891,11 @@ github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvh github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= -github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -2550,8 +2554,8 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index 9e795c82a589d..8c22bb923506e 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - cpuUtil "github.com/shirou/gopsutil/cpu" + cpuUtil "github.com/shirou/gopsutil/v3/cpu" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index e51660a0adee6..3dc3242a6ed94 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ 
b/plugins/inputs/cpu/cpu_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - cpuUtil "github.com/shirou/gopsutil/cpu" + cpuUtil "github.com/shirou/gopsutil/v3/cpu" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/system" diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 0a0fbf6f728a3..fc552a232b799 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -52,7 +52,7 @@ func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { // Skip dummy filesystem (procfs, cgroupfs, ...) continue } - mountOpts := parseOptions(partitions[i].Opts) + mountOpts := MountOptions(partitions[i].Opts) tags := map[string]string{ "path": du.Path, "device": strings.Replace(partitions[i].Device, "/dev/", "", -1), @@ -101,10 +101,6 @@ func (opts MountOptions) exists(opt string) bool { return false } -func parseOptions(opts string) MountOptions { - return strings.Split(opts, ",") -} - func init() { ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index 47a822b4410bf..22dd947406ff5 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - diskUtil "github.com/shirou/gopsutil/disk" + diskUtil "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -30,13 +30,13 @@ func TestDiskUsage(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, }, } duAll := []diskUtil.UsageStat{ @@ -137,7 +137,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -169,7 +169,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: "/hostfs/var", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -202,7 +202,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: "/hostfs", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -301,13 +301,13 @@ func TestDiskStats(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, }, } @@ -316,7 +316,7 @@ func TestDiskStats(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, } diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index 3ad203de09362..383e7e81044ec 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/require" ) diff 
--git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index d01bf2a0fa156..84fcbc32eb3ea 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -79,16 +79,16 @@ func (ms *MemStats) Gather(acc telegraf.Accumulator) error { fields["page_tables"] = vm.PageTables fields["shared"] = vm.Shared fields["slab"] = vm.Slab - fields["sreclaimable"] = vm.SReclaimable - fields["sunreclaim"] = vm.SUnreclaim + fields["sreclaimable"] = vm.Sreclaimable + fields["sunreclaim"] = vm.Sunreclaim fields["swap_cached"] = vm.SwapCached fields["swap_free"] = vm.SwapFree fields["swap_total"] = vm.SwapTotal - fields["vmalloc_chunk"] = vm.VMallocChunk - fields["vmalloc_total"] = vm.VMallocTotal - fields["vmalloc_used"] = vm.VMallocUsed - fields["write_back_tmp"] = vm.WritebackTmp - fields["write_back"] = vm.Writeback + fields["vmalloc_chunk"] = vm.VmallocChunk + fields["vmalloc_total"] = vm.VmallocTotal + fields["vmalloc_used"] = vm.VmallocUsed + fields["write_back_tmp"] = vm.WriteBackTmp + fields["write_back"] = vm.WriteBack } acc.AddGauge("mem", fields, nil) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go index 626a1806c4055..06561875753c9 100644 --- a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) @@ -42,16 +42,16 @@ func TestMemStats(t *testing.T) { Mapped: 42236, PageTables: 1236, Shared: 0, - SReclaimable: 1923022848, - SUnreclaim: 157728768, + Sreclaimable: 1923022848, + Sunreclaim: 157728768, SwapCached: 0, SwapFree: 524280, SwapTotal: 524280, - VMallocChunk: 3872908, - VMallocTotal: 3874808, - VMallocUsed: 1416, - Writeback: 0, - WritebackTmp: 0, + VmallocChunk: 3872908, + VmallocTotal: 3874808, + VmallocUsed: 1416, + WriteBack: 0, + WriteBackTmp: 0, } mps.On("VMStat").Return(vms, nil) diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 3c4c3c7ef8d84..9ef3b6fb0d91c 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -6,7 +6,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/net" + "github.com/shirou/gopsutil/v3/net" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 05cf4a72735f0..041e2cae91888 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/process" ) //NativeFinder uses gopsutil to find processes diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 93c64882ae835..f31cef4abe1c6 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" ) type Process interface { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 09b5cc7cfa325..915a1b13f44b4 100644 --- a/plugins/inputs/procstat/procstat.go +++ 
b/plugins/inputs/procstat/procstat.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/process" ) var ( diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 237087aa577dc..5b67232156bc1 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" diff --git a/plugins/inputs/swap/swap_test.go b/plugins/inputs/swap/swap_test.go index 3f97b354e86b4..85a8adb5c184c 100644 --- a/plugins/inputs/swap/swap_test.go +++ b/plugins/inputs/swap/swap_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index e1bd4f84b48e7..765daa817a662 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -5,13 +5,13 @@ import ( "github.com/stretchr/testify/mock" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/disk" + "github.com/shirou/gopsutil/v3/host" - "github.com/shirou/gopsutil/load" - "github.com/shirou/gopsutil/mem" - "github.com/shirou/gopsutil/net" + "github.com/shirou/gopsutil/v3/load" + "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v3/net" ) type MockPS struct { diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 70605cf4fc10a..187fec3d7a794 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -5,13 +5,13 @@ import ( "path/filepath" "strings" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/disk" - "github.com/shirou/gopsutil/host" - "github.com/shirou/gopsutil/mem" - "github.com/shirou/gopsutil/net" - "github.com/influxdata/telegraf/internal" + + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/disk" + "github.com/shirou/gopsutil/v3/host" + "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v3/net" ) type PS interface { diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index ded0e8ba18a22..7bf3c154e72a1 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -10,9 +10,9 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/host" - "github.com/shirou/gopsutil/load" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/host" + "github.com/shirou/gopsutil/v3/load" ) type SystemStats struct { diff --git a/plugins/inputs/temp/temp_test.go b/plugins/inputs/temp/temp_test.go index 9ced8ac14a2ef..c73550f5d318c 100644 --- a/plugins/inputs/temp/temp_test.go +++ b/plugins/inputs/temp/temp_test.go @@ -3,7 +3,7 @@ package temp import ( "testing" - 
"github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/v3/host" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/system" From 8710d2353eb419ff0b8e3e5300e56a60019fd2d0 Mon Sep 17 00:00:00 2001 From: reimda Date: Thu, 18 Nov 2021 13:56:18 -0700 Subject: [PATCH 052/133] fix: skip knxlistener when writing the sample config (#10131) --- config/config.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config/config.go b/config/config.go index d6081aedcfaf3..9333e32ab0b9a 100644 --- a/config/config.go +++ b/config/config.go @@ -560,9 +560,13 @@ func printFilteredInputs(inputFilters []string, commented bool) { // Print Inputs for _, pname := range pnames { + // Skip inputs that are registered twice for backward compatibility if pname == "cisco_telemetry_gnmi" { continue } + if pname == "KNXListener" { + continue + } creator := inputs.Inputs[pname] input := creator() From a439841015ecd16b1dd6828648c0558513c0e03b Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 18 Nov 2021 15:10:07 -0700 Subject: [PATCH 053/133] fix: update makefile indents to not always run which (#10126) --- Makefile | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 52362a307790c..7acd336cba7bc 100644 --- a/Makefile +++ b/Makefile @@ -148,26 +148,26 @@ lint-install: .PHONY: lint lint: -ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 -endif + ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please run: make lint-install) + exit 1 + endif golangci-lint run -ifeq (, $(shell which markdownlint-cli)) - $(info markdownlint-cli can't be found, please run: make lint-install) - exit 1 -endif + ifeq (, $(shell which markdownlint)) + $(info markdownlint can't be found, please run: make lint-install) + exit 1 + endif markdownlint-cli .PHONY: lint-branch lint-branch: -ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 -endif + ifeq (, $(shell which golangci-lint)) + $(info golangci-lint can't be found, please run: make lint-install) + exit 1 + endif golangci-lint run --new-from-rev master From 4f2ade5305c9d11acfcd321bd118f787cf7b9898 Mon Sep 17 00:00:00 2001 From: Nicolas Filotto Date: Thu, 18 Nov 2021 23:37:59 +0100 Subject: [PATCH 054/133] feat: Add support of aggregator as Starlark script (#9419) --- plugins/aggregators/all/all.go | 1 + plugins/aggregators/starlark/README.md | 103 +++++ plugins/aggregators/starlark/starlark.go | 144 ++++++ plugins/aggregators/starlark/starlark_test.go | 432 ++++++++++++++++++ .../aggregators/starlark/testdata/merge.star | 31 ++ .../starlark/testdata/min_max.star | 53 +++ .../starlark/builtins.go | 32 +- .../starlark/field_dict.go | 24 + .../starlark/logging.go | 0 .../{processors => common}/starlark/metric.go | 0 plugins/common/starlark/starlark.go | 182 ++++++++ .../starlark/tag_dict.go | 24 + plugins/processors/starlark/starlark.go | 151 +----- plugins/processors/starlark/starlark_test.go | 203 ++++---- 14 files changed, 1123 insertions(+), 257 deletions(-) create mode 100644 plugins/aggregators/starlark/README.md create mode 100644 plugins/aggregators/starlark/starlark.go create mode 100644 plugins/aggregators/starlark/starlark_test.go create mode 100644 plugins/aggregators/starlark/testdata/merge.star create mode 100644 plugins/aggregators/starlark/testdata/min_max.star rename plugins/{processors => 
common}/starlark/builtins.go (91%) rename plugins/{processors => common}/starlark/field_dict.go (91%) rename plugins/{processors => common}/starlark/logging.go (100%) rename plugins/{processors => common}/starlark/metric.go (100%) create mode 100644 plugins/common/starlark/starlark.go rename plugins/{processors => common}/starlark/tag_dict.go (87%) diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index 20d5b5ea2e482..c3a6f274b426d 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -9,5 +9,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/aggregators/merge" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" _ "github.com/influxdata/telegraf/plugins/aggregators/quantile" + _ "github.com/influxdata/telegraf/plugins/aggregators/starlark" _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" ) diff --git a/plugins/aggregators/starlark/README.md b/plugins/aggregators/starlark/README.md new file mode 100644 index 0000000000000..01bcf963c1258 --- /dev/null +++ b/plugins/aggregators/starlark/README.md @@ -0,0 +1,103 @@ +# Starlark Aggregator + +The `starlark` aggregator allows to implement a custom aggregator plugin with a Starlark script. The Starlark +script needs to be composed of the three methods defined in the Aggregator plugin interface which are `add`, `push` and `reset`. + +The Starlark Aggregator plugin calls the Starlark function `add` to add the metrics to the aggregator, then calls the Starlark function `push` to push the resulting metrics into the accumulator and finally calls the Starlark function `reset` to reset the entire state of the plugin. + +The Starlark functions can use the global function `state` to keep temporary the metrics to aggregate. + +The Starlark language is a dialect of Python, and will be familiar to those who +have experience with the Python language. However, there are major [differences](#python-differences). +Existing Python code is unlikely to work unmodified. The execution environment +is sandboxed, and it is not possible to do I/O operations such as reading from +files or sockets. + +The **[Starlark specification][]** has details about the syntax and available +functions. + +## Configuration + +```toml +[[aggregators.starlark]] + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true +``` + +## Usage + +The Starlark code should contain a function called `add` that takes a metric as argument. +The function will be called with each metric to add, and doesn't return anything. + +```python +def add(metric): + state["last"] = metric +``` + +The Starlark code should also contain a function called `push` that doesn't take any argument. +The function will be called to compute the aggregation, and returns the metrics to push to the accumulator. 
+ +```python +def push(): + return state.get("last") +``` + +The Starlark code should also contain a function called `reset` that doesn't take any argument. +The function will be called to reset the plugin, and doesn't return anything. + +```python +def push(): + state.clear() +``` + +For a list of available types and functions that can be used in the code, see +the [Starlark specification][]. + +## Python Differences + +Refer to the section [Python Differences](plugins/processors/starlark/README.md#python-differences) of the documentation about the Starlark processor. + +## Libraries available + +Refer to the section [Libraries available](plugins/processors/starlark/README.md#libraries-available) of the documentation about the Starlark processor. + +## Common Questions + +Refer to the section [Common Questions](plugins/processors/starlark/README.md#common-questions) of the documentation about the Starlark processor. + +## Examples + +- [minmax](/plugins/aggregators/starlark/testdata/min_max.star) - A minmax aggregator implemented with a Starlark script. +- [merge](/plugins/aggregators/starlark/testdata/merge.star) - A merge aggregator implemented with a Starlark script. + +[All examples](/plugins/aggregators/starlark/testdata) are in the testdata folder. + +Open a Pull Request to add any other useful Starlark examples. + +[Starlark specification]: https://github.com/google/starlark-go/blob/master/doc/spec.md +[dict]: https://github.com/google/starlark-go/blob/master/doc/spec.md#dictionaries diff --git a/plugins/aggregators/starlark/starlark.go b/plugins/aggregators/starlark/starlark.go new file mode 100644 index 0000000000000..9fa7d9d62c94d --- /dev/null +++ b/plugins/aggregators/starlark/starlark.go @@ -0,0 +1,144 @@ +package starlark //nolint - Needed to avoid getting import-shadowing: The name 'starlark' shadows an import name (revive) + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" + common "github.com/influxdata/telegraf/plugins/common/starlark" + "go.starlark.net/starlark" +) + +const ( + description = "Aggregate metrics using a Starlark script" + sampleConfig = ` + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true +` +) + +type Starlark struct { + common.StarlarkCommon +} + +func (s *Starlark) Init() error { + // Execute source + err := s.StarlarkCommon.Init() + if err != nil { + return err + } + + // The source should define an add function. + err = s.AddFunction("add", &common.Metric{}) + if err != nil { + return err + } + + // The source should define a push function. + err = s.AddFunction("push") + if err != nil { + return err + } + + // The source should define a reset function. 
+ err = s.AddFunction("reset") + if err != nil { + return err + } + + return nil +} + +func (s *Starlark) SampleConfig() string { + return sampleConfig +} + +func (s *Starlark) Description() string { + return description +} + +func (s *Starlark) Add(metric telegraf.Metric) { + parameters, found := s.GetParameters("add") + if !found { + s.Log.Errorf("The parameters of the add function could not be found") + return + } + parameters[0].(*common.Metric).Wrap(metric) + + _, err := s.Call("add") + if err != nil { + s.LogError(err) + } +} + +func (s *Starlark) Push(acc telegraf.Accumulator) { + rv, err := s.Call("push") + if err != nil { + s.LogError(err) + acc.AddError(err) + return + } + + switch rv := rv.(type) { + case *starlark.List: + iter := rv.Iterate() + defer iter.Done() + var v starlark.Value + for iter.Next(&v) { + switch v := v.(type) { + case *common.Metric: + m := v.Unwrap() + acc.AddMetric(m) + default: + s.Log.Errorf("Invalid type returned in list: %s", v.Type()) + } + } + case *common.Metric: + m := rv.Unwrap() + acc.AddMetric(m) + case starlark.NoneType: + default: + s.Log.Errorf("Invalid type returned: %T", rv) + } +} + +func (s *Starlark) Reset() { + _, err := s.Call("reset") + if err != nil { + s.LogError(err) + } +} + +// init initializes starlark aggregator plugin +func init() { + aggregators.Add("starlark", func() telegraf.Aggregator { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + }, + } + }) +} diff --git a/plugins/aggregators/starlark/starlark_test.go b/plugins/aggregators/starlark/starlark_test.go new file mode 100644 index 0000000000000..a45f9e84cd515 --- /dev/null +++ b/plugins/aggregators/starlark/starlark_test.go @@ -0,0 +1,432 @@ +package starlark + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + common "github.com/influxdata/telegraf/plugins/common/starlark" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var m1 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(1), + "c": int64(1), + "d": int64(1), + "e": int64(1), + "f": int64(2), + "g": int64(2), + "h": int64(2), + "i": int64(2), + "j": int64(3), + }, + time.Now(), +) +var m2 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(3), + "c": int64(3), + "d": int64(3), + "e": int64(3), + "f": int64(1), + "g": int64(1), + "h": int64(1), + "i": int64(1), + "j": int64(1), + "k": int64(200), + "l": int64(200), + "ignoreme": "string", + "andme": true, + }, + time.Now(), +) + +func BenchmarkApply(b *testing.B) { + minmax, _ := newMinMax() + + for n := 0; n < b.N; n++ { + minmax.Add(m1) + minmax.Add(m2) + } +} + +// Test two metrics getting added. 
+func TestMinMaxWithPeriod(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + + minmax.Add(m1) + minmax.Add(m2) + minmax.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(1), + "c_max": int64(3), + "c_min": int64(1), + "d_max": int64(3), + "d_min": int64(1), + "e_max": int64(3), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(1), + "g_max": int64(2), + "g_min": int64(1), + "h_max": int64(2), + "h_min": int64(1), + "i_max": int64(2), + "i_min": int64(1), + "j_max": int64(3), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test two metrics getting added with a push/reset in between (simulates +// getting added in different periods.) +func TestMinMaxDifferentPeriods(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + minmax.Add(m1) + minmax.Push(&acc) + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(1), + "b_min": int64(1), + "c_max": int64(1), + "c_min": int64(1), + "d_max": int64(1), + "d_min": int64(1), + "e_max": int64(1), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(2), + "g_max": int64(2), + "g_min": int64(2), + "h_max": int64(2), + "h_min": int64(2), + "i_max": int64(2), + "i_min": int64(2), + "j_max": int64(3), + "j_min": int64(3), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) + + acc.ClearMetrics() + minmax.Reset() + minmax.Add(m2) + minmax.Push(&acc) + expectedFields = map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(3), + "c_max": int64(3), + "c_min": int64(3), + "d_max": int64(3), + "d_min": int64(3), + "e_max": int64(3), + "e_min": int64(3), + "f_max": int64(1), + "f_min": int64(1), + "g_max": int64(1), + "g_min": int64(1), + "h_max": int64(1), + "h_min": int64(1), + "i_max": int64(1), + "i_min": int64(1), + "j_max": int64(1), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags = map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func newMinMax() (*Starlark, error) { + return newStarlarkFromScript("testdata/min_max.star") +} + +func TestSimple(t *testing.T) { + plugin, err := newMerge() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestNanosecondPrecision(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) 
+ + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + acc.SetPrecision(time.Second) + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 1), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestReset(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + plugin.Reset() + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func newMerge() (*Starlark, error) { + return newStarlarkFromScript("testdata/merge.star") +} + +func TestLastFromSource(t *testing.T) { + acc := testutil.Accumulator{} + plugin, err := newStarlarkFromSource(` +state = {} +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +`) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_idle": 31, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Push(&acc) + expectedFields := map[string]interface{}{ + "time_idle": int64(31), + } + expectedTags := map[string]string{ + "cpu": "cpu2", + } + acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags) + plugin.Reset() +} + +func newStarlarkFromSource(source string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Source: source, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} + +func newStarlarkFromScript(script string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Script: script, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} diff --git a/plugins/aggregators/starlark/testdata/merge.star b/plugins/aggregators/starlark/testdata/merge.star new file mode 100644 index 0000000000000..77c5148ca9f76 --- /dev/null +++ 
b/plugins/aggregators/starlark/testdata/merge.star @@ -0,0 +1,31 @@ +# Example of a merge aggregator implemented with a starlark script. +load('time.star', 'time') +state = {} +def add(metric): + metrics = state.get("metrics") + if metrics == None: + metrics = {} + state["metrics"] = metrics + state["ordered"] = [] + gId = groupID(metric) + m = metrics.get(gId) + if m == None: + m = deepcopy(metric) + metrics[gId] = m + state["ordered"].append(m) + else: + for k, v in metric.fields.items(): + m.fields[k] = v + +def push(): + return state.get("ordered") + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + "-" + key = key + "-" + str(metric.time) + return hash(key) \ No newline at end of file diff --git a/plugins/aggregators/starlark/testdata/min_max.star b/plugins/aggregators/starlark/testdata/min_max.star new file mode 100644 index 0000000000000..f8b23355c8e51 --- /dev/null +++ b/plugins/aggregators/starlark/testdata/min_max.star @@ -0,0 +1,53 @@ +# Example of a min_max aggregator implemented with a starlark script. + +supported_types = (["int", "float"]) +state = {} +def add(metric): + gId = groupID(metric) + aggregate = state.get(gId) + if aggregate == None: + aggregate = { + "name": metric.name, + "tags": metric.tags, + "fields": {} + } + for k, v in metric.fields.items(): + if type(v) in supported_types: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + state[gId] = aggregate + else: + for k, v in metric.fields.items(): + if type(v) in supported_types: + min_max = aggregate["fields"].get(k) + if min_max == None: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + elif v < min_max["min"]: + aggregate["fields"][k]["min"] = v + elif v > min_max["max"]: + aggregate["fields"][k]["max"] = v + +def push(): + metrics = [] + for a in state: + fields = {} + for k in state[a]["fields"]: + fields[k + "_min"] = state[a]["fields"][k]["min"] + fields[k + "_max"] = state[a]["fields"][k]["max"] + m = Metric(state[a]["name"], state[a]["tags"], fields) + metrics.append(m) + return metrics + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + return hash(key) \ No newline at end of file diff --git a/plugins/processors/starlark/builtins.go b/plugins/common/starlark/builtins.go similarity index 91% rename from plugins/processors/starlark/builtins.go rename to plugins/common/starlark/builtins.go index 6876fe9636ab5..7adcd115d13ff 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/common/starlark/builtins.go @@ -10,16 +10,42 @@ import ( ) func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - var name starlark.String - if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil { + var ( + name starlark.String + tags, fields starlark.Value + ) + if err := starlark.UnpackArgs("Metric", args, kwargs, "name", &name, "tags?", &tags, "fields?", &fields); err != nil { return nil, err } - m := metric.New(string(name), nil, nil, time.Now()) + allFields, err := toFields(fields) + if err != nil { + return nil, err + } + allTags, err := toTags(tags) + if err != nil { + return nil, err + } + + m := metric.New(string(name), allTags, allFields, time.Now()) return &Metric{metric: m}, nil } +func toString(value starlark.Value, errorMsg string) (string, error) { + if value, ok := value.(starlark.String); ok { + 
return string(value), nil + } + return "", fmt.Errorf(errorMsg, value) +} + +func items(value starlark.Value, errorMsg string) ([]starlark.Tuple, error) { + if iter, ok := value.(starlark.IterableMapping); ok { + return iter.Items(), nil + } + return nil, fmt.Errorf(errorMsg, value) +} + func deepcopy(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *Metric if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil { diff --git a/plugins/processors/starlark/field_dict.go b/plugins/common/starlark/field_dict.go similarity index 91% rename from plugins/processors/starlark/field_dict.go rename to plugins/common/starlark/field_dict.go index 4a332b8268d9d..08f6249023e17 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/common/starlark/field_dict.go @@ -274,3 +274,27 @@ func asGoValue(value interface{}) (interface{}, error) { return nil, errors.New("invalid starlark type") } + +// ToFields converts a starlark.Value to a map of values. +func toFields(value starlark.Value) (map[string]interface{}, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of fields") + if err != nil { + return nil, err + } + result := make(map[string]interface{}, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for fields") + if err != nil { + return nil, err + } + value, err := asGoValue(item[1]) + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/processors/starlark/logging.go b/plugins/common/starlark/logging.go similarity index 100% rename from plugins/processors/starlark/logging.go rename to plugins/common/starlark/logging.go diff --git a/plugins/processors/starlark/metric.go b/plugins/common/starlark/metric.go similarity index 100% rename from plugins/processors/starlark/metric.go rename to plugins/common/starlark/metric.go diff --git a/plugins/common/starlark/starlark.go b/plugins/common/starlark/starlark.go new file mode 100644 index 0000000000000..5f365519871d0 --- /dev/null +++ b/plugins/common/starlark/starlark.go @@ -0,0 +1,182 @@ +package starlark //nolint - Needed to avoid getting import-shadowing: The name 'starlark' shadows an import name (revive) + +import ( + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + "go.starlark.net/lib/math" + "go.starlark.net/lib/time" + "go.starlark.net/resolve" + "go.starlark.net/starlark" + "go.starlark.net/starlarkjson" +) + +type StarlarkCommon struct { + Source string `toml:"source"` + Script string `toml:"script"` + Constants map[string]interface{} `toml:"constants"` + + Log telegraf.Logger `toml:"-"` + StarlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) + + thread *starlark.Thread + globals starlark.StringDict + functions map[string]*starlark.Function + parameters map[string]starlark.Tuple +} + +func (s *StarlarkCommon) Init() error { + if s.Source == "" && s.Script == "" { + return errors.New("one of source or script must be set") + } + if s.Source != "" && s.Script != "" { + return errors.New("both source or script cannot be set") + } + + s.thread = &starlark.Thread{ + Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, + Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { + return s.StarlarkLoadFunc(module, s.Log) + }, + } + + builtins := 
starlark.StringDict{} + builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) + builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) + builtins["catch"] = starlark.NewBuiltin("catch", catch) + err := s.addConstants(&builtins) + if err != nil { + return err + } + + program, err := s.sourceProgram(builtins, "") + if err != nil { + return err + } + + // Execute source + globals, err := program.Init(s.thread, builtins) + if err != nil { + return err + } + // Make available a shared state to the apply function + globals["state"] = starlark.NewDict(0) + + // Freeze the global state. This prevents modifications to the processor + // state and prevents scripts from containing errors storing tracking + // metrics. Tasks that require global state will not be possible due to + // this, so maybe we should relax this in the future. + globals.Freeze() + + s.globals = globals + s.functions = make(map[string]*starlark.Function) + s.parameters = make(map[string]starlark.Tuple) + return nil +} + +func (s *StarlarkCommon) GetParameters(name string) (starlark.Tuple, bool) { + parameters, found := s.parameters[name] + return parameters, found +} + +func (s *StarlarkCommon) AddFunction(name string, params ...starlark.Value) error { + globalFn, found := s.globals[name] + if !found { + return fmt.Errorf("%s is not defined", name) + } + + fn, found := globalFn.(*starlark.Function) + if !found { + return fmt.Errorf("%s is not a function", name) + } + + if fn.NumParams() != len(params) { + return fmt.Errorf("%s function must take %d parameter(s)", name, len(params)) + } + p := make(starlark.Tuple, len(params)) + for i, param := range params { + p[i] = param + } + s.functions[name] = fn + s.parameters[name] = params + return nil +} + +// Add all the constants defined in the plugin as constants of the script +func (s *StarlarkCommon) addConstants(builtins *starlark.StringDict) error { + for key, val := range s.Constants { + sVal, err := asStarlarkValue(val) + if err != nil { + return fmt.Errorf("converting type %T failed: %v", val, err) + } + (*builtins)[key] = sVal + } + return nil +} + +func (s *StarlarkCommon) sourceProgram(builtins starlark.StringDict, filename string) (*starlark.Program, error) { + var src interface{} + if s.Source != "" { + src = s.Source + } + _, program, err := starlark.SourceProgram(s.Script, src, builtins.Has) + return program, err +} + +// Call calls the function corresponding to the given name. 
+func (s *StarlarkCommon) Call(name string) (starlark.Value, error) { + fn, ok := s.functions[name] + if !ok { + return nil, fmt.Errorf("function %q does not exist", name) + } + args, ok := s.parameters[name] + if !ok { + return nil, fmt.Errorf("params for function %q do not exist", name) + } + return starlark.Call(s.thread, fn, args, nil) +} + +func (s *StarlarkCommon) LogError(err error) { + if err, ok := err.(*starlark.EvalError); ok { + for _, line := range strings.Split(err.Backtrace(), "\n") { + s.Log.Error(line) + } + } else { + s.Log.Error(err.Msg) + } +} + +func LoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { + switch module { + case "json.star": + return starlark.StringDict{ + "json": starlarkjson.Module, + }, nil + case "logging.star": + return starlark.StringDict{ + "log": LogModule(logger), + }, nil + case "math.star": + return starlark.StringDict{ + "math": math.Module, + }, nil + case "time.star": + return starlark.StringDict{ + "time": time.Module, + }, nil + default: + return nil, errors.New("module " + module + " is not available") + } +} + +func init() { + // https://github.com/bazelbuild/starlark/issues/20 + resolve.AllowNestedDef = true + resolve.AllowLambda = true + resolve.AllowFloat = true + resolve.AllowSet = true + resolve.AllowGlobalReassign = true + resolve.AllowRecursion = true +} diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/common/starlark/tag_dict.go similarity index 87% rename from plugins/processors/starlark/tag_dict.go rename to plugins/common/starlark/tag_dict.go index 7dbb8c12d0ed6..999f8736575db 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/common/starlark/tag_dict.go @@ -196,3 +196,27 @@ func (i *TagIterator) Next(p *starlark.Value) bool { func (i *TagIterator) Done() { i.tagIterCount-- } + +// ToTags converts a starlark.Value to a map of string. 
+func toTags(value starlark.Value) (map[string]string, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of tags") + if err != nil { + return nil, err + } + result := make(map[string]string, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for tags") + if err != nil { + return nil, err + } + value, err := toString(item[1], "The type %T is unsupported as type of value for tags") + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/processors/starlark/starlark.go b/plugins/processors/starlark/starlark.go index 44f78fa6b6988..5bf441f2fd05b 100644 --- a/plugins/processors/starlark/starlark.go +++ b/plugins/processors/starlark/starlark.go @@ -1,17 +1,12 @@ package starlark import ( - "errors" "fmt" - "strings" "github.com/influxdata/telegraf" + common "github.com/influxdata/telegraf/plugins/common/starlark" "github.com/influxdata/telegraf/plugins/processors" - "go.starlark.net/lib/math" - "go.starlark.net/lib/time" - "go.starlark.net/resolve" "go.starlark.net/starlark" - "go.starlark.net/starlarkjson" ) const ( @@ -40,97 +35,29 @@ def apply(metric): ) type Starlark struct { - Source string `toml:"source"` - Script string `toml:"script"` - Constants map[string]interface{} `toml:"constants"` + common.StarlarkCommon - Log telegraf.Logger `toml:"-"` - - thread *starlark.Thread - applyFunc *starlark.Function - args starlark.Tuple - results []telegraf.Metric - starlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) + results []telegraf.Metric } func (s *Starlark) Init() error { - if s.Source == "" && s.Script == "" { - return errors.New("one of source or script must be set") - } - if s.Source != "" && s.Script != "" { - return errors.New("both source or script cannot be set") - } - - s.thread = &starlark.Thread{ - Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, - Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { - return s.starlarkLoadFunc(module, s.Log) - }, - } - - builtins := starlark.StringDict{} - builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) - builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) - builtins["catch"] = starlark.NewBuiltin("catch", catch) - s.addConstants(&builtins) - - program, err := s.sourceProgram(builtins) + err := s.StarlarkCommon.Init() if err != nil { return err } - // Execute source - globals, err := program.Init(s.thread, builtins) + // The source should define an apply function. + err = s.AddFunction("apply", &common.Metric{}) if err != nil { return err } - // Make available a shared state to the apply function - globals["state"] = starlark.NewDict(0) - - // Freeze the global state. This prevents modifications to the processor - // state and prevents scripts from containing errors storing tracking - // metrics. Tasks that require global state will not be possible due to - // this, so maybe we should relax this in the future. - globals.Freeze() - - // The source should define an apply function. 
- apply := globals["apply"] - - if apply == nil { - return errors.New("apply is not defined") - } - - var ok bool - if s.applyFunc, ok = apply.(*starlark.Function); !ok { - return errors.New("apply is not a function") - } - - if s.applyFunc.NumParams() != 1 { - return errors.New("apply function must take one parameter") - } - - // Reusing the same metric wrapper to skip an allocation. This will cause - // any saved references to point to the new metric, but due to freezing the - // globals none should exist. - s.args = make(starlark.Tuple, 1) - s.args[0] = &Metric{} - // Preallocate a slice for return values. s.results = make([]telegraf.Metric, 0, 10) return nil } -func (s *Starlark) sourceProgram(builtins starlark.StringDict) (*starlark.Program, error) { - if s.Source != "" { - _, program, err := starlark.SourceProgram("processor.starlark", s.Source, builtins.Has) - return program, err - } - _, program, err := starlark.SourceProgram(s.Script, nil, builtins.Has) - return program, err -} - func (s *Starlark) SampleConfig() string { return sampleConfig } @@ -144,15 +71,15 @@ func (s *Starlark) Start(_ telegraf.Accumulator) error { } func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { - s.args[0].(*Metric).Wrap(metric) + parameters, found := s.GetParameters("apply") + if !found { + return fmt.Errorf("The parameters of the apply function could not be found") + } + parameters[0].(*common.Metric).Wrap(metric) - rv, err := starlark.Call(s.thread, s.applyFunc, s.args, nil) + rv, err := s.Call("apply") if err != nil { - if err, ok := err.(*starlark.EvalError); ok { - for _, line := range strings.Split(err.Backtrace(), "\n") { - s.Log.Error(line) - } - } + s.LogError(err) metric.Reject() return err } @@ -164,7 +91,7 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { var v starlark.Value for iter.Next(&v) { switch v := v.(type) { - case *Metric: + case *common.Metric: m := v.Unwrap() if containsMetric(s.results, m) { s.Log.Errorf("Duplicate metric reference detected") @@ -188,7 +115,7 @@ func (s *Starlark) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { s.results[i] = nil } s.results = s.results[:0] - case *Metric: + case *common.Metric: m := rv.Unwrap() // If the script returned a different metric, mark this metric as @@ -209,17 +136,6 @@ func (s *Starlark) Stop() error { return nil } -// Add all the constants defined in the plugin as constants of the script -func (s *Starlark) addConstants(builtins *starlark.StringDict) { - for key, val := range s.Constants { - sVal, err := asStarlarkValue(val) - if err != nil { - s.Log.Errorf("Unsupported type: %T", val) - } - (*builtins)[key] = sVal - } -} - func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool { for _, m := range metrics { if m == metric { @@ -229,43 +145,12 @@ func containsMetric(metrics []telegraf.Metric, metric telegraf.Metric) bool { return false } -func init() { - // https://github.com/bazelbuild/starlark/issues/20 - resolve.AllowNestedDef = true - resolve.AllowLambda = true - resolve.AllowFloat = true - resolve.AllowSet = true - resolve.AllowGlobalReassign = true - resolve.AllowRecursion = true -} - func init() { processors.AddStreaming("starlark", func() telegraf.StreamingProcessor { return &Starlark{ - starlarkLoadFunc: loadFunc, + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + }, } }) } - -func loadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { - switch module { - case 
"json.star": - return starlark.StringDict{ - "json": starlarkjson.Module, - }, nil - case "logging.star": - return starlark.StringDict{ - "log": LogModule(logger), - }, nil - case "math.star": - return starlark.StringDict{ - "math": math.Module, - }, nil - case "time.star": - return starlark.StringDict{ - "time": time.Module, - }, nil - default: - return nil, errors.New("module " + module + " is not available") - } -} diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 6ad169bbf3f87..3a1f955a884c2 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -11,6 +11,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + common "github.com/influxdata/telegraf/plugins/common/starlark" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -22,78 +23,63 @@ import ( // Tests for runtime errors in the processors Init function. func TestInitError(t *testing.T) { tests := []struct { - name string - plugin *Starlark + name string + constants map[string]interface{} + plugin *Starlark }{ { - name: "source must define apply", - plugin: &Starlark{ - Source: "", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "source must define apply", + plugin: newStarlarkFromSource(""), }, { name: "apply must be a function", - plugin: &Starlark{ - Source: ` + plugin: newStarlarkFromSource(` apply = 42 -`, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, +`), }, { name: "apply function must take one arg", - plugin: &Starlark{ - Source: ` + plugin: newStarlarkFromSource(` def apply(): pass -`, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, +`), }, { name: "package scope must have valid syntax", - plugin: &Starlark{ - Source: ` + plugin: newStarlarkFromSource(` for -`, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, +`), }, { - name: "no source no script", - plugin: &Starlark{ - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "no source no script", + plugin: newStarlarkNoScript(), }, { name: "source and script", - plugin: &Starlark{ - Source: ` + plugin: newStarlarkFromSource(` def apply(): pass -`, - Script: "testdata/ratio.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, +`), }, { - name: "script file not found", - plugin: &Starlark{ - Script: "testdata/file_not_found.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, + name: "script file not found", + plugin: newStarlarkFromScript("testdata/file_not_found.star"), + }, + { + name: "source and script", + plugin: newStarlarkFromSource(` +def apply(metric): + metric.fields["p1"] = unsupported_type + return metric +`), + constants: map[string]interface{}{ + "unsupported_type": time.Now(), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + tt.plugin.Constants = tt.constants err := tt.plugin.Init() require.Error(t, err) }) @@ -227,11 +213,7 @@ def apply(metric): for _, tt := range applyTests { t.Run(tt.name, func(t *testing.T) { - plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - } + plugin := newStarlarkFromSource(tt.source) err := plugin.Init() require.NoError(t, err) @@ -2545,7 +2527,6 @@ def apply(metric): 2: "two", "3": "three", }, - "unsupported_type": time.Now(), }, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ 
-2575,12 +2556,8 @@ def apply(metric): for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, - Constants: tt.constants, - starlarkLoadFunc: testLoadFunc, - } + plugin := newStarlarkFromSource(tt.source) + plugin.Constants = tt.constants err := plugin.Init() require.NoError(t, err) @@ -2637,7 +2614,6 @@ def apply(metric): debug_mode = true supported_values = ["2", "3"] supported_entries = { "2" = "two", "3" = "three" } - unsupported_type = 2009-06-12 `, input: []telegraf.Metric{ testutil.MustMetric("cpu", @@ -2717,12 +2693,8 @@ func TestScript(t *testing.T) { expectedErrorStr string }{ { - name: "rename", - plugin: &Starlark{ - Script: "testdata/rename.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "rename", + plugin: newStarlarkFromScript("testdata/rename.star"), input: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{ @@ -2745,12 +2717,8 @@ func TestScript(t *testing.T) { }, }, { - name: "drop fields by type", - plugin: &Starlark{ - Script: "testdata/drop_string_fields.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "drop fields by type", + plugin: newStarlarkFromScript("testdata/drop_string_fields.star"), input: []telegraf.Metric{ testutil.MustMetric("device", map[string]string{}, @@ -2777,12 +2745,8 @@ func TestScript(t *testing.T) { }, }, { - name: "drop fields with unexpected type", - plugin: &Starlark{ - Script: "testdata/drop_fields_with_unexpected_type.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "drop fields with unexpected type", + plugin: newStarlarkFromScript("testdata/drop_fields_with_unexpected_type.star"), input: []telegraf.Metric{ testutil.MustMetric("device", map[string]string{}, @@ -2812,12 +2776,8 @@ func TestScript(t *testing.T) { }, }, { - name: "scale", - plugin: &Starlark{ - Script: "testdata/scale.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "scale", + plugin: newStarlarkFromScript("testdata/scale.star"), input: []telegraf.Metric{ testutil.MustMetric("cpu", map[string]string{}, @@ -2834,12 +2794,8 @@ func TestScript(t *testing.T) { }, }, { - name: "ratio", - plugin: &Starlark{ - Script: "testdata/ratio.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "ratio", + plugin: newStarlarkFromScript("testdata/ratio.star"), input: []telegraf.Metric{ testutil.MustMetric("mem", map[string]string{}, @@ -2863,12 +2819,8 @@ func TestScript(t *testing.T) { }, }, { - name: "logging", - plugin: &Starlark{ - Script: "testdata/logging.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "logging", + plugin: newStarlarkFromScript("testdata/logging.star"), input: []telegraf.Metric{ testutil.MustMetric("log", map[string]string{}, @@ -2889,12 +2841,8 @@ func TestScript(t *testing.T) { }, }, { - name: "multiple_metrics", - plugin: &Starlark{ - Script: "testdata/multiple_metrics.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "multiple_metrics", + plugin: newStarlarkFromScript("testdata/multiple_metrics.star"), input: []telegraf.Metric{ testutil.MustMetric("mm", map[string]string{}, @@ -2922,12 +2870,8 @@ func TestScript(t *testing.T) { }, }, { - name: "multiple_metrics_with_json", - plugin: &Starlark{ - Script: "testdata/multiple_metrics_with_json.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "multiple_metrics_with_json", + plugin: 
newStarlarkFromScript("testdata/multiple_metrics_with_json.star"), input: []telegraf.Metric{ testutil.MustMetric("json", map[string]string{}, @@ -2955,12 +2899,8 @@ func TestScript(t *testing.T) { }, }, { - name: "fail", - plugin: &Starlark{ - Script: "testdata/fail.star", - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - }, + name: "fail", + plugin: newStarlarkFromScript("testdata/fail.star"), input: []telegraf.Metric{ testutil.MustMetric("fail", map[string]string{}, @@ -3246,11 +3186,7 @@ def apply(metric): for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { - plugin := &Starlark{ - Source: tt.source, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - } + plugin := newStarlarkFromSource(tt.source) err := plugin.Init() require.NoError(b, err) @@ -3292,11 +3228,7 @@ func TestAllScriptTestData(t *testing.T) { if expectedErrorStr == "" { outputMetrics = parseMetricsFrom(t, lines, "Example Output:") } - plugin := &Starlark{ - Script: fn, - Log: testutil.Logger{}, - starlarkLoadFunc: testLoadFunc, - } + plugin := newStarlarkFromScript(fn) require.NoError(t, plugin.Init()) acc := &testutil.Accumulator{} @@ -3370,7 +3302,7 @@ func parseErrorMessage(t *testing.T, lines []string, header string) string { } func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { - result, err := loadFunc(module, logger) + result, err := common.LoadFunc(module, logger) if err != nil { return nil, err } @@ -3387,3 +3319,32 @@ func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, e func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil } + +func newStarlarkFromSource(source string) *Starlark { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: testLoadFunc, + Log: testutil.Logger{}, + Source: source, + }, + } +} + +func newStarlarkFromScript(script string) *Starlark { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: testLoadFunc, + Log: testutil.Logger{}, + Script: script, + }, + } +} + +func newStarlarkNoScript() *Starlark { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: testLoadFunc, + Log: testutil.Logger{}, + }, + } +} From 5f9bd0d9514e94389cb5c5e384a2856f644a4c31 Mon Sep 17 00:00:00 2001 From: Mya Date: Fri, 19 Nov 2021 12:26:19 -0700 Subject: [PATCH 055/133] chore: remove nolint (#10138) --- plugins/aggregators/starlark/starlark.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/aggregators/starlark/starlark.go b/plugins/aggregators/starlark/starlark.go index 9fa7d9d62c94d..2823d1ed73b9b 100644 --- a/plugins/aggregators/starlark/starlark.go +++ b/plugins/aggregators/starlark/starlark.go @@ -1,4 +1,4 @@ -package starlark //nolint - Needed to avoid getting import-shadowing: The name 'starlark' shadows an import name (revive) +package starlark import ( "github.com/influxdata/telegraf" From b89ef94777a56ae4bc52bbdcaaf574f5cc4e63f1 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 23 Nov 2021 08:20:39 -0700 Subject: [PATCH 056/133] feat: enable extracting tag values from MQTT topics (#9995) --- plugins/inputs/mqtt_consumer/README.md | 61 +++- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 261 ++++++++++++------ .../mqtt_consumer/mqtt_consumer_test.go | 134 ++++++++- 3 files changed, 359 insertions(+), 97 deletions(-) diff --git 
a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 3fd128eb85e10..19b57f79a1a32 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -3,7 +3,7 @@ The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics and creates metrics using one of the supported [input data formats][]. -### Configuration +## Configuration ```toml [[inputs.mqtt_consumer]] @@ -73,6 +73,63 @@ and creates metrics using one of the supported [input data formats][]. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Values supported are int, float, uint + # [inputs.mqtt_consumer.topic_parsing.types] + # key = type +``` + +## About Topic Parsing + +The MQTT topic as a whole is stored as a tag, but this can be far too coarse +to be easily used when working with the data further down the line. Topic parsing +allows tag values to be extracted from the MQTT topic, letting you +store the information provided in the topic in a meaningful way. An `_` denotes an +ignored entry in the topic path. Please see the following example. + +## Example Configuration for topic parsing + +```toml +[[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/+/cpu/23", + ] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "float" + + [[inputs.mqtt_consumer.topic_parsing]] + topic = "telegraf/one/cpu/23" + measurement = "_/_/measurement/_" + tags = "tag/_/_/_" + fields = "_/_/_/test" + [inputs.mqtt_consumer.topic_parsing.types] + test = "int" +``` + +Result: + +```shell +cpu,host=pop-os,tag=telegraf,topic=telegraf/one/cpu/23 value=45,test=23i 1637014942460689291 ``` ### Metrics @@ -80,5 +137,7 @@ and creates metrics using one of the supported [input data formats][].
- All measurements are tagged with the incoming topic, ie `topic=telegraf/host01/cpu` +- example when [[inputs.mqtt_consumer.topic_parsing]] is set + [mqtt]: https://mqtt.org [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 3e88cecbbce45..890ed9f5d1a34 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" + "strconv" "strings" "sync" "time" mqtt "github.com/eclipse/paho.mqtt.golang" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -20,8 +20,7 @@ import ( var ( // 30 Seconds is the default used by paho.mqtt.golang - defaultConnectionTimeout = config.Duration(30 * time.Second) - + defaultConnectionTimeout = config.Duration(30 * time.Second) defaultMaxUndeliveredMessages = 1000 ) @@ -41,42 +40,47 @@ type Client interface { AddRoute(topic string, callback mqtt.MessageHandler) Disconnect(quiesce uint) } - type ClientFactory func(o *mqtt.ClientOptions) Client - +type TopicParsingConfig struct { + Topic string `toml:"topic"` + Measurement string `toml:"measurement"` + Tags string `toml:"tags"` + Fields string `toml:"fields"` + FieldTypes map[string]string `toml:"types"` + // cached split of user given information + MeasurementIndex int + SplitTags []string + SplitFields []string + SplitTopic []string +} type MQTTConsumer struct { - Servers []string `toml:"servers"` - Topics []string `toml:"topics"` - TopicTag *string `toml:"topic_tag"` - Username string `toml:"username"` - Password string `toml:"password"` - QoS int `toml:"qos"` - ConnectionTimeout config.Duration `toml:"connection_timeout"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - - parser parsers.Parser - + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + TopicParsing []TopicParsingConfig `toml:"topic_parsing"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout config.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + parser parsers.Parser // Legacy metric buffer support; deprecated in v0.10.3 - MetricBuffer int - + MetricBuffer int PersistentSession bool ClientID string `toml:"client_id"` tls.ClientConfig - - Log telegraf.Logger - - clientFactory ClientFactory - client Client - opts *mqtt.ClientOptions - acc telegraf.TrackingAccumulator - state ConnectionState - sem semaphore - messages map[telegraf.TrackingID]bool - messagesMutex sync.Mutex - chosenTopicTag string - - ctx context.Context - cancel context.CancelFunc + Log telegraf.Logger + clientFactory ClientFactory + client Client + opts *mqtt.ClientOptions + acc telegraf.TrackingAccumulator + state ConnectionState + sem semaphore + messages map[telegraf.TrackingID]bool + messagesMutex sync.Mutex + topicTagParse string + ctx context.Context + cancel context.CancelFunc } var sampleConfig = ` @@ -86,18 +90,20 @@ var sampleConfig = ` ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] servers = ["tcp://127.0.0.1:1883"] - ## Topics that will be subscribed to. 
topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # topic_tags = "_/format/client/_" + # topic_measurement = "measurement/_/_/_" + # topic_fields = "_/_/_/temperature" ## The message topic will be stored in a tag specified by this value. If set ## to the empty string no topic tag will be created. # topic_tag = "topic" - ## QoS policy for messages ## 0 = at most once ## 1 = at least once @@ -106,10 +112,8 @@ var sampleConfig = ` ## When using a QoS of 1 or 2, you should enable persistent_session to allow ## resuming unacknowledged messages. # qos = 0 - ## Connection timeout for initial connection in seconds # connection_timeout = "30s" - ## Maximum messages to read from the broker that have not been written by an ## output. For best throughput set based on the number of metrics within ## each message and the size of the output's metric_batch_size. @@ -119,87 +123,103 @@ var sampleConfig = ` ## full batch is collected and the write is triggered immediately without ## waiting until the next flush_interval. # max_undelivered_messages = 1000 - ## Persistent session disables clearing of the client session on connection. ## In order for this option to work you must also set client_id to identify ## the client. To receive messages that arrived while the client is offline, ## also set the qos option to 1 or 2 and don't forget to also set the QoS when ## publishing. # persistent_session = false - ## If unset, a random client ID will be generated. # client_id = "" - ## Username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + ## [[inputs.mqtt_consumer.topic_parsing]] + ## topic = "" + ## measurement = "" + ## tags = "" + ## fields = "" + ## [inputs.mqtt_consumer.topic_parsing.types] + ## ` func (m *MQTTConsumer) SampleConfig() string { return sampleConfig } - func (m *MQTTConsumer) Description() string { return "Read metrics from MQTT topic(s)" } - func (m *MQTTConsumer) SetParser(parser parsers.Parser) { m.parser = parser } - func (m *MQTTConsumer) Init() error { m.state = Disconnected - if m.PersistentSession && m.ClientID == "" { return errors.New("persistent_session requires client_id") } - if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } - if time.Duration(m.ConnectionTimeout) < 1*time.Second { return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } - - m.chosenTopicTag = "topic" + m.topicTagParse = "topic" if m.TopicTag != nil { - m.chosenTopicTag = *m.TopicTag + m.topicTagParse = *m.TopicTag } - opts, err := m.createOpts() if err != nil { return err } - m.opts = opts m.messages = map[telegraf.TrackingID]bool{} + for i, p := range m.TopicParsing { + splitMeasurement := strings.Split(p.Measurement, "/") + for j := range splitMeasurement { + if splitMeasurement[j] != "_" { + m.TopicParsing[i].MeasurementIndex = j + break + } + } + m.TopicParsing[i].SplitTags = strings.Split(p.Tags, "/") + m.TopicParsing[i].SplitFields = strings.Split(p.Fields, "/") + m.TopicParsing[i].SplitTopic = strings.Split(p.Topic, "/") + + if len(splitMeasurement) != len(m.TopicParsing[i].SplitTopic) { + return fmt.Errorf("config error topic parsing: measurement length does not equal topic length") + } + + if len(m.TopicParsing[i].SplitFields) != len(m.TopicParsing[i].SplitTopic) { + return fmt.Errorf("config error topic parsing: fields length does not equal topic length") + } + + if len(m.TopicParsing[i].SplitTags) != len(m.TopicParsing[i].SplitTopic) { + return fmt.Errorf("config error topic parsing: tags length does not equal topic length") + } + } + return nil } - func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.state = Disconnected - m.acc = acc.WithTracking(m.MaxUndeliveredMessages) m.sem = make(semaphore, m.MaxUndeliveredMessages) m.ctx, m.cancel = context.WithCancel(context.Background()) - m.client = m.clientFactory(m.opts) - // AddRoute sets up the function for handling messages. These need to be // added in case we find a persistent session containing subscriptions so we // know where to dispatch persisted and new messages to. In the alternate @@ -207,11 +227,9 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { for _, topic := range m.Topics { m.client.AddRoute(topic, m.recvMessage) } - m.state = Connecting return m.connect() } - func (m *MQTTConsumer) connect() error { token := m.client.Connect() if token.Wait() && token.Error() != nil { @@ -219,10 +237,8 @@ func (m *MQTTConsumer) connect() error { m.state = Disconnected return err } - m.Log.Infof("Connected %v", m.Servers) m.state = Connected - // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. 
type sessionPresent interface { @@ -232,28 +248,23 @@ func (m *MQTTConsumer) connect() error { m.Log.Debugf("Session found %v", m.Servers) return nil } - topics := make(map[string]byte) for _, topic := range m.Topics { topics[topic] = byte(m.QoS) } - subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", strings.Join(m.Topics[:], ","), subscribeToken.Error())) } - return nil } - func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected } - func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { for { select { @@ -279,26 +290,60 @@ func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { } } +// compareTopics is used to support the mqtt wild card `+` which allows for one topic of any value +func compareTopics(expected []string, incoming []string) bool { + if len(expected) != len(incoming) { + return false + } + + for i, expected := range expected { + if incoming[i] != expected && expected != "+" { + return false + } + } + + return true +} + func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Message) error { metrics, err := m.parser.Parse(msg.Payload()) if err != nil { return err } - if m.chosenTopicTag != "" { - topic := msg.Topic() - for _, metric := range metrics { - metric.AddTag(m.chosenTopicTag, topic) + for _, metric := range metrics { + if m.topicTagParse != "" { + metric.AddTag(m.topicTagParse, msg.Topic()) } - } + for _, p := range m.TopicParsing { + values := strings.Split(msg.Topic(), "/") + if !compareTopics(p.SplitTopic, values) { + continue + } + if p.Measurement != "" { + metric.SetName(values[p.MeasurementIndex]) + } + if p.Tags != "" { + err := parseMetric(p.SplitTags, values, p.FieldTypes, true, metric) + if err != nil { + return err + } + } + if p.Fields != "" { + err := parseMetric(p.SplitFields, values, p.FieldTypes, false, metric) + if err != nil { + return err + } + } + } + } id := acc.AddTrackingMetricGroup(metrics) m.messagesMutex.Lock() m.messages[id] = true m.messagesMutex.Unlock() return nil } - func (m *MQTTConsumer) Stop() { if m.state == Connected { m.Log.Debugf("Disconnecting %v", m.Servers) @@ -308,37 +353,29 @@ func (m *MQTTConsumer) Stop() { } m.cancel() } - func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) return m.connect() } - return nil } - func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - opts.ConnectTimeout = time.Duration(m.ConnectionTimeout) - if m.ClientID == "" { opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) } else { opts.SetClientID(m.ClientID) } - tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err } - if tlsCfg != nil { opts.SetTLSConfig(tlsCfg) } - user := m.Username if user != "" { opts.SetUsername(user) @@ -347,11 +384,9 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { if password != "" { opts.SetPassword(password) } - if len(m.Servers) == 0 { return opts, fmt.Errorf("could not get host informations") } - for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { @@ -362,17 +397,72 @@ func (m *MQTTConsumer) createOpts() 
(*mqtt.ClientOptions, error) { server = "ssl://" + server } } - opts.AddBroker(server) } opts.SetAutoReconnect(false) opts.SetKeepAlive(time.Second * 60) opts.SetCleanSession(!m.PersistentSession) opts.SetConnectionLostHandler(m.onConnectionLost) - return opts, nil } +// parseFields gets multiple fields from the topic based on the user configuration (TopicParsing.Fields) +func parseMetric(keys []string, values []string, types map[string]string, isTag bool, metric telegraf.Metric) error { + var metricFound bool + for i, k := range keys { + if k == "_" { + continue + } + + if isTag { + metric.AddTag(k, values[i]) + metricFound = true + } else { + newType, err := typeConvert(types, values[i], k) + if err != nil { + return err + } + metric.AddField(k, newType) + metricFound = true + } + } + if !metricFound { + return fmt.Errorf("no fields or tags found") + } + return nil +} + +func typeConvert(types map[string]string, topicValue string, key string) (interface{}, error) { + var newType interface{} + var err error + // If the user configured inputs.mqtt_consumer.topic.types, check for the desired type + if desiredType, ok := types[key]; ok { + switch desiredType { + case "uint": + newType, err = strconv.ParseUint(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", topicValue, err) + } + case "int": + newType, err = strconv.ParseInt(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", topicValue, err) + } + case "float": + newType, err = strconv.ParseFloat(topicValue, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", topicValue, err) + } + default: + return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType) + } + } else { + newType = topicValue + } + + return newType, nil +} + func New(factory ClientFactory) *MQTTConsumer { return &MQTTConsumer{ Servers: []string{"tcp://127.0.0.1:1883"}, @@ -382,7 +472,6 @@ func New(factory ClientFactory) *MQTTConsumer { state: Disconnected, } } - func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { return New(func(o *mqtt.ClientOptions) Client { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index a9b85c108ab65..7ba5609973d96 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -1,6 +1,7 @@ package mqtt_consumer import ( + "fmt" "testing" "time" @@ -153,6 +154,7 @@ func TestPersistentClientIDFail(t *testing.T) { } type Message struct { + topic string } func (m *Message) Duplicate() bool { @@ -168,7 +170,7 @@ func (m *Message) Retained() bool { } func (m *Message) Topic() string { - return "telegraf" + return m.topic } func (m *Message) MessageID() uint16 { @@ -185,12 +187,16 @@ func (m *Message) Ack() { func TestTopicTag(t *testing.T) { tests := []struct { - name string - topicTag func() *string - expected []telegraf.Metric + name string + topic string + topicTag func() *string + expectedError error + topicParsing []TopicParsingConfig + expected []telegraf.Metric }{ { - name: "default topic when topic tag is unset for backwards compatibility", + name: "default topic when topic tag is unset for backwards compatibility", + topic: "telegraf", topicTag: func() *string { return nil }, @@ -208,7 +214,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "use topic tag when set", + name: "use topic tag when 
set", + topic: "telegraf", topicTag: func() *string { tag := "topic_tag" return &tag @@ -227,7 +234,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "no topic tag is added when topic tag is set to the empty string", + name: "no topic tag is added when topic tag is set to the empty string", + topic: "telegraf", topicTag: func() *string { tag := "" return &tag @@ -243,6 +251,105 @@ func TestTopicTag(t *testing.T) { ), }, }, + { + name: "topic parsing configured", + topic: "telegraf/123/test", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/123/test", + Measurement: "_/_/measurement", + Tags: "testTag/_/_", + Fields: "_/testNumber/_", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured with a mqtt wild card `+`", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/testNumber/_/testString", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured incorrectly", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + expectedError: fmt.Errorf("config error topic parsing: fields length does not equal topic length"), + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/_/testNumber:int/_/testString:string", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -265,21 +372,28 @@ func TestTopicTag(t *testing.T) { return client }) plugin.Log = testutil.Logger{} - plugin.Topics = []string{"telegraf"} + plugin.Topics = []string{tt.topic} plugin.TopicTag = tt.topicTag() + plugin.TopicParsing = tt.topicParsing parser, err := parsers.NewInfluxParser() require.NoError(t, err) plugin.SetParser(parser) err = plugin.Init() - require.NoError(t, err) + require.Equal(t, tt.expectedError, err) + if tt.expectedError != nil { + return + } var acc testutil.Accumulator err = plugin.Start(&acc) require.NoError(t, err) - handler(nil, &Message{}) + var m Message + m.topic = tt.topic + + handler(nil, &m) plugin.Stop() From 9480e49eee069aeb418358cf1159988286be4132 Mon Sep 17 00:00:00 2001 From: bkotlowski Date: Tue, 23 Nov 2021 17:32:19 +0100 Subject: [PATCH 057/133] feat: add intel_pmu plugin (#9724) Co-authored-by: ktrojan --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 1 + go.sum | 2 + plugins/inputs/all/all.go | 1 + plugins/inputs/intel_pmu/README.md | 210 +++++++ plugins/inputs/intel_pmu/activators.go | 205 +++++++ 
plugins/inputs/intel_pmu/activators_test.go | 432 ++++++++++++++ plugins/inputs/intel_pmu/config.go | 239 ++++++++ plugins/inputs/intel_pmu/config_test.go | 230 ++++++++ plugins/inputs/intel_pmu/intel_pmu.go | 477 +++++++++++++++ .../intel_pmu/intel_pmu_notamd64linux.go | 4 + plugins/inputs/intel_pmu/intel_pmu_test.go | 555 ++++++++++++++++++ plugins/inputs/intel_pmu/mocks.go | 407 +++++++++++++ plugins/inputs/intel_pmu/reader.go | 249 ++++++++ plugins/inputs/intel_pmu/reader_test.go | 522 ++++++++++++++++ plugins/inputs/intel_pmu/resolver.go | 150 +++++ plugins/inputs/intel_pmu/resolver_test.go | 376 ++++++++++++ 17 files changed, 4061 insertions(+) create mode 100644 plugins/inputs/intel_pmu/README.md create mode 100644 plugins/inputs/intel_pmu/activators.go create mode 100644 plugins/inputs/intel_pmu/activators_test.go create mode 100644 plugins/inputs/intel_pmu/config.go create mode 100644 plugins/inputs/intel_pmu/config_test.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu_test.go create mode 100644 plugins/inputs/intel_pmu/mocks.go create mode 100644 plugins/inputs/intel_pmu/reader.go create mode 100644 plugins/inputs/intel_pmu/reader_test.go create mode 100644 plugins/inputs/intel_pmu/resolver.go create mode 100644 plugins/inputs/intel_pmu/resolver_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 440349a6f5c91..e1caaf320a0f9 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -132,6 +132,7 @@ following works: - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/intel/iaevents [Apache License 2.0](https://github.com/intel/iaevents/blob/main/LICENSE) - github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE) - github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE) - github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE) diff --git a/go.mod b/go.mod index cdbeb33266c42..f1940dcfa26c5 100644 --- a/go.mod +++ b/go.mod @@ -151,6 +151,7 @@ require ( github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/intel/iaevents v1.0.0 github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.5.0 // indirect github.com/jackc/pgio v1.0.0 // indirect diff --git a/go.sum b/go.sum index 2423bce22dc89..766b94293531e 100644 --- a/go.sum +++ b/go.sum @@ -1249,6 +1249,8 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E= 
+github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 1320e7b025ca8..9a6b3bd830a66 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -71,6 +71,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/intel_pmu" _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" diff --git a/plugins/inputs/intel_pmu/README.md b/plugins/inputs/intel_pmu/README.md new file mode 100644 index 0000000000000..92a07d14e68f6 --- /dev/null +++ b/plugins/inputs/intel_pmu/README.md @@ -0,0 +1,210 @@ +# Intel Performance Monitoring Unit Plugin + +This input plugin exposes Intel PMU (Performance Monitoring Unit) metrics available through [Linux Perf](https://perf.wiki.kernel.org/index.php/Main_Page) subsystem. + +PMU metrics gives insight into performance and health of IA processor's internal components, +including core and uncore units. With the number of cores increasing and processor topology getting more complex +the insight into those metrics is vital to assure the best CPU performance and utilization. + +Performance counters are CPU hardware registers that count hardware events such as instructions executed, cache-misses suffered, or branches mispredicted. +They form a basis for profiling applications to trace dynamic control flow and identify hotspots. + +## Configuration + +```toml +# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +[[inputs.intel_pmu]] + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events sections. + [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. 
+ ## Could vary depending on type of event, use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events sections. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" +``` + +### Modifiers + +Perf modifiers adjust event-specific perf attribute to fulfill particular requirements. +Details about perf attribute structure could be found in [perf_event_open](https://man7.org/linux/man-pages/man2/perf_event_open.2.html) syscall manual. + +General schema of configuration's `events` list element: + +```regexp +EVENT_NAME(:(config|config1|config2)=(0x[0-9a-f]{1-16})(p|k|u|h|H|I|G|D))* +``` + +where: + +| Modifier | Underlying attribute | Description | +|----------|---------------------------------|-----------------------------| +| config | perf_event_attr.config | type-specific configuration | +| config1 | perf_event_attr.config1 | extension of config | +| config2 | perf_event_attr.config2 | extension of config1 | +| p | perf_event_attr.precise_ip | skid constraint | +| k | perf_event_attr.exclude_user | don't count user | +| u | perf_event_attr.exclude_kernel | don't count kernel | +| h / H | perf_event_attr.exclude_guest | don't count in guest | +| I | perf_event_attr.exclude_idle | don't count when idle | +| G | perf_event_attr.exclude_hv | don't count hypervisor | +| D | perf_event_attr.pinned | must always be on PMU | + +## Requirements + +The plugin is using [iaevents](https://github.com/intel/iaevents) library which is a golang package that makes accessing the Linux kernel's perf interface easier. + +Intel PMU plugin, is only intended for use on **linux 64-bit** systems. + +Event definition JSON files for specific architectures can be found at [01.org](https://download.01.org/perfmon/). +A script to download the event definitions that are appropriate for your system (event_download.py) is available at [pmu-tools](https://github.com/andikleen/pmu-tools). +Please keep these files in a safe place on your system. + +## Measuring + +Plugin allows measuring both core and uncore events. During plugin initialization the event names provided by user are compared +with event definitions included in JSON files and translated to perf attributes. Next, those events are activated to start counting. 
+During every telegraf interval, the plugin reads the measurement for each previously activated event. + +Each core event may be counted separately on every available CPU core. In contrast, uncore events may be placed in +many PMUs within a specified CPU package. The plugin allows choosing the core IDs (core events) or socket IDs (uncore events) on which the counting should be executed. +Uncore events are activated separately on each of the socket's PMUs, and can either be exposed as separate +measurements or summed up into one measurement. + +The obtained measurements are stored as three values: **Raw**, **Enabled** and **Running**. Raw is the total count of the event. Enabled and running are the total time the event was enabled and running. +Normally these are the same. If more events are started than there are available counter slots on the PMU, then multiplexing +occurs and events only run part of the time. Therefore, the plugin provides a fourth value called **scaled**, which is calculated using the following formula: +`scaled = raw * enabled / running`. + +Events are measured for all running processes. + +### Core event groups + +Perf allows assembling events as a group. A perf event group is scheduled onto the CPU as a unit: it will be put onto the CPU only if all of the events in the group can be put onto the CPU. +This means that the values of the member events can be meaningfully compared with each other (added, divided to get ratios, and so on), +since they have counted events for the same set of executed instructions [(source)](https://man7.org/linux/man-pages/man2/perf_event_open.2.html). + +> **NOTE:** +> Be aware that the plugin will throw an error when trying to create a core event group whose size exceeds the number of available core PMU counters. +> The error message from the perf syscall will be shown as "invalid argument". If you want to check how many counters your Intel CPU's PMU supports, you can use the [cpuid](https://linux.die.net/man/1/cpuid) command. + +### Note about file descriptors + +The plugin opens a number of file descriptors that depends on the number of monitored CPUs and the number of monitored +counters. This can easily exceed the default per-process limit of allowed file descriptors. Depending on the +configuration, it might be necessary to increase the limit of open file descriptors allowed. +This can be done, for example, by using the `ulimit -n` command.
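+A rough sketch of the **scaled** calculation described in the Measuring section above (for illustration only; this is not the plugin's exact implementation, and the `scale` helper is a made-up name):
+
+```go
+// scale approximates the event count as if the event had been counted
+// continuously. When there was no multiplexing (enabled == running),
+// it simply returns the raw value.
+func scale(raw, enabled, running uint64) uint64 {
+	if running == 0 {
+		return 0
+	}
+	return uint64(float64(raw) * float64(enabled) / float64(running))
+}
+```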
+ +## Metrics + +On each Telegraf interval, Intel PMU plugin transmits following data: + +### Metric Fields + +| Field | Type | Description | +|---------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | uint64 | time counter, contains time the associated perf event was enabled | +| running | uint64 | time counter, contains time the event was actually counted | +| raw | uint64 | value counter, contains event count value during the time the event was actually counted | +| scaled | uint64 | value counter, contains approximated value of counter if the event was continuously counted, using scaled = raw * (enabled / running) formula | + +### Metric Tags - common + +| Tag | Description | +|-------|------------------------------| +| host | hostname as read by Telegraf | +| event | name of the event | + +### Metric Tags - core events + +| Tag | Description | +|------------|----------------------------------------------------------------------------------------------------| +| cpu | CPU id as identified by linux OS (either logical cpu id when HT on or physical cpu id when HT off) | +| events_tag | (optional) tag as defined in "intel_pmu.core_events" configuration element | + +### Metric Tags - uncore events + +| Tag | Description | +|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| socket | socket number as identified by linux OS (physical_package_id) | +| unit_type | type of event-capable PMU that the event was counted for, provides category of PMU that the event was counted for, e.g. cbox for uncore_cbox_1, r2pcie for uncore_r2pcie etc. | +| unit | name of event-capable PMU that the event was counted for, as listed in /sys/bus/event_source/devices/ e.g. uncore_cbox_1, uncore_imc_1 etc. 
Present for non-aggregated uncore events only | +| events_tag| (optional) tag as defined in "intel_pmu.uncore_events" configuration element | + +## Example outputs + +Event group: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871237051i,running=2871237051i,raw=1171711i,scaled=1171711i 1621254096000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871240713i,running=2871240713i,raw=72340716i,scaled=72340716i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871118275i,running=2871118275i,raw=1646752i,scaled=1646752i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz raw=108802421i,scaled=108802421i,enabled=2871120107i,running=2871120107i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871143950i,running=2871143950i,raw=1316834i,scaled=1316834i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871074681i,running=2871074681i,raw=68728436i,scaled=68728436i 1621254096000000000 +``` + +Uncore event not aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_0,unit_type=cbox enabled=2870630747i,running=2870630747i,raw=183996i,scaled=183996i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_1,unit_type=cbox enabled=2870608194i,running=2870608194i,raw=185703i,scaled=185703i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_2,unit_type=cbox enabled=2870600211i,running=2870600211i,raw=187331i,scaled=187331i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_3,unit_type=cbox enabled=2870593914i,running=2870593914i,raw=184228i,scaled=184228i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_4,unit_type=cbox scaled=195355i,enabled=2870558952i,running=2870558952i,raw=195355i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_5,unit_type=cbox enabled=2870554131i,running=2870554131i,raw=197756i,scaled=197756i 1621254096000000000 +``` + +Uncore event aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit_type=cbox enabled=13199712335i,running=13199712335i,raw=467485i,scaled=467485i 1621254412000000000 +``` + +Time multiplexing: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,host=xyz raw=2947727i,scaled=4428970i,enabled=2201071844i,running=1464935978i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,host=xyz running=1465155618i,raw=302553190i,scaled=454511623i,enabled=2201035323i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK,host=xyz enabled=2200994057i,running=1466812391i,raw=3177535i,scaled=4767982i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK_ANY,host=xyz enabled=2200963921i,running=1470523496i,raw=3359272i,scaled=5027894i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES_ANY,host=xyz enabled=2200933946i,running=1470322480i,raw=23631950i,scaled=35374798i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES,host=xyz 
raw=18767833i,scaled=28169827i,enabled=2200888514i,running=1466317384i 1621254412000000000 +``` diff --git a/plugins/inputs/intel_pmu/activators.go b/plugins/inputs/intel_pmu/activators.go new file mode 100644 index 0000000000000..1750c72789c00 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators.go @@ -0,0 +1,205 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + + ia "github.com/intel/iaevents" +) + +type placementMaker interface { + makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) + makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) +} + +type iaPlacementMaker struct{} + +func (iaPlacementMaker) makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + var err error + var corePlacements []ia.PlacementProvider + + switch len(cores) { + case 0: + return nil, errors.New("no cores provided") + case 1: + corePlacements, err = ia.NewCorePlacements(factory, cores[0]) + if err != nil { + return nil, err + } + default: + corePlacements, err = ia.NewCorePlacements(factory, cores[0], cores[1:]...) + if err != nil { + return nil, err + } + } + return corePlacements, nil +} + +func (iaPlacementMaker) makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + return ia.NewUncoreAllPlacements(factory, socket) +} + +type eventsActivator interface { + activateEvent(ia.Activator, ia.PlacementProvider, ia.Options) (*ia.ActiveEvent, error) + activateGroup(ia.PlacementProvider, []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) + activateMulti(ia.MultiActivator, []ia.PlacementProvider, ia.Options) (*ia.ActiveMultiEvent, error) +} + +type iaEventsActivator struct{} + +func (iaEventsActivator) activateEvent(a ia.Activator, p ia.PlacementProvider, o ia.Options) (*ia.ActiveEvent, error) { + return a.Activate(p, ia.NewEventTargetProcess(-1, 0), o) +} + +func (iaEventsActivator) activateGroup(p ia.PlacementProvider, e []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) { + return ia.ActivateGroup(p, ia.NewEventTargetProcess(-1, 0), e) +} + +func (iaEventsActivator) activateMulti(a ia.MultiActivator, p []ia.PlacementProvider, o ia.Options) (*ia.ActiveMultiEvent, error) { + return a.ActivateMulti(p, ia.NewEventTargetProcess(-1, 0), o) +} + +type entitiesActivator interface { + activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesActivator struct { + placementMaker placementMaker + perfActivator eventsActivator +} + +func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, coreEventsEntity := range coreEntities { + err := ea.activateCoreEvents(coreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err) + } + } + for _, uncoreEventsEntity := range uncoreEntities { + err := ea.activateUncoreEvents(uncoreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error { + if entity == nil { + return fmt.Errorf("core events entity is nil") + } + if ea.placementMaker == nil { + return fmt.Errorf("placement maker is nil") + } + if entity.PerfGroup { + err := ea.activateCoreEventsGroup(entity) + if err != nil { + 
return fmt.Errorf("failed to activate core events group: %v", err) + } + } else { + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core parsed event is nil") + } + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event) + if err != nil { + return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err) + } + activeEvent, err := ea.activateEventForPlacements(event, placements) + if err != nil { + return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err) + } + entity.activeEvents = append(entity.activeEvents, activeEvent...) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error { + if entity == nil { + return fmt.Errorf("uncore events entity is nil") + } + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("events activator or placement maker is nil") + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("uncore parsed event is nil") + } + perfEvent := event.custom.Event + if perfEvent == nil { + return fmt.Errorf("perf event of `%s` event is nil", event.name) + } + options := event.custom.Options + + for _, socket := range entity.parsedSockets { + placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent) + if err != nil { + return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err) + } + activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options) + if err != nil { + return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err) + } + events := activeMultiEvent.Events() + entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket}) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error { + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("missing perf activator or placement maker") + } + if entity == nil || len(entity.parsedEvents) < 1 { + return fmt.Errorf("missing parsed events") + } + + var events []ia.CustomizableEvent + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core event is nil") + } + events = append(events, event.custom) + } + leader := entity.parsedEvents[0].custom + + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event) + if err != nil { + return fmt.Errorf("failed to make core placements: %v", err) + } + + for _, plc := range placements { + activeGroup, err := ea.perfActivator.activateGroup(plc, events) + if err != nil { + return err + } + entity.activeEvents = append(entity.activeEvents, activeGroup.Events()...) 
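+		// flatten the activated group's events into the entity so the rest of the plugin can treat
+		// grouped and ungrouped core events uniformly (e.g. during deactivation in Stop)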
+ } + return nil +} + +func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) { + if event == nil { + return nil, fmt.Errorf("core event is nil") + } + if ea.perfActivator == nil { + return nil, fmt.Errorf("missing perf activator") + } + var activeEvents []*ia.ActiveEvent + for _, placement := range placements { + perfEvent := event.custom.Event + options := event.custom.Options + + activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options) + if err != nil { + return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err) + } + activeEvents = append(activeEvents, activeEvent) + } + return activeEvents, nil +} diff --git a/plugins/inputs/intel_pmu/activators_test.go b/plugins/inputs/intel_pmu/activators_test.go new file mode 100644 index 0000000000000..28f05710d3e69 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators_test.go @@ -0,0 +1,432 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type mockPlacementFactory struct { + err bool +} + +func (m *mockPlacementFactory) NewPlacements(_ string, cpu int, cpus ...int) ([]ia.PlacementProvider, error) { + if m.err { + return nil, errors.New("mock error") + } + placements := []ia.PlacementProvider{ + &ia.Placement{CPU: cpu, PMUType: 4}, + } + for _, cpu := range cpus { + placements = append(placements, &ia.Placement{CPU: cpu, PMUType: 4}) + } + return placements, nil +} + +func TestActivateEntities(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{} + + // more core test cases in TestActivateCoreEvents + t.Run("failed to activate core events", func(t *testing.T) { + tag := "TAG" + mEntities := []*CoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(mEntities, nil) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag)) + }) + + // more uncore test cases in TestActivateUncoreEvents + t.Run("failed to activate uncore events", func(t *testing.T) { + tag := "TAG" + mEntities := []*UncoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(nil, mEntities) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag)) + }) + + t.Run("nothing to do", func(t *testing.T) { + err := mEntitiesActivator.activateEntities(nil, nil) + require.NoError(t, err) + }) +} + +func TestActivateUncoreEvents(t *testing.T) { + mActivator := &mockEventsActivator{} + mMaker := &mockPlacementMaker{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateUncoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore events entity is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore parsed event is nil") + }) + + t.Run("perf event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: 
mActivator} + name := "event name" + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name)) + }) + + t.Run("placement maker and perf activator is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: nil} + err := mEntitiesActivator.activateUncoreEvents(&UncoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "events activator or placement maker is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(nil, errMock).Once() + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event `%s`", eventName)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(placements, nil).Once() + mActivator.On("activateMulti", parsedEvents[0].custom.Event, placements, parsedEvents[0].custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4", Uncore: true}}}, + } + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}, &ia.Placement{}} + + var expectedEvents []multiEvent + for _, event := range parsedEvents { + for _, socket := range mEntity.parsedSockets { + mMaker.On("makeUncorePlacements", event.custom.Event, socket).Return(placements, nil).Once() + newActiveMultiEvent := &ia.ActiveMultiEvent{} + expectedEvents = append(expectedEvents, multiEvent{newActiveMultiEvent.Events(), event.custom.Event, 
socket}) + mActivator.On("activateMulti", event.custom.Event, placements, event.custom.Options).Return(newActiveMultiEvent, nil).Once() + } + } + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.NoError(t, err) + require.Equal(t, expectedEvents, mEntity.activeMultiEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEvents(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "core events entity is nil") + }) + + t.Run("placement maker is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(&CoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "placement maker is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core parsed event is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + mMaker.On("makeCorePlacements", mEntity.parsedCores, parsedEvents[0].custom.Event).Return(nil, errMock).Once() + err := mEntitiesActivator.activateCoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + event := parsedEvents[0] + plc := placements[0] + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("failed to activate core events group", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: nil} + mEntity := &CoreEventEntity{PerfGroup: true, parsedEvents: nil} + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + 
require.Contains(t, err.Error(), "failed to activate core events group") + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4"}}}, + } + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}, &ia.Placement{CPU: 2}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + var activeEvents []*ia.ActiveEvent + for _, event := range parsedEvents { + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + for _, plc := range placements { + newActiveEvent := &ia.ActiveEvent{PerfEvent: event.custom.Event} + activeEvents = append(activeEvents, newActiveEvent) + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(newActiveEvent, nil).Once() + } + } + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.NoError(t, err) + require.Equal(t, activeEvents, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEventsGroup(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + eActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + errMock := errors.New("mock error") + + leader := &ia.PerfEvent{Name: "mock event 1"} + perfEvent2 := &ia.PerfEvent{Name: "mock event 2"} + + parsedEvents := []*eventWithQuals{{custom: ia.CustomizableEvent{Event: leader}}, {custom: ia.CustomizableEvent{Event: perfEvent2}}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}} + + // cannot populate this struct due to unexported events field + activeGroup := &ia.ActiveEventGroup{} + + mEntity := &CoreEventEntity{ + EventsTag: "mock group", + PerfGroup: true, + parsedEvents: parsedEvents, + parsedCores: nil, + } + + var events []ia.CustomizableEvent + for _, event := range parsedEvents { + events = append(events, event.custom) + } + + t.Run("missing perf activator and placement maker", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator or placement maker") + }) + + t.Run("missing parsed events", func(t *testing.T) { + mActivator := &iaEntitiesActivator{placementMaker: &mockPlacementMaker{}, perfActivator: &mockEventsActivator{}} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing parsed events") + }) + + t.Run("nil in parsed event", func(t *testing.T) { + mEntity := &CoreEventEntity{EventsTag: "Nice tag", PerfGroup: true, parsedEvents: []*eventWithQuals{nil, nil}} + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + }) + + t.Run("failed to make core placements", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(nil, errMock).Once() + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, 
err.Error(), "failed to make core placements") + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + mActivator.On("activateGroup", placements[0], events).Return(nil, errMock).Once() + + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), errMock.Error()) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + var allActive []*ia.ActiveEvent + t.Run("successfully activated group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + for _, plc := range placements { + mActivator.On("activateGroup", plc, events).Return(activeGroup, nil).Once() + allActive = append(allActive, activeGroup.Events()...) + } + + err := eActivator.activateCoreEventsGroup(mEntity) + require.NoError(t, err) + require.Equal(t, allActive, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestMakeCorePlacements(t *testing.T) { + tests := []struct { + name string + cores []int + perfEvent ia.PlacementFactory + result []ia.PlacementProvider + errMsg string + }{ + {"no cores", nil, &ia.PerfEvent{}, nil, "no cores provided"}, + {"one core placement", []int{1}, &mockPlacementFactory{}, []ia.PlacementProvider{&ia.Placement{CPU: 1, PMUType: 4}}, ""}, + {"multiple core placement", []int{1, 2, 4}, &mockPlacementFactory{}, []ia.PlacementProvider{ + &ia.Placement{CPU: 1, PMUType: 4}, + &ia.Placement{CPU: 2, PMUType: 4}, + &ia.Placement{CPU: 4, PMUType: 4}}, + ""}, + {"placement factory error", []int{1}, &mockPlacementFactory{true}, nil, "mock error"}, + {"placement factory error 2", []int{1, 2, 3}, &mockPlacementFactory{true}, nil, "mock error"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + maker := &iaPlacementMaker{} + providers, err := maker.makeCorePlacements(test.cores, test.perfEvent) + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Nil(t, providers) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + require.Equal(t, test.result, providers) + }) + } +} + +func TestActivateEventForPlacement(t *testing.T) { + placement1 := &ia.Placement{CPU: 0} + placement2 := &ia.Placement{CPU: 1} + placement3 := &ia.Placement{CPU: 2} + + mPlacements := []ia.PlacementProvider{placement1, placement2, placement3} + + mPerfEvent := &ia.PerfEvent{Name: "mock1"} + mOptions := &ia.PerfEventOptions{} + mEvent := &eventWithQuals{name: mPerfEvent.Name, custom: ia.CustomizableEvent{Event: mPerfEvent, Options: mOptions}} + + mPerfActivator := &mockEventsActivator{} + mActivator := &iaEntitiesActivator{perfActivator: mPerfActivator} + + t.Run("event is nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(nil, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + require.Nil(t, activeEvents) + }) + + t.Run("perf activator is nil", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator") + require.Nil(t, activeEvents) + }) + + t.Run("placements are nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(mEvent, nil) + require.NoError(t, err) + require.Nil(t, activeEvents) + 
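+		// no placements means nothing to activate: no error and no active events are expected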
}) + + t.Run("activation error", func(t *testing.T) { + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err")) + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name)) + require.Nil(t, activeEvents) + mPerfActivator.AssertExpectations(t) + }) + + t.Run("successfully activated", func(t *testing.T) { + mActiveEvent := &ia.ActiveEvent{} + mActiveEvent2 := &ia.ActiveEvent{} + mActiveEvent3 := &ia.ActiveEvent{} + + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(mActiveEvent, nil). + On("activateEvent", mPerfEvent, placement2, mOptions).Once().Return(mActiveEvent2, nil). + On("activateEvent", mPerfEvent, placement3, mOptions).Once().Return(mActiveEvent3, nil) + + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.NoError(t, err) + require.Len(t, activeEvents, len(mPlacements)) + require.Contains(t, activeEvents, mActiveEvent) + require.Contains(t, activeEvents, mActiveEvent2) + mPerfActivator.AssertExpectations(t) + }) +} diff --git a/plugins/inputs/intel_pmu/config.go b/plugins/inputs/intel_pmu/config.go new file mode 100644 index 0000000000000..c788744e9549b --- /dev/null +++ b/plugins/inputs/intel_pmu/config.go @@ -0,0 +1,239 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +// Maximum size of core IDs or socket IDs (8192). Based on maximum value of CPUs that linux kernel supports. +const maxIDsSize = 1 << 13 + +type entitiesParser interface { + parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) +} + +type configParser struct { + log telegraf.Logger + sys sysInfoProvider +} + +func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) { + if len(coreEntities) == 0 && len(uncoreEntities) == 0 { + return fmt.Errorf("neither core nor uncore entities configured") + } + + for _, coreEntity := range coreEntities { + if coreEntity == nil { + return fmt.Errorf("core entity is nil") + } + if coreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all core events from provided files will be configured") + } + coreEntity.allEvents = true + } else { + events := cp.parseEvents(coreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of core events was provided") + } + coreEntity.parsedEvents = events + } + + coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores) + if err != nil { + return fmt.Errorf("error during cores parsing: %v", err) + } + } + + for _, uncoreEntity := range uncoreEntities { + if uncoreEntity == nil { + return fmt.Errorf("uncore entity is nil") + } + if uncoreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all uncore events from provided files will be configured") + } + uncoreEntity.allEvents = true + } else { + events := cp.parseEvents(uncoreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of uncore events was provided") + } + uncoreEntity.parsedEvents = events + } + + uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets) + if err != nil { + return fmt.Errorf("error during sockets parsing: %v", err) + } + } + return nil +} + +func (cp *configParser) parseEvents(events []string) []*eventWithQuals { + if len(events) == 0 { 
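+		// an explicitly empty list yields nil; the caller treats this as a configuration error,
+		// unlike a nil Events field which selects all events from the definition files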
+ return nil + } + + events, duplications := removeDuplicateStrings(events) + for _, duplication := range duplications { + if cp.log != nil { + cp.log.Warnf("duplicated event `%s` will be removed", duplication) + } + } + return parseEventsWithQualifiers(events) +} + +func (cp *configParser) parseCores(cores []string) ([]int, error) { + if cores == nil { + if cp.log != nil { + cp.log.Debug("all possible cores will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + cores, err := cp.sys.allCPUs() + if err != nil { + return nil, fmt.Errorf("cannot obtain all cpus: %v", err) + } + return cores, nil + } + if len(cores) == 0 { + return nil, fmt.Errorf("an empty list of cores was provided") + } + + result, err := cp.parseIntRanges(cores) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseSockets(sockets []string) ([]int, error) { + if sockets == nil { + if cp.log != nil { + cp.log.Debug("all possible sockets will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + sockets, err := cp.sys.allSockets() + if err != nil { + return nil, fmt.Errorf("cannot obtain all sockets: %v", err) + } + return sockets, nil + } + if len(sockets) == 0 { + return nil, fmt.Errorf("an empty list of sockets was provided") + } + + result, err := cp.parseIntRanges(sockets) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseIntRanges(ranges []string) ([]int, error) { + var ids []int + var duplicatedIDs []int + var err error + ids, err = parseIDs(ranges) + if err != nil { + return nil, err + } + ids, duplicatedIDs = removeDuplicateValues(ids) + for _, duplication := range duplicatedIDs { + if cp.log != nil { + cp.log.Warnf("duplicated id number `%d` will be removed", duplication) + } + } + return ids, nil +} + +func parseEventsWithQualifiers(events []string) []*eventWithQuals { + var result []*eventWithQuals + + for _, event := range events { + newEventWithQualifiers := &eventWithQuals{} + + split := strings.Split(event, ":") + newEventWithQualifiers.name = split[0] + + if len(split) > 1 { + newEventWithQualifiers.qualifiers = split[1:] + } + result = append(result, newEventWithQualifiers) + } + return result +} + +func parseIDs(allIDsStrings []string) ([]int, error) { + var result []int + for _, idsString := range allIDsStrings { + ids := strings.Split(idsString, ",") + + for _, id := range ids { + id := strings.TrimSpace(id) + // a-b support + var start, end uint + n, err := fmt.Sscanf(id, "%d-%d", &start, &end) + if err == nil && n == 2 { + if start >= end { + return nil, fmt.Errorf("`%d` is equal or greater than `%d`", start, end) + } + for ; start <= end; start++ { + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, int(start)) + } + continue + } + // Single value + num, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("wrong format for id number `%s`: %v", id, err) + } + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, num) + } + } + return result, nil +} + +func removeDuplicateValues(intSlice []int) (result []int, duplicates []int) { + keys := make(map[int]bool) + + for _, entry := range intSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates = 
append(duplicates, entry) + } + } + return result, duplicates +} + +func removeDuplicateStrings(strSlice []string) (result []string, duplicates []string) { + keys := make(map[string]bool) + + for _, entry := range strSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates = append(duplicates, entry) + } + } + return result, duplicates +} diff --git a/plugins/inputs/intel_pmu/config_test.go b/plugins/inputs/intel_pmu/config_test.go new file mode 100644 index 0000000000000..5a0f288e3b443 --- /dev/null +++ b/plugins/inputs/intel_pmu/config_test.go @@ -0,0 +1,230 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestConfigParser_parseEntities(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + e := ia.CustomizableEvent{} + + t.Run("no entities", func(t *testing.T) { + err := mConfigParser.parseEntities(nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "neither core nor uncore entities configured") + }) + + // more specific parsing cases in TestConfigParser_parseIntRanges and TestConfigParser_parseEvents + coreTests := []struct { + name string + + coreEntity *CoreEventEntity + parsedCoreEvents []*eventWithQuals + parsedCores []int + coreAll bool + + uncoreEntity *UncoreEventEntity + parsedUncoreEvents []*eventWithQuals + parsedSockets []int + uncoreAll bool + + failMsg string + }{ + {"no events provided", + &CoreEventEntity{Events: nil, Cores: []string{"1"}}, nil, []int{1}, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0"}}, nil, []int{0}, true, + ""}, + {"uncore entity is nil", + &CoreEventEntity{Events: []string{"EVENT"}, Cores: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + nil, nil, nil, false, + "uncore entity is nil"}, + {"core entity is nil", + nil, nil, nil, false, + &UncoreEventEntity{Events: []string{"EVENT"}, Sockets: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + "core entity is nil"}, + {"error parsing sockets", + &CoreEventEntity{Events: nil, Cores: []string{"1,2"}}, nil, []int{1, 2}, true, + &UncoreEventEntity{Events: []string{"E"}, Sockets: []string{"wrong sockets"}}, []*eventWithQuals{{"E", nil, e}}, nil, false, + "error during sockets parsing"}, + {"error parsing cores", + &CoreEventEntity{Events: nil, Cores: []string{"wrong cpus"}}, nil, nil, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0,1"}}, nil, []int{0, 1}, true, + "error during cores parsing"}, + {"valid settings", + &CoreEventEntity{Events: []string{"E1", "E2:config=123"}, Cores: []string{"1-5"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", []string{"config=123"}, e}}, []int{1, 2, 3, 4, 5}, false, + &UncoreEventEntity{Events: []string{"E1", "E2", "E3"}, Sockets: []string{"0,2-6"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", nil, e}, {"E3", nil, e}}, []int{0, 2, 3, 4, 5, 6}, false, + ""}, + } + + for _, test := range coreTests { + t.Run(test.name, func(t *testing.T) { + coreEntities := []*CoreEventEntity{test.coreEntity} + uncoreEntities := []*UncoreEventEntity{test.uncoreEntity} + + err := mConfigParser.parseEntities(coreEntities, uncoreEntities) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return 
+ } + require.NoError(t, err) + require.Equal(t, test.coreAll, test.coreEntity.allEvents) + require.Equal(t, test.parsedCores, test.coreEntity.parsedCores) + require.Equal(t, test.parsedCoreEvents, test.coreEntity.parsedEvents) + + require.Equal(t, test.uncoreAll, test.uncoreEntity.allEvents) + require.Equal(t, test.parsedSockets, test.uncoreEntity.parsedSockets) + require.Equal(t, test.parsedUncoreEvents, test.uncoreEntity.parsedEvents) + }) + } +} + +func TestConfigParser_parseCores(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no cores provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all cpus info", func(t *testing.T) { + mSysInfo.On("allCPUs").Return(nil, errors.New("all cpus error")).Once() + result, err := mConfigParser.parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all cpus") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all cpus gathering succeeded", func(t *testing.T) { + allCPUs := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} + + mSysInfo.On("allCPUs").Return(allCPUs, nil).Once() + result, err := mConfigParser.parseCores(nil) + require.NoError(t, err) + require.Equal(t, allCPUs, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseSockets(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no sockets provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all sockets info", func(t *testing.T) { + mSysInfo.On("allSockets").Return(nil, errors.New("all sockets error")).Once() + result, err := mConfigParser.parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all sockets") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all cpus gathering succeeded", func(t *testing.T) { + allSockets := []int{0, 1, 2, 3, 4} + + mSysInfo.On("allSockets").Return(allSockets, nil).Once() + result, err := mConfigParser.parseSockets(nil) + require.NoError(t, err) + require.Equal(t, allSockets, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseEvents(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + e := ia.CustomizableEvent{} + + tests := []struct { + name string + input []string + result []*eventWithQuals + }{ + {"no events", nil, nil}, + {"single string", []string{"mock string"}, []*eventWithQuals{{"mock string", nil, e}}}, + {"two events", []string{"EVENT.FIRST", "EVENT.SECOND"}, []*eventWithQuals{{"EVENT.FIRST", nil, e}, {"EVENT.SECOND", nil, e}}}, + {"event with configs", []string{"EVENT.SECOND:config1=0x404300k:config2=0x404300k"}, + []*eventWithQuals{{"EVENT.SECOND", []string{"config1=0x404300k", "config2=0x404300k"}, e}}}, + {"two events with modifiers", []string{"EVENT.FIRST:config1=0x200300:config2=0x231100:u:H", "EVENT.SECOND:K:p"}, + []*eventWithQuals{{"EVENT.FIRST", []string{"config1=0x200300", "config2=0x231100", "u", "H"}, e}, 
{"EVENT.SECOND", []string{"K", "p"}, e}}}, + {"duplicates", []string{"EVENT1", "EVENT1", "EVENT2"}, []*eventWithQuals{{"EVENT1", nil, e}, {"EVENT2", nil, e}}}, + {"duplicates with different configs", []string{"EVENT1:config1", "EVENT1:config2"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}, {"EVENT1", []string{"config2"}, e}}}, + {"duplicates with the same modifiers", []string{"EVENT1:config1", "EVENT1:config1"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := mConfigParser.parseEvents(test.input) + require.Equal(t, test.result, result) + }) + } +} + +func TestConfigParser_parseIntRanges(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + tests := []struct { + name string + input []string + result []int + failMsg string + }{ + {"coma separated", []string{"0,1,2,3,4"}, []int{0, 1, 2, 3, 4}, ""}, + {"range", []string{"0-10"}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, ""}, + {"mixed", []string{"0-3", "4", "12-16"}, []int{0, 1, 2, 3, 4, 12, 13, 14, 15, 16}, ""}, + {"min and max values", []string{"-2147483648", "2147483647"}, []int{math.MinInt32, math.MaxInt32}, ""}, + {"should remove duplicates", []string{"1-5", "2-6"}, []int{1, 2, 3, 4, 5, 6}, ""}, + {"wrong format", []string{"1,2,3%$S,-100"}, nil, "wrong format for id"}, + {"start is greater than end", []string{"10-3"}, nil, "`10` is equal or greater than `3"}, + {"too big value", []string{"18446744073709551615"}, nil, "wrong format for id"}, + {"too much numbers", []string{fmt.Sprintf("0-%d", maxIDsSize)}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + {"too much numbers mixed", []string{fmt.Sprintf("1-%d", maxIDsSize), "0"}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, err := mConfigParser.parseIntRanges(test.input) + require.Equal(t, test.result, result) + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return + } + require.NoError(t, err) + }) + } +} diff --git a/plugins/inputs/intel_pmu/intel_pmu.go b/plugins/inputs/intel_pmu/intel_pmu.go new file mode 100644 index 0000000000000..99818c4a7bfdb --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu.go @@ -0,0 +1,477 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "io/ioutil" + "math" + "math/big" + "os" + "strconv" + "strings" + "syscall" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + ia "github.com/intel/iaevents" +) + +// Linux availability: https://www.kernel.org/doc/Documentation/sysctl/fs.txt +const fileMaxPath = "/proc/sys/fs/file-max" + +type fileInfoProvider interface { + readFile(string) ([]byte, error) + lstat(string) (os.FileInfo, error) + fileLimit() (uint64, error) +} + +type fileHelper struct{} + +func (fileHelper) readFile(path string) ([]byte, error) { + return ioutil.ReadFile(path) +} + +func (fileHelper) lstat(path string) (os.FileInfo, error) { + return os.Lstat(path) +} + +func (fileHelper) fileLimit() (uint64, error) { + var rLimit syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) + return rLimit.Cur, err +} + +type sysInfoProvider interface { + allCPUs() ([]int, error) + allSockets() ([]int, error) +} + +type iaSysInfo struct{} + +func (iaSysInfo) allCPUs() ([]int, error) { + return ia.AllCPUs() +} + +func 
(iaSysInfo) allSockets() ([]int, error) { + return ia.AllSockets() +} + +// IntelPMU is the plugin type. +type IntelPMU struct { + EventListPaths []string `toml:"event_definitions"` + CoreEntities []*CoreEventEntity `toml:"core_events"` + UncoreEntities []*UncoreEventEntity `toml:"uncore_events"` + + Log telegraf.Logger `toml:"-"` + + fileInfo fileInfoProvider + entitiesReader entitiesValuesReader +} + +// CoreEventEntity represents config section for core events. +type CoreEventEntity struct { + Events []string `toml:"events"` + Cores []string `toml:"cores"` + EventsTag string `toml:"events_tag"` + PerfGroup bool `toml:"perf_group"` + + parsedEvents []*eventWithQuals + parsedCores []int + allEvents bool + + activeEvents []*ia.ActiveEvent +} + +// UncoreEventEntity represents config section for uncore events. +type UncoreEventEntity struct { + Events []string `toml:"events"` + Sockets []string `toml:"sockets"` + Aggregate bool `toml:"aggregate_uncore_units"` + EventsTag string `toml:"events_tag"` + + parsedEvents []*eventWithQuals + parsedSockets []int + allEvents bool + + activeMultiEvents []multiEvent +} + +type multiEvent struct { + activeEvents []*ia.ActiveEvent + perfEvent *ia.PerfEvent + socket int +} + +type eventWithQuals struct { + name string + qualifiers []string + + custom ia.CustomizableEvent +} + +func (i *IntelPMU) Description() string { + return "Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem" +} + +func (i *IntelPMU) SampleConfig() string { + return ` + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events sections. + [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. + ## Could vary depending on type of event, use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events sections. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. 
+ ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" +` +} + +// Start is required for IntelPMU to implement the telegraf.ServiceInput interface. +// Necessary initialization and config checking are done in Init. +func (IntelPMU) Start(_ telegraf.Accumulator) error { + return nil +} + +func (i *IntelPMU) Init() error { + err := checkFiles(i.EventListPaths, i.fileInfo) + if err != nil { + return fmt.Errorf("error during event definitions paths validation: %v", err) + } + + reader, err := newReader(i.EventListPaths) + if err != nil { + return err + } + transformer := ia.NewPerfTransformer() + resolver := &iaEntitiesResolver{reader: reader, transformer: transformer, log: i.Log} + parser := &configParser{log: i.Log, sys: &iaSysInfo{}} + activator := &iaEntitiesActivator{perfActivator: &iaEventsActivator{}, placementMaker: &iaPlacementMaker{}} + + i.entitiesReader = &iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: &realClock{}} + + return i.initialization(parser, resolver, activator) +} + +func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error { + if parser == nil || resolver == nil || activator == nil { + return fmt.Errorf("entities parser and/or resolver and/or activator is nil") + } + + err := parser.parseEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during parsing configuration sections: %v", err) + } + + err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events resolving: %v", err) + } + + err = i.checkFileDescriptors() + if err != nil { + return fmt.Errorf("error during file descriptors checking: %v", err) + } + + err = activator.activateEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events activation: %v", err) + } + return nil +} + +func (i *IntelPMU) checkFileDescriptors() error { + coreFd, err := estimateCoresFd(i.CoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err) + } + uncoreFd, err := estimateUncoreFd(i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate nubmer of uncore events file descriptors: %v", err) + } + if coreFd > math.MaxUint64-uncoreFd { + return fmt.Errorf("requested number of file descriptors exceeds uint64") + } + allFd := coreFd + uncoreFd + + // maximum file descriptors enforced on a kernel level + maxFd, err := readMaxFD(i.fileInfo) + if err != nil { + i.Log.Warnf("cannot obtain number of available file descriptors: %v", err) + } else if allFd > maxFd { + return fmt.Errorf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the 
maximum number", allFd, maxFd) + } + + // soft limit for current process + limit, err := i.fileInfo.fileLimit() + if err != nil { + i.Log.Warnf("cannot obtain limit value of open files: %v", err) + } else if allFd > limit { + return fmt.Errorf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", allFd, limit) + } + + return nil +} + +func (i *IntelPMU) Gather(acc telegraf.Accumulator) error { + if i.entitiesReader == nil { + return fmt.Errorf("entities reader is nil") + } + coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to read entities events values: %v", err) + } + + for id, m := range coreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + } + coreMetrics[id].scaled = scaled.Uint64() + } + for id, m := range uncoreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + } + uncoreMetrics[id].scaled = scaled.Uint64() + } + + publishCoreMeasurements(coreMetrics, acc) + publishUncoreMeasurements(uncoreMetrics, acc) + + return nil +} + +func (i *IntelPMU) Stop() { + for _, entity := range i.CoreEntities { + if entity == nil { + continue + } + for _, event := range entity.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err) + } + } + } + for _, entity := range i.UncoreEntities { + if entity == nil { + continue + } + for _, multi := range entity.activeMultiEvents { + for _, event := range multi.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err) + } + } + } + } +} + +func newReader(files []string) (*ia.JSONFilesReader, error) { + reader := ia.NewFilesReader() + for _, file := range files { + err := reader.AddFiles(file) + if err != nil { + return nil, fmt.Errorf("failed to add files to reader: %v", err) + } + } + return reader, nil +} + +func estimateCoresFd(entities []*CoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + events := uint64(len(entity.parsedEvents)) + cores := uint64(len(entity.parsedCores)) + number, err = multiplyAndAdd(events, cores, number) + if err != nil { + return 0, err + } + } + return number, nil +} + +func estimateUncoreFd(entities []*UncoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + for _, e := range entity.parsedEvents { + if e.custom.Event == nil { + continue + } + pmus := uint64(len(e.custom.Event.PMUTypes)) + sockets := uint64(len(entity.parsedSockets)) + number, err = multiplyAndAdd(pmus, sockets, number) + if err != nil { + return 0, err + } + } + } + return number, nil +} + +func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error) { + bigA := new(big.Int).SetUint64(factorA) + bigB := new(big.Int).SetUint64(factorB) + activeEvents := new(big.Int).Mul(bigA, bigB) + if !activeEvents.IsUint64() { + return 0, fmt.Errorf("value `%s` cannot be represented as uint64", activeEvents.String()) + } + if sum > 
math.MaxUint64-activeEvents.Uint64() { + return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) + } + sum += activeEvents.Uint64() + return sum, nil +} + +func readMaxFD(reader fileInfoProvider) (uint64, error) { + if reader == nil { + return 0, fmt.Errorf("file reader is nil") + } + buf, err := reader.readFile(fileMaxPath) + if err != nil { + return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err) + } + max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err) + } + return max, nil +} + +func checkFiles(paths []string, fileInfo fileInfoProvider) error { + // No event definition JSON locations present + if len(paths) == 0 { + return fmt.Errorf("no paths were given") + } + if fileInfo == nil { + return fmt.Errorf("file info provider is nil") + } + // Wrong files + for _, path := range paths { + lInfo, err := fileInfo.lstat(path) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("file `%s` doesn't exist", path) + } + return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) + } + mode := lInfo.Mode() + if mode&os.ModeSymlink != 0 { + return fmt.Errorf("file %s is a symlink", path) + } + if !mode.IsRegular() { + return fmt.Errorf("file `%s` doesn't point to a reagular file", path) + } + } + return nil +} + +func publishCoreMeasurements(metrics []coreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + tags["cpu"] = strconv.Itoa(m.cpu) + + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func publishUncoreMeasurements(metrics []uncoreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + + tags["socket"] = strconv.Itoa(m.socket) + tags["unit_type"] = m.unitType + if !m.agg { + tags["unit"] = m.unit + } + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func init() { + inputs.Add("intel_pmu", func() telegraf.Input { + pmu := IntelPMU{ + fileInfo: &fileHelper{}, + } + return &pmu + }) +} diff --git a/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go new file mode 100644 index 0000000000000..64c7f5bbf1ce1 --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go @@ -0,0 +1,4 @@ +//go:build !linux || !amd64 +// +build !linux !amd64 + +package intel_pmu diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go new file mode 100644 index 0000000000000..e096c4c021d0e --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -0,0 +1,555 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestInitialization(t *testing.T) { 
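+	// initialization must fail when the parser, resolver or activator is missing or returns an error,
+	// or when the estimated file descriptor demand exceeds the kernel limit, and succeed otherwise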
+ mError := errors.New("mock error") + mParser := &mockEntitiesParser{} + mResolver := &mockEntitiesResolver{} + mActivator := &mockEntitiesActivator{} + mFileInfo := &mockFileInfoProvider{} + + file := "path/to/file" + paths := []string{file} + + t.Run("missing parser, resolver or activator", func(t *testing.T) { + err := (&IntelPMU{}).initialization(mParser, nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, mResolver, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, nil, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + }) + + t.Run("parse entities error", func(t *testing.T) { + mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during parsing configuration sections") + mParser.AssertExpectations(t) + }) + + t.Run("resolver error", func(t *testing.T) { + mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events resolving") + mParser.AssertExpectations(t) + }) + + t.Run("exceeded file descriptors", func(t *testing.T) { + limit := []byte("10") + uncoreEntities := []*UncoreEventEntity{{parsedEvents: makeEvents(10, 21), parsedSockets: makeIDs(5)}} + estimation := 1050 + + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo, UncoreEntities: uncoreEntities} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(limit, nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", estimation, 10)) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + }) + + t.Run("failed to activate entities", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). 
+ On("fileLimit").Return(uint64(0), mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events activation") + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("everything all right", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). + On("fileLimit").Return(uint64(0), mError).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestGather(t *testing.T) { + mEntitiesValuesReader := &mockEntitiesValuesReader{} + mAcc := &testutil.Accumulator{} + + mIntelPMU := &IntelPMU{entitiesReader: mEntitiesValuesReader} + + type fieldWithTags struct { + fields map[string]interface{} + tags map[string]string + } + + t.Run("entities reader is nil", func(t *testing.T) { + err := (&IntelPMU{entitiesReader: nil}).Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), "entities reader is nil") + }) + + t.Run("error while reading entities", func(t *testing.T) { + errMock := fmt.Errorf("houston we have a problem") + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). 
+ Return(nil, nil, errMock).Once() + + err := mIntelPMU.Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read entities events values: %v", errMock)) + mEntitiesValuesReader.AssertExpectations(t) + }) + + tests := []struct { + name string + coreMetrics []coreMetric + uncoreMetrics []uncoreMetric + results []fieldWithTags + errMSg string + }{ + { + name: "successful readings", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: 100, Enabled: 200, Running: 200}, + name: "CORE_EVENT_1", + tag: "DOGES", + cpu: 1, + }, + { + values: ia.CounterValue{Raw: 2100, Enabled: 400, Running: 200}, + name: "CORE_EVENT_2", + cpu: 0, + }, + }, + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: 2134562, Enabled: 1000000, Running: 1000000}, + name: "UNCORE_EVENT_1", + tag: "SHIBA", + unitType: "cbox", + unit: "cbox_1", + socket: 3, + agg: false, + }, + { + values: ia.CounterValue{Raw: 2134562, Enabled: 3222222, Running: 2100000}, + name: "UNCORE_EVENT_2", + unitType: "cbox", + socket: 0, + agg: true, + }, + }, + results: []fieldWithTags{ + { + fields: map[string]interface{}{ + "raw": uint64(100), + "enabled": uint64(200), + "running": uint64(200), + "scaled": uint64(100), + }, + tags: map[string]string{ + "event": "CORE_EVENT_1", + "cpu": "1", + "events_tag": "DOGES", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2100), + "enabled": uint64(400), + "running": uint64(200), + "scaled": uint64(4200), + }, + tags: map[string]string{ + "event": "CORE_EVENT_2", + "cpu": "0", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(1000000), + "running": uint64(1000000), + "scaled": uint64(2134562), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_1", + "events_tag": "SHIBA", + "socket": "3", + "unit_type": "cbox", + "unit": "cbox_1", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(3222222), + "running": uint64(2100000), + "scaled": uint64(3275253), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_2", + "socket": "0", + "unit_type": "cbox", + }, + }, + }, + }, + { + name: "core scaled value greater then max uint64", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: exceeds uint64", + }, + { + name: "uncore scaled value greater then max uint64", + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG_UNCORE", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). 
+ Return(test.coreMetrics, test.uncoreMetrics, nil).Once() + + err := mIntelPMU.Gather(mAcc) + + mEntitiesValuesReader.AssertExpectations(t) + if len(test.errMSg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMSg) + return + } + require.NoError(t, err) + for _, result := range test.results { + mAcc.AssertContainsTaggedFields(t, "pmu_metric", result.fields, result.tags) + } + }) + } +} + +func TestCheckFileDescriptors(t *testing.T) { + tests := []struct { + name string + uncores []*UncoreEventEntity + cores []*CoreEventEntity + estimation uint64 + maxFD []byte + fileLimit uint64 + errMsg string + }{ + {"exceed maximum file descriptors number", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}}, + []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}}, + 12020, []byte("11000"), 8000, fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", 12020, 11000), + }, + {"exceed soft file limit", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 800, fmt.Sprintf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", 11000, 800), + }, + {"no exceeds", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, + []*CoreEventEntity{{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 13000, "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mIntelPMU := IntelPMU{ + CoreEntities: test.cores, + UncoreEntities: test.uncores, + fileInfo: mFileInfo, + Log: testutil.Logger{}, + } + mFileInfo.On("readFile", fileMaxPath).Return(test.maxFD, nil). 
+ On("fileLimit").Return(test.fileLimit, nil).Once() + + err := mIntelPMU.checkFileDescriptors() + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) + } +} + +func TestEstimateUncoreFd(t *testing.T) { + tests := []struct { + name string + entities []*UncoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"nil perf event", []*UncoreEventEntity{{parsedEvents: []*eventWithQuals{{"", nil, ia.CustomizableEvent{}}}, parsedSockets: makeIDs(0)}}, 0}, + {"one uncore entity", []*UncoreEventEntity{{parsedEvents: makeEvents(10, 10), parsedSockets: makeIDs(20)}}, 2000}, + {"nil entity", []*UncoreEventEntity{nil, {parsedEvents: makeEvents(1, 8), parsedSockets: makeIDs(1)}}, 8}, + {"many core entities", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}, + }, 11305}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{UncoreEntities: test.entities} + result, err := estimateUncoreFd(mIntelPMU.UncoreEntities) + require.Equal(t, test.result, result) + require.NoError(t, err) + }) + } +} + +func TestEstimateCoresFd(t *testing.T) { + tests := []struct { + name string + entities []*CoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"one core entity", []*CoreEventEntity{{parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"nil entity", []*CoreEventEntity{nil, {parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"many core entities", []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}, + }, 715}, + {"1024 events", []*CoreEventEntity{{parsedEvents: makeEvents(1024, 1), parsedCores: makeIDs(12)}}, 12288}, + {"big number", []*CoreEventEntity{{parsedEvents: makeEvents(1048576, 1), parsedCores: makeIDs(1024)}}, 1073741824}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{CoreEntities: test.entities} + result, err := estimateCoresFd(mIntelPMU.CoreEntities) + require.NoError(t, err) + require.Equal(t, test.result, result) + }) + } +} + +func makeEvents(number int, pmusNumber int) []*eventWithQuals { + a := make([]*eventWithQuals, number) + for i := range a { + b := make([]ia.NamedPMUType, pmusNumber) + for j := range b { + b[j] = ia.NamedPMUType{} + } + a[i] = &eventWithQuals{fmt.Sprintf("EVENT.%d", i), nil, + ia.CustomizableEvent{Event: &ia.PerfEvent{PMUTypes: b}}, + } + } + return a +} + +func makeIDs(number int) []int { + a := make([]int, number) + for i := range a { + a[i] = i + } + return a +} + +func TestReadMaxFD(t *testing.T) { + mFileReader := &mockFileInfoProvider{} + + t.Run("reader is nil", func(t *testing.T) { + result, err := readMaxFD(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file reader is nil") + require.Zero(t, result) + }) + + openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath) + parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath) + + tests := []struct { + name string + err error + content []byte + maxFD uint64 + failMsg string + }{ + {"read file error", fmt.Errorf("mock error"), nil, 0, openErrorMsg}, + {"file content parse error", 
nil, []byte("wrong format"), 0, parseErrorMsg}, + {"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg}, + {"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg}, + {"reading succeeded", nil, []byte("12343122"), 12343122, ""}, + {"min value reading", nil, []byte("0"), 0, ""}, + {"max uint 64 reading", nil, []byte("18446744073709551615"), math.MaxUint64, ""}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileReader.On("readFile", fileMaxPath).Return(test.content, test.err).Once() + result, err := readMaxFD(mFileReader) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + } else { + require.NoError(t, err) + } + require.Equal(t, test.maxFD, result) + mFileReader.AssertExpectations(t) + }) + } +} + +func TestAddFiles(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mError := errors.New("mock error") + + t.Run("no paths", func(t *testing.T) { + err := checkFiles([]string{}, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), "no paths were given") + }) + + t.Run("no file info provider", func(t *testing.T) { + err := checkFiles([]string{"path/1, path/2"}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file info provider is nil") + }) + + t.Run("stat error", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, mError).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file does not exist", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, os.ErrNotExist).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file is symlink", func(t *testing.T) { + file := "path/to/symlink" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeSymlink} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file doesn't point to a regular file", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeDir} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a reagular file", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("checking succeeded", func(t *testing.T) { + paths := []string{"path/to/file1", "path/to/file2", "path/to/file3"} + fileInfo := fakeFileInfo{} + + for _, file := range paths { + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + } + + err := checkFiles(paths, mFileInfo) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) +} + +type fakeFileInfo struct { + fileMode os.FileMode +} + +func (f fakeFileInfo) Name() string { return "" } +func (f fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } +func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (f fakeFileInfo) IsDir() bool { return false } +func (f 
fakeFileInfo) Sys() interface{} { return nil } diff --git a/plugins/inputs/intel_pmu/mocks.go b/plugins/inputs/intel_pmu/mocks.go new file mode 100644 index 0000000000000..82799b26f2b04 --- /dev/null +++ b/plugins/inputs/intel_pmu/mocks.go @@ -0,0 +1,407 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "os" + + "github.com/intel/iaevents" + "github.com/stretchr/testify/mock" +) + +// mockValuesReader is an autogenerated mock type for the valuesReader type +type mockValuesReader struct { + mock.Mock +} + +// readValue provides a mock function with given fields: event +func (_m *mockValuesReader) readValue(event *iaevents.ActiveEvent) (iaevents.CounterValue, error) { + ret := _m.Called(event) + + var r0 iaevents.CounterValue + if rf, ok := ret.Get(0).(func(*iaevents.ActiveEvent) iaevents.CounterValue); ok { + r0 = rf(event) + } else { + r0 = ret.Get(0).(iaevents.CounterValue) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*iaevents.ActiveEvent) error); ok { + r1 = rf(event) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockEntitiesValuesReader is an autogenerated mock type for the entitiesValuesReader type +type mockEntitiesValuesReader struct { + mock.Mock +} + +// readEntities provides a mock function with given fields: _a0, _a1 +func (_m *mockEntitiesValuesReader) readEntities(_a0 []*CoreEventEntity, _a1 []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + ret := _m.Called(_a0, _a1) + + var r0 []coreMetric + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) []coreMetric); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coreMetric) + } + } + + var r1 []uncoreMetric + if rf, ok := ret.Get(1).(func([]*CoreEventEntity, []*UncoreEventEntity) []uncoreMetric); ok { + r1 = rf(_a0, _a1) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]uncoreMetric) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r2 = rf(_a0, _a1) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockEntitiesActivator is an autogenerated mock type for the entitiesActivator type +type mockEntitiesActivator struct { + mock.Mock +} + +// activateEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesParser is an autogenerated mock type for the entitiesParser type +type mockEntitiesParser struct { + mock.Mock +} + +// parseEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesResolver is an autogenerated mock type for the entitiesResolver type +type mockEntitiesResolver struct { + mock.Mock +} + +// resolveEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m 
*mockEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEventsActivator is an autogenerated mock type for the eventsActivator type +type mockEventsActivator struct { + mock.Mock +} + +// activateEvent provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateEvent(_a0 iaevents.Activator, _a1 iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveEvent + if rf, ok := ret.Get(0).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateGroup provides a mock function with given fields: _a0, _a1 +func (_m *mockEventsActivator) activateGroup(_a0 iaevents.PlacementProvider, _a1 []iaevents.CustomizableEvent) (*iaevents.ActiveEventGroup, error) { + ret := _m.Called(_a0, _a1) + + var r0 *iaevents.ActiveEventGroup + if rf, ok := ret.Get(0).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) *iaevents.ActiveEventGroup); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEventGroup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateMulti provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateMulti(_a0 iaevents.MultiActivator, _a1 []iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveMultiEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveMultiEvent + if rf, ok := ret.Get(0).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveMultiEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveMultiEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockFileInfoProvider is an autogenerated mock type for the fileInfoProvider type +type mockFileInfoProvider struct { + mock.Mock +} + +// fileLimit provides a mock function with given fields: +func (_m *mockFileInfoProvider) fileLimit() (uint64, error) { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) readFile(_a0 string) ([]byte, error) { + ret := _m.Called(_a0) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(_a0) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// lstat provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) lstat(_a0 string) (os.FileInfo, error) { + ret := _m.Called(_a0) + + var r0 os.FileInfo + if rf, ok := ret.Get(0).(func(string) os.FileInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(os.FileInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockPlacementMaker is an autogenerated mock type for the placementMaker type +type mockPlacementMaker struct { + mock.Mock +} + +// makeCorePlacements provides a mock function with given fields: cores, perfEvent +func (_m *mockPlacementMaker) makeCorePlacements(cores []int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(cores, factory) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func([]int, iaevents.PlacementFactory) []iaevents.PlacementProvider); ok { + r0 = rf(cores, factory) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]int, iaevents.PlacementFactory) error); ok { + r1 = rf(cores, factory) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// makeUncorePlacements provides a mock function with given fields: factory, socket +func (_m *mockPlacementMaker) makeUncorePlacements(socket int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(factory, socket) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func(iaevents.PlacementFactory, int) []iaevents.PlacementProvider); ok { + r0 = rf(factory, socket) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementFactory, int) error); ok { + r1 = rf(factory, socket) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockSysInfoProvider is an autogenerated mock type for the sysInfoProvider type +type mockSysInfoProvider struct { + mock.Mock +} + +// allCPUs provides a mock function with given fields: +func (_m *mockSysInfoProvider) allCPUs() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// allSockets provides a mock function with given fields: +func (_m *mockSysInfoProvider) allSockets() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockTransformer is an autogenerated mock type for the Transformer type +type MockTransformer struct { + mock.Mock +} + +// Transform provides a mock function with given fields: reader, matcher +func (_m *MockTransformer) Transform(reader iaevents.Reader, matcher iaevents.Matcher) ([]*iaevents.PerfEvent, error) { + ret := _m.Called(reader, matcher) + + var r0 
[]*iaevents.PerfEvent + if rf, ok := ret.Get(0).(func(iaevents.Reader, iaevents.Matcher) []*iaevents.PerfEvent); ok { + r0 = rf(reader, matcher) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*iaevents.PerfEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Reader, iaevents.Matcher) error); ok { + r1 = rf(reader, matcher) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/intel_pmu/reader.go b/plugins/inputs/intel_pmu/reader.go new file mode 100644 index 0000000000000..2df72a96618df --- /dev/null +++ b/plugins/inputs/intel_pmu/reader.go @@ -0,0 +1,249 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "time" + + ia "github.com/intel/iaevents" + "golang.org/x/sync/errgroup" +) + +type coreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + tag string + cpu int + + time time.Time +} + +type uncoreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + unitType string + unit string + tag string + socket int + + agg bool + + time time.Time +} + +type valuesReader interface { + readValue(event *ia.ActiveEvent) (ia.CounterValue, error) +} + +type iaValuesReader struct{} + +func (iaValuesReader) readValue(event *ia.ActiveEvent) (ia.CounterValue, error) { + return event.ReadValue() +} + +type entitiesValuesReader interface { + readEntities([]*CoreEventEntity, []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) +} + +type iaEntitiesValuesReader struct { + eventReader valuesReader + timer clock +} + +type clock interface { + now() time.Time +} + +type realClock struct{} + +func (realClock) now() time.Time { + return time.Now() +} + +func (ie *iaEntitiesValuesReader) readEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + var coreMetrics []coreMetric + var uncoreMetrics []uncoreMetric + + for _, entity := range coreEntities { + newMetrics, err := ie.readCoreEvents(entity) + if err != nil { + return nil, nil, err + } + coreMetrics = append(coreMetrics, newMetrics...) + } + for _, entity := range uncoreEntities { + newMetrics, err := ie.readUncoreEvents(entity) + if err != nil { + return nil, nil, err + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
+ } + return coreMetrics, uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + metrics := make([]coreMetric, len(entity.activeEvents)) + errGroup := errgroup.Group{} + + for i, event := range entity.activeEvents { + id := i + actualEvent := event + + if event == nil || event.PerfEvent == nil { + return nil, fmt.Errorf("active event or corresponding perf event is nil") + } + + errGroup.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err) + } + cpu, _ := actualEvent.PMUPlacement() + newMetric := coreMetric{ + values: values, + tag: entity.EventsTag, + cpu: cpu, + name: actualEvent.PerfEvent.Name, + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + } + err := errGroup.Wait() + if err != nil { + return nil, err + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) { + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + var uncoreMetrics []uncoreMetric + + for _, event := range entity.activeMultiEvents { + if entity.Aggregate { + newMetric, err := ie.readMultiEventAgg(event) + if err != nil { + return nil, err + } + newMetric.tag = entity.EventsTag + uncoreMetrics = append(uncoreMetrics, newMetric) + } else { + newMetrics, err := ie.readMultiEventSeparately(event) + if err != nil { + return nil, err + } + for i := range newMetrics { + newMetrics[i].tag = entity.EventsTag + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
+ } + } + return uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return nil, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + metrics := make([]uncoreMetric, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + newMetric := uncoreMetric{ + values: values, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: actualEvent.PMUName(), + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + err := group.Wait() + if err != nil { + return nil, err + } + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return uncoreMetric{}, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return uncoreMetric{}, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + values := make([]ia.CounterValue, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + value, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + values[id] = value + return nil + }) + } + err := group.Wait() + if err != nil { + return uncoreMetric{}, err + } + + bRaw, bEnabled, bRunning := ia.AggregateValues(values) + if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() { + return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeding", perfEvent) + } + aggValues := ia.CounterValue{ + Raw: bRaw.Uint64(), + Enabled: bEnabled.Uint64(), + Running: bRunning.Uint64(), + } + newMetric := uncoreMetric{ + values: aggValues, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + time: ie.timer.now(), + } + return newMetric, nil +} diff --git a/plugins/inputs/intel_pmu/reader_test.go b/plugins/inputs/intel_pmu/reader_test.go new file mode 100644 index 0000000000000..409393383056f --- /dev/null +++ b/plugins/inputs/intel_pmu/reader_test.go @@ -0,0 +1,522 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "math" + "testing" + "time" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type moonClock struct{} + +func (moonClock) now() time.Time { + return time.Date(1969, 7, 20, 20, 17, 0, 0, time.UTC) +} + +type eventWithValues struct { + activeEvent *ia.ActiveEvent + values ia.CounterValue +} + +func TestReadCoreEvents(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{timer: 
moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("nil events", func(t *testing.T) { + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, nil) + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), "active event or corresponding perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}} + + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, event) + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + entity := &CoreEventEntity{} + var expected []coreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event2"}}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event3"}}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + entity.activeEvents = append(entity.activeEvents, tc.activeEvent) + cpu, _ := tc.activeEvent.PMUPlacement() + newMetric := coreMetric{ + values: tc.values, + tag: entity.EventsTag, + cpu: cpu, + name: tc.activeEvent.PerfEvent.Name, + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventSeparately(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("multi event is 
nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{&iaValuesReader{}, moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "no active events or perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + perfEvent := &ia.PerfEvent{Name: "event"} + + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + multi := multiEvent{perfEvent: perfEvent} + var expected []uncoreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + multi.activeEvents = append(multi.activeEvents, tc.activeEvent) + newMetric := uncoreMetric{ + values: tc.values, + socket: multi.socket, + unitType: multi.perfEvent.PMUName, + name: multi.perfEvent.Name, + unit: tc.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventAgg(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + errMock := fmt.Errorf("mock error") + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + + tests := []struct { + name string + multi multiEvent + events []eventWithValues + result ia.CounterValue + readFail bool + errMsg string + }{ + { + name: "no events", + multi: multiEvent{perfEvent: perfEvent}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "no perf event", + multi: multiEvent{perfEvent: nil, activeEvents: []*ia.ActiveEvent{{}, {}}}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "successful reading and aggregation", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, 
ia.CounterValue{Raw: 5123, Enabled: 1231242, Running: 41123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4500, Enabled: 1823423, Running: 182343}}, + }, + result: ia.CounterValue{Raw: 9623, Enabled: 3054665, Running: 223466}, + errMsg: "", + }, + { + name: "to big numbers", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: math.MaxUint64, Enabled: 0, Running: 0}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}}, + }, + result: ia.CounterValue{}, + errMsg: fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeding", perfEvent), + }, + { + name: "reading fail", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 0, Enabled: 0, Running: 0}}, + }, + readFail: true, + result: ia.CounterValue{}, + errMsg: "failed to read uncore event", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for _, eventWithValue := range test.events { + test.multi.activeEvents = append(test.multi.activeEvents, eventWithValue.activeEvent) + if test.readFail { + mReader.On("readValue", eventWithValue.activeEvent).Return(ia.CounterValue{}, errMock).Once() + continue + } + mReader.On("readValue", eventWithValue.activeEvent).Return(eventWithValue.values, nil).Once() + } + metric, err := mEntitiesReader.readMultiEventAgg(test.multi) + mReader.AssertExpectations(t) + + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + expected := uncoreMetric{ + values: test.result, + socket: test.multi.socket, + unitType: test.multi.perfEvent.PMUName, + name: test.multi.perfEvent.Name, + time: mTimer.now(), + } + require.NoError(t, err) + require.Equal(t, expected, metric) + }) + } +} + +func TestReadUncoreEvents(t *testing.T) { + errMock := fmt.Errorf("mock error") + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("read aggregated entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + newMetric := uncoreMetric{ + values: ia.CounterValue{Raw: 6008, Enabled: 0, Running: 0}, + socket: multi.socket, + unitType: 
perfEvent.PMUName, + name: perfEvent.Name, + time: mTimer.now(), + } + newMetric2 := uncoreMetric{ + values: ia.CounterValue{Raw: 125008, Enabled: 0, Running: 0}, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + time: mTimer.now(), + } + expected := []uncoreMetric{newMetric, newMetric2} + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entityAgg) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + }) + + t.Run("read distributed entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent, socket: 2} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2, socket: 1} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + var expected []uncoreMetric + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + entity := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + 
mReader.AssertExpectations(t) + }) + }) +} + +func TestReadEntities(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("read entities", func(t *testing.T) { + values := ia.CounterValue{} + socket := 0 + + corePerfEvent := &ia.PerfEvent{Name: "core event 1", PMUName: "cpu"} + activeCoreEvent := []*ia.ActiveEvent{{PerfEvent: corePerfEvent}} + coreMetric1 := coreMetric{values: values, name: corePerfEvent.Name, time: mTimer.now()} + + corePerfEvent2 := &ia.PerfEvent{Name: "core event 2", PMUName: "cpu"} + activeCoreEvent2 := []*ia.ActiveEvent{{PerfEvent: corePerfEvent2}} + coreMetric2 := coreMetric{values: values, name: corePerfEvent2.Name, time: mTimer.now()} + + uncorePerfEvent := &ia.PerfEvent{Name: "uncore event 1", PMUName: "cbox"} + activeUncoreEvent := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent}} + uncoreMetric1 := uncoreMetric{ + values: values, + name: uncorePerfEvent.Name, + unitType: uncorePerfEvent.PMUName, + socket: socket, + time: mTimer.now(), + } + + uncorePerfEvent2 := &ia.PerfEvent{Name: "uncore event 2", PMUName: "rig"} + activeUncoreEvent2 := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent2}} + uncoreMetric2 := uncoreMetric{ + values: values, + name: uncorePerfEvent2.Name, + unitType: uncorePerfEvent2.PMUName, + socket: socket, + time: mTimer.now(), + } + + coreEntities := []*CoreEventEntity{{activeEvents: activeCoreEvent}, {activeEvents: activeCoreEvent2}} + + uncoreEntities := []*UncoreEventEntity{ + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent, perfEvent: uncorePerfEvent, socket: socket}}}, + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent2, perfEvent: uncorePerfEvent2, socket: socket}}}, + } + + expectedCoreMetrics := []coreMetric{coreMetric1, coreMetric2} + expectedUncoreMetrics := []uncoreMetric{uncoreMetric1, uncoreMetric2} + + mReader.On("readValue", activeCoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeCoreEvent2[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent2[0]).Return(values, nil).Once() + + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, uncoreEntities) + + require.NoError(t, err) + require.Equal(t, expectedCoreMetrics, coreMetrics) + require.NotNil(t, expectedUncoreMetrics, uncoreMetrics) + mReader.AssertExpectations(t) + }) + + t.Run("core entity reading failed", func(t *testing.T) { + coreEntities := []*CoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) + + t.Run("uncore entity reading failed", func(t *testing.T) { + uncoreEntities := []*UncoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(nil, uncoreEntities) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) +} diff --git a/plugins/inputs/intel_pmu/resolver.go b/plugins/inputs/intel_pmu/resolver.go new file mode 100644 index 0000000000000..8457f48ca14db --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver.go @@ -0,0 +1,150 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + ia 
"github.com/intel/iaevents" +) + +type entitiesResolver interface { + resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesResolver struct { + reader ia.Reader + transformer ia.Transformer + log telegraf.Logger +} + +func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, entity := range coreEntities { + if entity == nil { + return fmt.Errorf("core entity is nil") + } + if entity.allEvents { + newEvents, _, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed core event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err) + } + if customEvent.Event.Uncore { + return fmt.Errorf("uncore event `%s` found in core entity", event.name) + } + event.custom = customEvent + } + } + for _, entity := range uncoreEntities { + if entity == nil { + return fmt.Errorf("uncore entity is nil") + } + if entity.allEvents { + _, newEvents, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed uncore event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err) + } + if !customEvent.Event.Uncore { + return fmt.Errorf("core event `%s` found in uncore entity", event.name) + } + event.custom = customEvent + } + } + return nil +} + +func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, uncoreEvents []*eventWithQuals, err error) { + if e.transformer == nil { + return nil, nil, errors.New("transformer is nil") + } + + perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher()) + if err != nil { + re, ok := err.(*ia.TransformationError) + if !ok { + return nil, nil, err + } + if e.log != nil && re != nil { + var eventErrs []string + for _, eventErr := range re.Errors() { + if eventErr == nil { + continue + } + eventErrs = append(eventErrs, eventErr.Error()) + } + errorsStr := strings.Join(eventErrs, ",\n") + e.log.Warnf("Cannot resolve all of the events from provided files:\n%s.\nSome events may be omitted.", errorsStr) + } + } + + for _, perfEvent := range perfEvents { + newEvent := &eventWithQuals{ + name: perfEvent.Name, + custom: ia.CustomizableEvent{Event: perfEvent}, + } + // build options for event + newEvent.custom.Options, err = ia.NewOptions().Build() + if err != nil { + return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err) + } + if perfEvent.Uncore { + uncoreEvents = append(uncoreEvents, newEvent) + continue + } + coreEvents = append(coreEvents, newEvent) + } + return coreEvents, uncoreEvents, nil +} + +func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.CustomizableEvent, error) { + var custom ia.CustomizableEvent + if e.transformer == nil { + return custom, errors.New("events transformer is nil") + } + if name == "" { + return custom, errors.New("event name is empty") + } + matcher := ia.NewNameMatcher(name) + perfEvents, err := 
e.transformer.Transform(e.reader, matcher) + if err != nil { + return custom, fmt.Errorf("failed to transform perf events: %v", err) + } + if len(perfEvents) < 1 { + return custom, fmt.Errorf("failed to resolve unknown event `%s`", name) + } + // build options for event + options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + if err != nil { + return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err) + } + custom = ia.CustomizableEvent{ + Event: perfEvents[0], + Options: options, + } + return custom, nil +} diff --git a/plugins/inputs/intel_pmu/resolver_test.go b/plugins/inputs/intel_pmu/resolver_test.go new file mode 100644 index 0000000000000..176b6d133772c --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver_test.go @@ -0,0 +1,376 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestResolveEntities(t *testing.T) { + errMock := errors.New("mock error") + mLog := testutil.Logger{} + mTransformer := &MockTransformer{} + mResolver := &iaEntitiesResolver{transformer: mTransformer, log: mLog} + + type test struct { + perfEvent *ia.PerfEvent + options ia.Options + event *eventWithQuals + } + + t.Run("nil entities", func(t *testing.T) { + err := mResolver.resolveEntities([]*CoreEventEntity{nil}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "core entity is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{nil}) + + require.Error(t, err) + require.Contains(t, err.Error(), "uncore entity is nil") + }) + + t.Run("nil parsed events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed core event is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed uncore event is nil") + }) + + t.Run("fail to resolve core events", func(t *testing.T) { + name := "mock event 1" + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve uncore events", func(t *testing.T) { + name := "mock event 1" + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve all core and uncore events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{allEvents: true} + mUncoreEntity := &UncoreEventEntity{allEvents: true} + corePerfEvents := []*ia.PerfEvent{ + {Name: "core 
event1"}, + {Name: "core event2"}, + {Name: "core event3"}, + } + uncorePerfEvents := []*ia.PerfEvent{ + {Name: "uncore event1", Uncore: true}, + {Name: "uncore event2", Uncore: true}, + {Name: "uncore event3", Uncore: true}, + } + matcher := ia.NewNameMatcher() + + t.Run("fail to resolve all core events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all uncore events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all events with transformationError", func(t *testing.T) { + transformErr := &ia.TransformationError{} + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, transformErr).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, transformErr).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, nil).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, nil).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + t.Run("uncore event found in core entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "uncore event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("core event found in uncore entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ 
:= ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "core event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve core and uncore events", func(t *testing.T) { + var mCoreEvents []*eventWithQuals + var nUncoreEvents []*eventWithQuals + + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + emptyOptions, _ := ia.NewOptions().Build() + + coreTestCases := []test{ + {event: &eventWithQuals{name: "core1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "core1"}}, + {event: &eventWithQuals{name: "core2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core2"}}, + {event: &eventWithQuals{name: "core3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core3"}}, + } + uncoreTestCases := []test{ + {event: &eventWithQuals{name: "uncore1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "uncore1", Uncore: true}}, + {event: &eventWithQuals{name: "uncore2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore2", Uncore: true}}, + {event: &eventWithQuals{name: "uncore3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore3", Uncore: true}}, + } + + for _, test := range coreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + mCoreEvents = append(mCoreEvents, test.event) + } + + for _, test := range uncoreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + nUncoreEvents = append(nUncoreEvents, test.event) + } + + mCoreEntity := &CoreEventEntity{parsedEvents: mCoreEvents, allEvents: false} + mUncoreEntity := &UncoreEventEntity{parsedEvents: nUncoreEvents, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + + require.NoError(t, err) + for _, test := range append(coreTestCases, uncoreTestCases...) 
{ + require.Equal(t, test.perfEvent, test.event.custom.Event) + require.Equal(t, test.options, test.event.custom.Options) + } + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveAllEvents(t *testing.T) { + mTransformer := &MockTransformer{} + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("no events", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, _, err := mResolver.resolveAllEvents() + require.NoError(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully resolved events", func(t *testing.T) { + perfEvent1 := &ia.PerfEvent{Name: "mock1"} + perfEvent2 := &ia.PerfEvent{Name: "mock2"} + uncorePerfEvent1 := &ia.PerfEvent{Name: "mock3", Uncore: true} + uncorePerfEvent2 := &ia.PerfEvent{Name: "mock4", Uncore: true} + + options, _ := ia.NewOptions().Build() + perfEvents := []*ia.PerfEvent{perfEvent1, perfEvent2, uncorePerfEvent1, uncorePerfEvent2} + + expectedCore := []*eventWithQuals{ + {name: perfEvent1.Name, custom: ia.CustomizableEvent{Event: perfEvent1, Options: options}}, + {name: perfEvent2.Name, custom: ia.CustomizableEvent{Event: perfEvent2, Options: options}}, + } + + expectedUncore := []*eventWithQuals{ + {name: uncorePerfEvent1.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent1, Options: options}}, + {name: uncorePerfEvent2.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent2, Options: options}}, + } + + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(perfEvents, nil) + + coreEvents, uncoreEvents, err := mResolver.resolveAllEvents() + require.NoError(t, err) + require.Equal(t, expectedCore, coreEvents) + require.Equal(t, expectedUncore, uncoreEvents) + + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveEvent(t *testing.T) { + mTransformer := &MockTransformer{} + mEvent := "mock event" + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, err := mResolver.resolveEvent("event", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "events transformer is nil") + }) + + t.Run("event is empty", func(t *testing.T) { + _, err := mResolver.resolveEvent("", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event name is empty") + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to transform perf events") + mTransformer.AssertExpectations(t) + }) + + t.Run("no events transformed", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to 
resolve unknown event") + mTransformer.AssertExpectations(t) + }) + + t.Run("not valid qualifiers", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"wrong modifiers"} + + matcher := ia.NewNameMatcher(event) + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + _, err := mResolver.resolveEvent(event, qualifiers) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event)) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully transformed", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"config1=0x012h", "config2=0x034k"} + + matcher := ia.NewNameMatcher(event) + + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + + expectedOptions, _ := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + customEvent, err := mResolver.resolveEvent(event, qualifiers) + require.NoError(t, err) + require.Equal(t, mPerfEvent, customEvent.Event) + require.Equal(t, expectedOptions, customEvent.Options) + mTransformer.AssertExpectations(t) + }) +} From cd0a7cd52f8be2225db9160722e4e15517656eb1 Mon Sep 17 00:00:00 2001 From: rentiansheng Date: Wed, 24 Nov 2021 06:05:23 +0800 Subject: [PATCH 058/133] fix: input plugin statsd bug (#10116) --- config/config.go | 3 ++- plugins/inputs/statsd/statsd.go | 6 ++++++ plugins/inputs/statsd/statsd_test.go | 27 +++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/config/config.go b/config/config.go index 9333e32ab0b9a..97f9c35b3ab55 100644 --- a/config/config.go +++ b/config/config.go @@ -165,7 +165,8 @@ type AgentConfig struct { // TODO(cam): Remove UTC and parameter, they are no longer // valid for the agent config. 
Leaving them here for now for backwards- // compatibility - UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect + // Deprecated: 1.0.0 after, has no effect + UTC bool `toml:"utc"` // Debug is the option for running in debug mode Debug bool `toml:"debug"` diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index d23a79225c392..861d2561a85a8 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -797,6 +797,12 @@ func parseKeyValue(keyValue string) (key string, val string) { val = split[1] } else if len(split) == 1 { val = split[0] + } else if len(split) > 2 { + // fix: https://github.com/influxdata/telegraf/issues/10113 + // fix: value has "=" parse error + // uri=/service/endpoint?sampleParam={paramValue} parse value key="uri", val="/service/endpoint?sampleParam\={paramValue}" + key = split[0] + val = strings.Join(split[1:], "=") } return key, val diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 48889aa43bf67..5121f06b6b8f7 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -1674,3 +1674,30 @@ func TestParse_Ints(t *testing.T) { require.NoError(t, s.Gather(acc)) require.Equal(t, s.Percentiles, []Number{90.0}) } + +func TestParse_KeyValue(t *testing.T) { + type output struct { + key string + val string + } + + validLines := []struct { + input string + output output + }{ + {"", output{"", ""}}, + {"only value", output{"", "only value"}}, + {"key=value", output{"key", "value"}}, + {"url=/api/querystring?key1=val1&key2=value", output{"url", "/api/querystring?key1=val1&key2=value"}}, + } + + for _, line := range validLines { + key, val := parseKeyValue(line.input) + if key != line.output.key { + t.Errorf("line: %s, key expected %s, actual %s", line, line.output.key, key) + } + if val != line.output.val { + t.Errorf("line: %s, val expected %s, actual %s", line, line.output.val, val) + } + } +} From ba8c29acb0e5e4588b8a9987ed8421d480d15c85 Mon Sep 17 00:00:00 2001 From: Sebastian Spaink <3441183+sspaink@users.noreply.github.com> Date: Tue, 23 Nov 2021 16:07:21 -0600 Subject: [PATCH 059/133] feat(inputs.win_services): add exclude filter (#10144) Co-authored-by: Tomas Mikenda --- plugins/inputs/win_services/README.md | 27 +++++++++++-------- plugins/inputs/win_services/win_services.go | 8 +++--- .../inputs/win_services/win_services_test.go | 21 +++++++++++++++ 3 files changed, 42 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/win_services/README.md b/plugins/inputs/win_services/README.md index 1d7aa63568949..37cf1a22280e9 100644 --- a/plugins/inputs/win_services/README.md +++ b/plugins/inputs/win_services/README.md @@ -4,25 +4,27 @@ Reports information about Windows service status. Monitoring some services may require running Telegraf with administrator privileges. -### Configuration: +## Configuration ```toml [[inputs.win_services]] - ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. + ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. Case sensitive. 
service_names = [ "LanmanServer", "TermService", "Win*", ] + excluded_service_names = ['WinRM'] # optional, list of service names to exclude ``` -### Measurements & Fields: +### Measurements & Fields - win_services - - state : integer - - startup_mode : integer + - state : integer + - startup_mode : integer The `state` field can have the following values: + - 1 - stopped - 2 - start pending - 3 - stop pending @@ -32,30 +34,33 @@ The `state` field can have the following values: - 7 - paused The `startup_mode` field can have the following values: + - 0 - boot start - 1 - system start - 2 - auto start - 3 - demand start - 4 - disabled -### Tags: +### Tags - All measurements have the following tags: - - service_name - - display_name + - service_name + - display_name -### Example Output: -``` +### Example Output + +```shell win_services,host=WIN2008R2H401,display_name=Server,service_name=LanmanServer state=4i,startup_mode=2i 1500040669000000000 win_services,display_name=Remote\ Desktop\ Services,service_name=TermService,host=WIN2008R2H401 state=1i,startup_mode=3i 1500040669000000000 ``` + ### TICK Scripts A sample TICK script for a notification about a not running service. It sends a notification whenever any service changes its state to be not _running_ and when it changes that state back to _running_. The notification is sent via an HTTP POST call. -``` +```shell stream |from() .database('telegraf') diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 38f873a99284d..8770ae739877a 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -86,6 +86,7 @@ var sampleConfig = ` "TermService", "Win*", ] + #excluded_service_names = [] # optional, list of service names to exclude ` var description = "Input plugin to report Windows services info." @@ -94,8 +95,9 @@ var description = "Input plugin to report Windows services info." 
type WinServices struct { Log telegraf.Logger - ServiceNames []string `toml:"service_names"` - mgrProvider ManagerProvider + ServiceNames []string `toml:"service_names"` + ServiceNamesExcluded []string `toml:"excluded_service_names"` + mgrProvider ManagerProvider servicesFilter filter.Filter } @@ -109,7 +111,7 @@ type ServiceInfo struct { func (m *WinServices) Init() error { var err error - m.servicesFilter, err = filter.NewIncludeExcludeFilter(m.ServiceNames, nil) + m.servicesFilter, err = filter.NewIncludeExcludeFilter(m.ServiceNames, m.ServiceNamesExcluded) if err != nil { return err } diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 153c8dfdd8a10..b0720169d9db2 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -225,3 +225,24 @@ func TestGatherContainsTag(t *testing.T) { acc1.AssertContainsTaggedFields(t, "win_services", fields, tags) } } + +func TestExcludingNamesTag(t *testing.T) { + winServices := &WinServices{ + Log: testutil.Logger{}, + ServiceNamesExcluded: []string{"Service*"}, + mgrProvider: &FakeMgProvider{testSimpleData[0]}, + } + winServices.Init() + var acc1 testutil.Accumulator + require.NoError(t, winServices.Gather(&acc1)) + + for _, s := range testSimpleData[0].services { + fields := make(map[string]interface{}) + tags := make(map[string]string) + fields["state"] = int(s.state) + fields["startup_mode"] = int(s.startUpMode) + tags["service_name"] = s.serviceName + tags["display_name"] = s.displayName + acc1.AssertDoesNotContainsTaggedFields(t, "win_services", fields, tags) + } +} From 2b439341742c6cda3cbd1a5ec92e9c37d402940e Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 23 Nov 2021 15:09:00 -0700 Subject: [PATCH 060/133] fix: removed snmptranslate from readme and fix default path (#10136) --- plugins/inputs/snmp_trap/README.md | 40 ++++++++++----------------- plugins/inputs/snmp_trap/snmp_trap.go | 1 + 2 files changed, 15 insertions(+), 26 deletions(-) diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index f117c35cbeb56..a305fe49bab8b 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -6,21 +6,8 @@ notifications (traps and inform requests). Notifications are received on plain UDP. The port to listen is configurable. -### Prerequisites +## Configuration -This plugin uses the `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. - -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. - -### Configuration ```toml [[inputs.snmp_trap]] ## Transport, local address, and port to listen on. Transport must @@ -55,7 +42,7 @@ information. # priv_password = "" ``` -#### Using a Privileged Port +### Using a Privileged Port On many operating systems, listening on a privileged port (a port number less than 1024) requires extra permission. Since the default @@ -73,7 +60,7 @@ the privileged port. 
To use a privileged port on Linux, you can use setcap to enable the CAP_NET_BIND_SERVICE capability on the telegraf binary: -``` +```shell setcap cap_net_bind_service=+ep /usr/bin/telegraf ``` @@ -84,21 +71,22 @@ On Mac OS, listening on privileged ports is unrestricted on versions - snmp_trap - tags: - - source (string, IP address of trap source) - - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) - - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) - - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) - - version (string, "1" or "2c" or "3") - - context_name (string, value from v3 trap) - - engine_id (string, value from v3 trap) - - community (string, value from 1 or 2c trap) + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - context_name (string, value from v3 trap) + - engine_id (string, value from v3 trap) + - community (string, value from 1 or 2c trap) - fields: - - Fields are mapped from variables in the trap. Field names are + - Fields are mapped from variables in the trap. Field names are the trap variable names after MIB lookup. Field values are trap variable values. ### Example Output -``` + +```shell snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c,community=public snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c,community=public sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 ``` diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 9fffd8968d593..28ae24adde62a 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -105,6 +105,7 @@ func init() { lookupFunc: lookup, ServiceAddress: "udp://:162", Timeout: defaultTimeout, + Path: []string{"/usr/share/snmp/mibs"}, Version: "2c", } }) From 6518745153d3b141811a096b1ff621141ac2bf54 Mon Sep 17 00:00:00 2001 From: reimda Date: Tue, 23 Nov 2021 15:11:00 -0700 Subject: [PATCH 061/133] fix: Add setting to win_perf_counters input to ignore localization (#10101) --- plugins/inputs/win_perf_counters/README.md | 79 +++++++++--- .../win_perf_counters/win_perf_counters.go | 112 +++++++++++++++--- .../win_perf_counters_integration_test.go | 37 +++++- .../win_perf_counters_test.go | 44 +++++++ 4 files changed, 233 insertions(+), 39 deletions(-) diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index de45386a764a1..dcc15d6380e34 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -20,6 +20,10 @@ as counters used when performance monitoring This file is likely to be updated in the future with more examples for useful configurations for separate scenarios. +For more information on concepts and terminology including object, +counter, and instance names, see the help in the Windows Performance +Monitor app. + ### Plugin wide Plugin wide entries are underneath `[[inputs.win_perf_counters]]`. @@ -33,19 +37,39 @@ Example: #### UseWildcardsExpansion -If `UseWildcardsExpansion` is set to true, wildcards can be used in the -instance name and the counter name. 
When using localized Windows, counters -will be also be localized. Instance indexes will also be returned in the -instance name. +If `UseWildcardsExpansion` is true, wildcards can be used in the +instance name and the counter name. Instance indexes will also be +returned in the instance name. -Partial wildcards (e.g. `chrome*`) are supported only in the instance name on Windows Vista and newer. +Partial wildcards (e.g. `chrome*`) are supported only in the instance +name on Windows Vista and newer. -If disabled, wildcards (not partial) in instance names can still be used, but -instance indexes will not be returned in the instance names. +If disabled, wildcards (not partial) in instance names can still be +used, but instance indexes will not be returned in the instance names. Example: `UseWildcardsExpansion=true` +#### LocalizeWildcardsExpansion + +`LocalizeWildcardsExpansion` selects whether object and counter names +are localized when `UseWildcardsExpansion` is true and Telegraf is +running on a localized installation of Windows. + +When `LocalizeWildcardsExpansion` is true, Telegraf produces metrics +with localized tags and fields even when object and counter names are +in English. + +When `LocalizeWildcardsExpansion` is false, Telegraf expects object +and counter names to be in English and produces metrics with English +tags and fields. + +When `LocalizeWildcardsExpansion` is false, wildcards can only be used +in instances. Object and counter names must not have wildcards. + +Example: +`LocalizeWildcardsExpansion=true` + #### CountersRefreshInterval Configured counters are matched against available counters at the interval @@ -63,7 +87,7 @@ Example: #### PreVistaSupport -_Deprecated. Necessary features on Windows Vista and newer are checked dynamically_ +(Deprecated. Necessary features on Windows Vista and newer are checked dynamically) Bool, if set to `true`, the plugin will use the localized PerfCounter interface that has been present since before Vista for backwards compatibility. @@ -74,7 +98,7 @@ Example for Windows Server 2003, this would be set to true: #### UsePerfCounterTime -Bool, if set to `true` will request a timestamp along with the PerfCounter data. +Bool, if set to `true` will request a timestamp along with the PerfCounter data. If se to `false`, current time will be used. Supported on Windows Vista/Windows Server 2008 and newer @@ -86,6 +110,7 @@ Example: See Entry below. ### Entry + A new configuration entry consists of the TOML header starting with, `[[inputs.win_perf_counters.object]]`. This must follow before other plugin configurations, @@ -94,14 +119,16 @@ beneath the main win_perf_counters entry, `[[inputs.win_perf_counters]]`. Following this are 3 required key/value pairs and three optional parameters and their usage. #### ObjectName -**Required** + +(Required) ObjectName is the Object to query for, like Processor, DirectoryServices, LogicalDisk or similar. Example: `ObjectName = "LogicalDisk"` #### Instances -**Required** + +(Required) The instances key (this is an array) declares the instances of a counter you would like returned, it can be one or more values. @@ -121,7 +148,8 @@ Here only one option is valid if you want data back, and that is to specify `Instances = ["------"]`. #### Counters -**Required** + +(Required) The Counters key (this is an array) declares the counters of the ObjectName you would like returned, it can also be one or more values. 
@@ -133,7 +161,8 @@ This must be specified for every counter you want the results of, or use is set to `true`. #### Measurement -*Optional* + +(Optional) This key is optional. If it is not set it will be `win_perf_counters`. In InfluxDB this is the key underneath which the returned data is stored. @@ -144,7 +173,8 @@ separately from Processor results. Example: `Measurement = "win_disk"`` #### IncludeTotal -*Optional* + +(Optional) This key is optional. It is a simple bool. If it is not set to true or included it is treated as false. @@ -154,7 +184,8 @@ like `_Total`, `0,_Total` and so on where applicable (Processor Information is one example). #### WarnOnMissing -*Optional* + +(Optional) This key is optional. It is a simple bool. If it is not set to true or included it is treated as false. @@ -163,7 +194,8 @@ It will print out any ObjectName/Instance/Counter combinations asked for that do not match. Useful when debugging new configurations. #### FailOnMissing -*Internal* + +(Internal) This key should not be used. It is for testing purposes only. It is a simple bool. If it is not set to true or included this is treated as false. @@ -173,6 +205,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ## Examples ### Generic Queries + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -217,6 +250,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### Active Directory Domain Controller + ```toml [[inputs.win_perf_counters]] [inputs.win_perf_counters.tags] @@ -245,6 +279,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### DFS Namespace + Domain Controllers + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -258,6 +293,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### DFS Replication + Domain Controllers + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -271,6 +307,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### DNS Server + Domain Controllers + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -282,6 +319,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### IIS / ASP.NET + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -326,6 +364,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### Process + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -338,6 +377,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` ### .NET Monitoring + ```toml [[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] @@ -402,6 +442,7 @@ your performance counters. 1. Drop into the C:\WINDOWS\System32 directory by typing `C:` then `cd \Windows\System32` 1. Rebuild your counter values, which may take a few moments so please be patient, by running: - ``` - lodctr /r - ``` + +```batchfile +lodctr /r +``` diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 3a74e34a5228a..a126db4ea9501 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -28,6 +28,11 @@ var sampleConfig = ` # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. 
# If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. #UseWildcardsExpansion = false + # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will + # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead + # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this + # setting is false. + #LocalizeWildcardsExpansion = true # Period after which counters will be reread from configuration and wildcards in counter paths expanded CountersRefreshInterval="1m" @@ -141,11 +146,12 @@ var sampleConfig = ` type Win_PerfCounters struct { PrintValid bool //deprecated: determined dynamically - PreVistaSupport bool - UsePerfCounterTime bool - Object []perfobject - CountersRefreshInterval config.Duration - UseWildcardsExpansion bool + PreVistaSupport bool + UsePerfCounterTime bool + Object []perfobject + CountersRefreshInterval config.Duration + UseWildcardsExpansion bool + LocalizeWildcardsExpansion bool Log telegraf.Logger @@ -247,6 +253,7 @@ func (m *Win_PerfCounters) SampleConfig() string { //objectName string, counter string, instance string, measurement string, include_total bool func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instance string, counterName string, measurement string, includeTotal bool) error { + origCounterPath := counterPath var err error var counterHandle PDH_HCOUNTER if !m.query.IsVistaOrNewer() { @@ -273,21 +280,55 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan return err } + origObjectName, _, origCounterName, err := extractCounterInfoFromCounterPath(origCounterPath) + if err != nil { + return err + } + for _, counterPath := range counters { var err error - counterHandle, err := m.query.AddCounterToQuery(counterPath) objectName, instance, counterName, err = extractCounterInfoFromCounterPath(counterPath) if err != nil { return err } + var newItem *counter + if !m.LocalizeWildcardsExpansion { + // On localized installations of Windows, Telegraf + // should return English metrics, but + // ExpandWildCardPath returns localized counters. Undo + // that by using the original object and counter + // names, along with the expanded instance. 
+ + var newInstance string + if instance == "" { + newInstance = emptyInstance + } else { + newInstance = instance + } + counterPath = formatPath(origObjectName, newInstance, origCounterName) + counterHandle, err = m.query.AddEnglishCounterToQuery(counterPath) + newItem = &counter{ + counterPath, + origObjectName, origCounterName, + instance, measurement, + includeTotal, counterHandle, + } + } else { + counterHandle, err = m.query.AddCounterToQuery(counterPath) + newItem = &counter{ + counterPath, + objectName, counterName, + instance, measurement, + includeTotal, counterHandle, + } + } + if instance == "_Total" && origInstance == "*" && !includeTotal { continue } - newItem := &counter{counterPath, objectName, counterName, instance, measurement, - includeTotal, counterHandle} m.counters = append(m.counters, newItem) if m.PrintValid { @@ -306,6 +347,16 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan return nil } +const emptyInstance = "------" + +func formatPath(objectname string, instance string, counter string) string { + if instance == emptyInstance { + return "\\" + objectname + "\\" + counter + } else { + return "\\" + objectname + "(" + instance + ")\\" + counter + } +} + func (m *Win_PerfCounters) ParseConfig() error { var counterPath string @@ -315,11 +366,7 @@ func (m *Win_PerfCounters) ParseConfig() error { for _, instance := range PerfObject.Instances { objectname := PerfObject.ObjectName - if instance == "------" { - counterPath = "\\" + objectname + "\\" + counter - } else { - counterPath = "\\" + objectname + "(" + instance + ")\\" + counter - } + counterPath = formatPath(objectname, instance, counter) err := m.AddItem(counterPath, objectname, instance, counter, PerfObject.Measurement, PerfObject.IncludeTotal) @@ -447,7 +494,7 @@ func shouldIncludeMetric(metric *counter, cValue CounterValue) bool { // Catch if we set it to total or some form of it return true } - if metric.instance == "------" { + if metric.instance == emptyInstance { return true } return false @@ -476,8 +523,43 @@ func isKnownCounterDataError(err error) bool { return false } +func (m *Win_PerfCounters) Init() error { + if m.UseWildcardsExpansion && !m.LocalizeWildcardsExpansion { + // Counters must not have wildcards with this option + + found := false + wildcards := []string{"*", "?"} + + for _, object := range m.Object { + for _, wildcard := range wildcards { + if strings.Contains(object.ObjectName, wildcard) { + found = true + m.Log.Errorf("object: %s, contains wildcard %s", object.ObjectName, wildcard) + } + } + for _, counter := range object.Counters { + for _, wildcard := range wildcards { + if strings.Contains(counter, wildcard) { + found = true + m.Log.Errorf("object: %s, counter: %s contains wildcard %s", object.ObjectName, counter, wildcard) + } + } + } + } + + if found { + return fmt.Errorf("wildcards can't be used with LocalizeWildcardsExpansion=false") + } + } + return nil +} + func init() { inputs.Add("win_perf_counters", func() telegraf.Input { - return &Win_PerfCounters{query: &PerformanceQueryImpl{}, CountersRefreshInterval: config.Duration(time.Second * 60)} + return &Win_PerfCounters{ + query: &PerformanceQueryImpl{}, + CountersRefreshInterval: config.Duration(time.Second * 60), + LocalizeWildcardsExpansion: true, + } }) } diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index c7ceec815f0f8..63483379315ee 100644 --- 
a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -452,7 +452,12 @@ func TestWinPerfcountersConfigError1Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } m.query.Open() err := m.ParseConfig() @@ -486,7 +491,12 @@ func TestWinPerfcountersConfigError2Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } m.query.Open() err := m.ParseConfig() @@ -522,7 +532,12 @@ func TestWinPerfcountersConfigError3Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } m.query.Open() err := m.ParseConfig() @@ -557,7 +572,12 @@ func TestWinPerfcountersCollect1Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, Object: perfobjects, query: &PerformanceQueryImpl{}} + m := Win_PerfCounters{ + PrintValid: false, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + Log: testutil.Logger{}, + } var acc testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) @@ -603,7 +623,14 @@ func TestWinPerfcountersCollect2Integration(t *testing.T) { perfobjects[0] = PerfObject - m := Win_PerfCounters{PrintValid: false, UsePerfCounterTime: true, Object: perfobjects, query: &PerformanceQueryImpl{}, UseWildcardsExpansion: true} + m := Win_PerfCounters{ + PrintValid: false, + UsePerfCounterTime: true, + Object: perfobjects, + query: &PerformanceQueryImpl{}, + UseWildcardsExpansion: true, + Log: testutil.Logger{}, + } var acc testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 998423e792db9..5519e3d37b920 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -1024,3 +1024,47 @@ func TestUTF16ToStringArray(t *testing.T) { czechStrings := UTF16ToStringArray(unicodeStringListWithCzechChars) require.Equal(t, czechStrings, stringArrayWithCzechChars, "Not equal czech arrays") } + +func TestNoWildcards(t *testing.T) { + m := Win_PerfCounters{ + Object: createPerfObject("measurement", "object", []string{"instance"}, []string{"counter*"}, false, false), + UseWildcardsExpansion: true, + LocalizeWildcardsExpansion: false, + Log: testutil.Logger{}, + } + require.Error(t, m.Init()) + m = Win_PerfCounters{ + Object: createPerfObject("measurement", "object?", []string{"instance"}, []string{"counter"}, false, false), + UseWildcardsExpansion: true, + LocalizeWildcardsExpansion: false, + Log: testutil.Logger{}, + } + require.Error(t, m.Init()) +} + +func TestLocalizeWildcardsExpansion(t *testing.T) { + // this test is valid only on localized windows + if testing.Short() { + t.Skip("Skipping long taking test in short mode") + } + + const counter = "% Processor Time" + m := 
Win_PerfCounters{ + query: &PerformanceQueryImpl{}, + CountersRefreshInterval: config.Duration(time.Second * 60), + Object: createPerfObject("measurement", "Processor Information", + []string{"_Total"}, []string{counter}, false, false), + LocalizeWildcardsExpansion: false, + UseWildcardsExpansion: true, + Log: testutil.Logger{}, + } + require.NoError(t, m.Init()) + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + require.Len(t, acc.Metrics, 1) + + //running on localized windows with UseWildcardsExpansion and + //with LocalizeWildcardsExpansion, this will be localized. Using LocalizeWildcardsExpansion=false it will + //be English. + require.Contains(t, acc.Metrics[0].Fields, sanitizedChars.Replace(counter)) +} From 6ce4729813db6e22319f147ca77701884721cd95 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 08:42:47 -0700 Subject: [PATCH 062/133] makefile: fix indenting, use corret markdownlint binary (#10143) --- Makefile | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index 7acd336cba7bc..290008d8c34b0 100644 --- a/Makefile +++ b/Makefile @@ -148,26 +148,26 @@ lint-install: .PHONY: lint lint: - ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 - endif + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } golangci-lint run - ifeq (, $(shell which markdownlint)) - $(info markdownlint can't be found, please run: make lint-install) - exit 1 - endif + @which markdownlint >/dev/null 2>&1 || { \ + echo "markdownlint not found, please run: make lint-install"; \ + exit 1; \ + } - markdownlint-cli + markdownlint . .PHONY: lint-branch lint-branch: - ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 - endif + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } golangci-lint run --new-from-rev master From 96e939a08221c7b8ba0027e91d96579edea54f20 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:12 -0700 Subject: [PATCH 063/133] chore: clean up all markdown lint errors in aggregator plugins (#10151) --- plugins/aggregators/basicstats/README.md | 40 +++++++++--------- plugins/aggregators/derivative/README.md | 48 ++++++++++++---------- plugins/aggregators/final/README.md | 11 ++--- plugins/aggregators/histogram/README.md | 33 ++++++++------- plugins/aggregators/merge/README.md | 4 +- plugins/aggregators/minmax/README.md | 14 +++---- plugins/aggregators/quantile/README.md | 26 +++++++----- plugins/aggregators/valuecounter/README.md | 20 +++++---- 8 files changed, 105 insertions(+), 91 deletions(-) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index f13dd8f375682..ede108ec57d90 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -3,7 +3,7 @@ The BasicStats aggregator plugin give us count,diff,max,min,mean,non_negative_diff,sum,s2(variance), stdev for a set of values, emitting the aggregate every `period` seconds. -### Configuration: +## Configuration ```toml # Keep the aggregate basicstats of each metric passing through. @@ -20,32 +20,32 @@ emitting the aggregate every `period` seconds. 
``` - stats - - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. - - If empty array, no stats are aggregated + - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. + - If empty array, no stats are aggregated -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_count - - field1_diff (difference) - - field1_rate (rate per second) - - field1_max - - field1_min - - field1_mean - - field1_non_negative_diff (non-negative difference) - - field1_non_negative_rate (non-negative rate per second) - - field1_sum - - field1_s2 (variance) - - field1_stdev (standard deviation) - - field1_interval (interval in nanoseconds) - -### Tags: + - field1_count + - field1_diff (difference) + - field1_rate (rate per second) + - field1_max + - field1_min + - field1_mean + - field1_non_negative_diff (non-negative difference) + - field1_non_negative_rate (non-negative rate per second) + - field1_sum + - field1_s2 (variance) + - field1_stdev (standard deviation) + - field1_interval (interval in nanoseconds) + +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 diff --git a/plugins/aggregators/derivative/README.md b/plugins/aggregators/derivative/README.md index 3ca29c36d4f49..6d47dc4c1850f 100644 --- a/plugins/aggregators/derivative/README.md +++ b/plugins/aggregators/derivative/README.md @@ -1,42 +1,47 @@ # Derivative Aggregator Plugin + The Derivative Aggregator Plugin estimates the derivative for all fields of the aggregated metrics. -### Time Derivatives +## Time Derivatives In its default configuration it determines the first and last measurement of the period. From these measurements the time difference in seconds is calculated. This time difference is than used to divide the difference of each field using the following formula: -``` + +```text field_last - field_first derivative = -------------------------- time_difference ``` + For each field the derivative is emitted with a naming pattern `_rate`. -### Custom Derivation Variable +## Custom Derivation Variable The plugin supports to use a field of the aggregated measurements as derivation variable in the denominator. This variable is assumed to be a monotonically increasing value. In this feature the following formula is used: -``` + +```text field_last - field_first derivative = -------------------------------- variable_last - variable_first ``` + **Make sure the specified variable is not filtered and exists in the metrics passed to this aggregator!** -When using a custom derivation variable, you should change the `suffix` of the derivative name. +When using a custom derivation variable, you should change the `suffix` of the derivative name. See the next section on [customizing the derivative name](#customize-the-derivative-name) for details. -### Customize the Derivative Name +## Customize the Derivative Name The derivatives generated by the aggregator are named `_rate`, i.e. they are composed of the field name and a suffix `_rate`. You can configure the suffix to be used by changing the `suffix` parameter. 
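A minimal, self-contained sketch of the calculation described above — not the plugin's implementation, and `observation`/`derivative` are illustrative names — applied to the `bytes_recv`/`packets_recv` figures from the example output further below:

```go
package main

import "fmt"

// observation is a simplified stand-in for the first and last values of one
// field and of the chosen derivation variable within a period.
type observation struct {
	field    float64
	variable float64
}

// derivative applies the formula above:
// (field_last - field_first) / (variable_last - variable_first).
// With the timestamp in seconds as the variable this is the plain time
// derivative; with another monotonic field it is the custom-variable form.
func derivative(first, last observation) float64 {
	den := last.variable - first.variable
	if den == 0 {
		return 0 // only one distinct sample seen; nothing to derive
	}
	return (last.field - first.field) / den
}

func main() {
	// bytes_recv with packets_recv as the derivation variable, taken from
	// the example output below.
	first := observation{field: 15409, variable: 164}
	last := observation{field: 73987, variable: 364}

	// Prints bytes_recv_by_packets_recv=292.89, matching the example output.
	fmt.Printf("bytes_recv_by_packets_recv=%.2f\n", derivative(first, last))
}
```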
-### Roll-Over to next Period +## Roll-Over to next Period Calculating the derivative for a period requires at least two distinct measurements during that period. Whether those are available depends on the configuration of the aggregator `period` and the agent `interval`. @@ -47,7 +52,7 @@ replace the roll-over metric. A main benefit of this roll-over is the ability to cope with multiple "quiet" periods, where no new measurement is pushed to the aggregator. The roll-over will take place at most `max_roll_over` times. -#### Example of Roll-Over +### Example of Roll-Over Let us assume we have an input plugin, that generates a measurement with a single metric "test" every 2 seconds. Let this metric increase the first 10 seconds from 0.0 to 10.0 and then decrease the next 10 seconds form 10.0 to 0.0: @@ -111,18 +116,18 @@ To illustrate this, let us compare the derivatives for `period = "7s"`. | timestamp | value | `max_roll_over = 0` | `max_roll_over = 1` | |-----------|-------|-----------|--------------| | 0 | 0.0 | -| 2 | 2.0 | -| 4 | 4.0 | -| 6 | 6.0 | +| 2 | 2.0 | +| 4 | 4.0 | +| 6 | 6.0 | ||| 1.0 | 1.0 | | 8 | 8.0 | -| 10 | 10.0 | -| 12 | 8.0 | -||| 0.0 | 0.33... | -| 14 | 6.0 | +| 10 | 10.0 | +| 12 | 8.0 | +||| 0.0 | 0.33... | +| 14 | 6.0 | | 16 | 4.0 | -| 18 | 2.0 | -| 20 | 0.0 | +| 18 | 2.0 | +| 20 | 0.0 | ||| -1.0 | -1.0 | The difference stems from the change of the value between periods, e.g. from 6.0 to 8.0 between first and second period. @@ -130,7 +135,7 @@ Thoses changes are omitted with `max_roll_over = 0` but are respected with `max_ That there are no more differences in the calculated derivatives is due to the example data, which has constant derivatives in during the first and last period, even when including the gap between the periods. Using `max_roll_over` with a value greater 0 may be important, if you need to detect changes between periods, e.g. when you have very few measurements in a period or quasi-constant metrics with only occasional changes. -### Configuration +## Configuration ```toml [[aggregators.derivative]] @@ -151,13 +156,14 @@ Using `max_roll_over` with a value greater 0 may be important, if you need to de period = "30s" ``` -### Tags: +### Tags + No tags are applied by this aggregator. Existing tags are passed throug the aggregator untouched. -### Example Output +## Example Output -``` +```text net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000 net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000 net bytes_recv_by_packets_recv=292.89 1508843660000000000 diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md index 444746d784349..10dc72139effd 100644 --- a/plugins/aggregators/final/README.md +++ b/plugins/aggregators/final/README.md @@ -11,7 +11,7 @@ discrete time series such as procstat, cgroup, kubernetes etc. When a series has not been updated within the time defined in `series_timeout`, the last metric is emitted with the `_final` appended. -### Configuration +## Configuration ```toml [[aggregators.final]] @@ -25,20 +25,21 @@ When a series has not been updated within the time defined in series_timeout = "5m" ``` -### Metrics +## Metrics Measurement and tags are unchanged, fields are emitted with the suffix `_final`. 
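The core behaviour — remember only the newest value of each series and emit it once with a `_final` suffix — can be pictured with a small plain-Go sketch (illustrative only; the real plugin also tracks timestamps so it can honour `series_timeout`):

```go
package main

import "fmt"

// lastValues remembers only the most recent value seen for every
// (series, field) pair.
type lastValues struct {
	values map[string]map[string]float64 // series ID -> field name -> last value
}

func newLastValues() *lastValues {
	return &lastValues{values: make(map[string]map[string]float64)}
}

// add overwrites whatever was stored before, so only the final value of a
// contiguous series survives.
func (l *lastValues) add(seriesID, field string, value float64) {
	if _, ok := l.values[seriesID]; !ok {
		l.values[seriesID] = make(map[string]float64)
	}
	l.values[seriesID][field] = value
}

// push emits every remembered field once, renamed with the "_final" suffix.
func (l *lastValues) push() {
	for series, fields := range l.values {
		for name, v := range fields {
			fmt.Printf("%s %s_final=%g\n", series, name, v)
		}
	}
}

func main() {
	agg := newLastValues()
	// The three updates for host=bar from the "Original input" example below.
	agg.add("counter,host=bar", "i", 1)
	agg.add("counter,host=bar", "i", 2)
	agg.add("counter,host=bar", "i", 3)
	agg.push() // counter,host=bar i_final=3
}
```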
-### Example Output +## Example Output -``` +```text counter,host=bar i_final=3,j_final=6 1554281635115090133 counter,host=foo i_final=3,j_final=6 1554281635112992012 ``` Original input: -``` + +```text counter,host=bar i=1,j=4 1554281633101153300 counter,host=foo i=1,j=4 1554281633099323601 counter,host=bar i=2,j=5 1554281634107980073 diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index f0b6c15b11804..5fd56f1fbc345 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -12,7 +12,7 @@ By default bucket counts are not reset between periods and will be non-strictly increasing while Telegraf is running. This behavior can be changed by setting the `reset` parameter to true. -#### Design +## Design Each metric is passed to the aggregator and this aggregator searches histogram buckets for those fields, which have been specified in the @@ -24,7 +24,7 @@ The algorithm of hit counting to buckets was implemented on the base of the algorithm which is implemented in the Prometheus [client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go). -### Configuration +## Configuration ```toml # Configuration for aggregate histogram metrics @@ -73,40 +73,39 @@ boundaries. Each float value defines the inclusive upper (right) bound of the b The `+Inf` bucket is added automatically and does not need to be defined. (For left boundaries, these specified bucket borders and `-Inf` will be used). -### Measurements & Fields: +## Measurements & Fields The postfix `bucket` will be added to each field key. - measurement1 - - field1_bucket - - field2_bucket + - field1_bucket + - field2_bucket -### Tags: +### Tags -* `cumulative = true` (default): - * `le`: Right bucket border. It means that the metric value is less than or +- `cumulative = true` (default): + - `le`: Right bucket border. It means that the metric value is less than or equal to the value of this tag. If a metric value is sorted into a bucket, it is also sorted into all larger buckets. As a result, the value of `_bucket` is rising with rising `le` value. When `le` is `+Inf`, the bucket value is the count of all metrics, because all metric values are less than or equal to positive infinity. -* `cumulative = false`: - * `gt`: Left bucket border. It means that the metric value is greater than +- `cumulative = false`: + - `gt`: Left bucket border. It means that the metric value is greater than (and not equal to) the value of this tag. - * `le`: Right bucket border. It means that the metric value is less than or + - `le`: Right bucket border. It means that the metric value is less than or equal to the value of this tag. - * As both `gt` and `le` are present, each metric is sorted in only exactly - one bucket. + - As both `gt` and `le` are present, each metric is sorted in only exactly + one bucket. 
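Before the worked example below, here is a compact sketch of the cumulative counting rule (not the plugin's code; the field name and sample values are made up, and right borders are treated as inclusive per the description above):

```go
package main

import (
	"fmt"
	"math"
)

// bucketCounts returns, for every right border "le" (plus the +Inf border,
// which is added automatically), how many values are less than or equal to
// it — the cumulative behaviour described above.
func bucketCounts(borders, values []float64) []int {
	all := append(append([]float64{}, borders...), math.Inf(+1))
	counts := make([]int, len(all))
	for i, le := range all {
		for _, v := range values {
			if v <= le {
				counts[i]++
			}
		}
	}
	return counts
}

func main() {
	borders := []float64{0.0, 10.0, 50.0, 100.0} // buckets from the config
	values := []float64{7, 12, 99}               // made-up field values

	labels := []string{"0.0", "10.0", "50.0", "100.0", "+Inf"}
	for i, c := range bucketCounts(borders, values) {
		fmt.Printf("le=%s usage_idle_bucket=%di\n", labels[i], c)
	}
}
```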
- -### Example Output: +## Example Output Let assume we have the buckets [0, 10, 50, 100] and the following field values for `usage_idle`: [50, 7, 99, 12] With `cumulative = true`: -``` +```text cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12 @@ -116,7 +115,7 @@ cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # With `cumulative = false`: -``` +```text cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12 diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md index 89f7f0983c692..79cad5cfb4bd3 100644 --- a/plugins/aggregators/merge/README.md +++ b/plugins/aggregators/merge/README.md @@ -7,7 +7,7 @@ Use this plugin when fields are split over multiple metrics, with the same measurement, tag set and timestamp. By merging into a single metric they can be handled more efficiently by the output. -### Configuration +## Configuration ```toml [[aggregators.merge]] @@ -16,7 +16,7 @@ be handled more efficiently by the output. drop_original = true ``` -### Example +## Example ```diff - cpu,host=localhost usage_time=42 1567562620000000000 diff --git a/plugins/aggregators/minmax/README.md b/plugins/aggregators/minmax/README.md index f7405b78cbe9d..fefd2f2e2e165 100644 --- a/plugins/aggregators/minmax/README.md +++ b/plugins/aggregators/minmax/README.md @@ -3,7 +3,7 @@ The minmax aggregator plugin aggregates min & max values of each field it sees, emitting the aggrate every `period` seconds. -### Configuration: +## Configuration ```toml # Keep the aggregate min/max of each metric passing through. @@ -16,19 +16,19 @@ emitting the aggrate every `period` seconds. drop_original = false ``` -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_max - - field1_min + - field1_max + - field1_min -### Tags: +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1.72 1475583980000000000 system,host=tars load1=1.6 1475583990000000000 diff --git a/plugins/aggregators/quantile/README.md b/plugins/aggregators/quantile/README.md index 77d0f856409ec..423857465c18d 100644 --- a/plugins/aggregators/quantile/README.md +++ b/plugins/aggregators/quantile/README.md @@ -3,7 +3,7 @@ The quantile aggregator plugin aggregates specified quantiles for each numeric field per metric it sees and emits the quantiles every `period`. -### Configuration +## Configuration ```toml [[aggregators.quantile]] @@ -33,8 +33,10 @@ per metric it sees and emits the quantiles every `period`. # compression = 100.0 ``` -#### Algorithm types -##### t-digest +## Algorithm types + +### t-digest + Proposed by [Dunning & Ertl (2019)][tdigest_paper] this type uses a special data-structure to cluster data. These clusters are later used to approximate the requested quantiles. The bounds of the approximation @@ -47,7 +49,8 @@ where exact quantile calculation isn't required. For implementation details see the underlying [golang library][tdigest_lib]. 
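The exact variants described next are ordinary order-statistic interpolation. A sketch of the R7 rule, assuming the standard Hyndman & Fan type-7 definition (the linear interpolation used by NumPy and Excel) rather than the plugin's own code:

```go
package main

import (
	"fmt"
	"sort"
)

// quantileR7 computes the Hyndman & Fan type-7 sample quantile by linear
// interpolation between the two nearest order statistics, for 0 <= p <= 1.
func quantileR7(data []float64, p float64) float64 {
	if len(data) == 0 {
		return 0 // nothing to aggregate
	}
	x := append([]float64{}, data...)
	sort.Float64s(x)
	n := len(x)
	h := p * float64(n-1) // fractional rank, zero-based
	lo := int(h)
	if lo >= n-1 {
		return x[n-1]
	}
	return x[lo] + (h-float64(lo))*(x[lo+1]-x[lo])
}

func main() {
	samples := []float64{7, 12, 50, 99} // arbitrary demo values
	for _, p := range []float64{0.25, 0.50, 0.75} {
		fmt.Printf("q%.2f = %.2f\n", p, quantileR7(samples, p))
	}
}
```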
-##### exact R7 and R8 +### exact R7 and R8 + These algorithms compute quantiles as described in [Hyndman & Fan (1996)][hyndman_fan]. The R7 variant is used in Excel and NumPy. The R8 variant is recommended by Hyndman & Fan due to its independence of the underlying sample distribution. @@ -57,8 +60,8 @@ a lot of memory when used with a large number of series or a large number of samples. They are slower than the `t-digest` algorithm and are recommended only to be used with a small number of samples and series. +## Benchmark (linux/amd64) -#### Benchmark (linux/amd64) The benchmark was performed by adding 100 metrics with six numeric (and two non-numeric) fields to the aggregator and the derive the aggregation result. @@ -72,7 +75,8 @@ result. | exact R7 | 100 | 7868816 ns/op | | exact R8 | 100 | 8099612 ns/op | -### Measurements +## Measurements + Measurement names are passed trough this aggregator. ### Fields @@ -82,6 +86,7 @@ fields are aggregated in the form `_`. Other field types (e.g. boolean, string) are ignored and dropped from the output. For example passing in the following metric as *input*: + - somemetric - average_response_ms (float64) - minimum_response_ms (float64) @@ -89,7 +94,8 @@ For example passing in the following metric as *input*: - status (string) - ok (boolean) -and the default setting for `quantiles ` you get the following *output* +and the default setting for `quantiles` you get the following *output* + - somemetric - average_response_ms_025 (float64) - average_response_ms_050 (float64) @@ -110,18 +116,18 @@ Tags are passed through to the output by this aggregator. ### Example Output -``` +```text cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000 cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000 cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000 cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000 ``` -# References +## References + - Dunning & Ertl: "Computing Extremely Accurate Quantiles Using t-Digests", arXiv:1902.04023 (2019) [pdf][tdigest_paper] - Hyndman & Fan: "Sample Quantiles 
in Statistical Packages", The American Statistician, vol. 50, pp. 361-365 (1996) [pdf][hyndman_fan] - [tdigest_paper]: https://arxiv.org/abs/1902.04023 [tdigest_lib]: https://github.com/caio/go-tdigest [hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf diff --git a/plugins/aggregators/valuecounter/README.md b/plugins/aggregators/valuecounter/README.md index ef68e0f4e57ca..1f74a4982c577 100644 --- a/plugins/aggregators/valuecounter/README.md +++ b/plugins/aggregators/valuecounter/README.md @@ -15,7 +15,7 @@ Counting fields with a high number of potential values may produce significant amounts of new fields and memory usage, take care to only count fields with a limited set of values. -### Configuration: +## Configuration ```toml [[aggregators.valuecounter]] @@ -29,22 +29,23 @@ limited set of values. fields = ["status"] ``` -### Measurements & Fields: +### Measurements & Fields - measurement1 - - field_value1 - - field_value2 + - field_value1 + - field_value2 -### Tags: +### Tags No tags are applied by this aggregator. -### Example Output: +## Example Output Example for parsing a HTTP access log. telegraf.conf: -``` + +```toml [[inputs.logparser]] files = ["/tmp/tst.log"] [inputs.logparser.grok] @@ -57,13 +58,14 @@ telegraf.conf: ``` /tmp/tst.log -``` + +```text /some/path 200 /some/path 401 /some/path 200 ``` -``` +```shell $ telegraf --config telegraf.conf --quiet access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011 From 0036757afebba2c2c18afa026bf5874a695572c8 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:19 -0700 Subject: [PATCH 064/133] chore: clean up all markdown lint errors in first half of docs directory (#10152) --- docs/AGGREGATORS.md | 124 ++++++++++++++--------------- docs/AGGREGATORS_AND_PROCESSORS.md | 8 +- docs/COMMANDS_AND_FLAGS.md | 16 ++-- docs/CONFIGURATION.md | 67 ++++++++++------ docs/EXTERNAL_PLUGINS.md | 79 +++++++++--------- docs/FAQ.md | 33 ++++---- docs/INPUTS.md | 12 +-- docs/INTEGRATION_TESTS.md | 61 +++++++------- docs/OUTPUTS.md | 9 ++- 9 files changed, 220 insertions(+), 189 deletions(-) diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index 0edf467837457..265b9fa6893a9 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -1,15 +1,15 @@ -### Aggregator Plugins +# Aggregator Plugins This section is for developers who want to create a new aggregator plugin. -### Aggregator Plugin Guidelines +## Aggregator Plugin Guidelines * A aggregator must conform to the [telegraf.Aggregator][] interface. * Aggregators should call `aggregators.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. -- The `SampleConfig` function should return valid toml that describes how the +* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this aggregator does. @@ -17,7 +17,7 @@ This section is for developers who want to create a new aggregator plugin. through it. This should be done using the builtin `HashID()` function of each metric. * When the `Reset()` function is called, all caches should be cleared. 
-- Follow the recommended [Code Style][]. +* Follow the recommended [Code Style][]. ### Aggregator Plugin Example @@ -27,21 +27,21 @@ package min // min.go import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" ) type Min struct { - // caches for metric fields, names, and tags - fieldCache map[uint64]map[string]float64 - nameCache map[uint64]string - tagCache map[uint64]map[string]string + // caches for metric fields, names, and tags + fieldCache map[uint64]map[string]float64 + nameCache map[uint64]string + tagCache map[uint64]map[string]string } func NewMin() telegraf.Aggregator { - m := &Min{} - m.Reset() - return m + m := &Min{} + m.Reset() + return m } var sampleConfig = ` @@ -53,77 +53,77 @@ var sampleConfig = ` ` func (m *Min) Init() error { - return nil + return nil } func (m *Min) SampleConfig() string { - return sampleConfig + return sampleConfig } func (m *Min) Description() string { - return "Keep the aggregate min of each metric passing through." + return "Keep the aggregate min of each metric passing through." } func (m *Min) Add(in telegraf.Metric) { - id := in.HashID() - if _, ok := m.nameCache[id]; !ok { - // hit an uncached metric, create caches for first time: - m.nameCache[id] = in.Name() - m.tagCache[id] = in.Tags() - m.fieldCache[id] = make(map[string]float64) - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - m.fieldCache[id][k] = fv - } - } - } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.fieldCache[id][k]; !ok { - // hit an uncached field of a cached metric - m.fieldCache[id][k] = fv - continue - } - if fv < m.fieldCache[id][k] { + id := in.HashID() + if _, ok := m.nameCache[id]; !ok { + // hit an uncached metric, create caches for first time: + m.nameCache[id] = in.Name() + m.tagCache[id] = in.Tags() + m.fieldCache[id] = make(map[string]float64) + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + m.fieldCache[id][k] = fv + } + } + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.fieldCache[id][k]; !ok { + // hit an uncached field of a cached metric + m.fieldCache[id][k] = fv + continue + } + if fv < m.fieldCache[id][k] { // set new minimum - m.fieldCache[id][k] = fv - } - } - } - } + m.fieldCache[id][k] = fv + } + } + } + } } func (m *Min) Push(acc telegraf.Accumulator) { - for id, _ := range m.nameCache { - fields := map[string]interface{}{} - for k, v := range m.fieldCache[id] { - fields[k+"_min"] = v - } - acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) - } + for id, _ := range m.nameCache { + fields := map[string]interface{}{} + for k, v := range m.fieldCache[id] { + fields[k+"_min"] = v + } + acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) + } } func (m *Min) Reset() { - m.fieldCache = make(map[uint64]map[string]float64) - m.nameCache = make(map[uint64]string) - m.tagCache = make(map[uint64]map[string]string) + m.fieldCache = make(map[uint64]map[string]float64) + m.nameCache = make(map[uint64]string) + m.tagCache = make(map[uint64]map[string]string) } func convert(in interface{}) (float64, bool) { - switch v := in.(type) { - case float64: - return v, true - case int64: - return float64(v), true - default: - return 0, false - } + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } } func init() { - 
aggregators.Add("min", func() telegraf.Aggregator { - return NewMin() - }) + aggregators.Add("min", func() telegraf.Aggregator { + return NewMin() + }) } ``` diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 934a4b0cf7706..389138cec7a94 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -5,7 +5,7 @@ As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugin These plugins sit in-between Input & Output plugins, aggregating and processing metrics as they pass through Telegraf: -``` +```text ┌───────────┐ │ │ │ CPU │───┐ @@ -44,12 +44,14 @@ to control which metrics are passed through a processor or aggregator. If a metric is filtered out the metric bypasses the plugin and is passed downstream to the next plugin. -### Processor +## Processor + Processor plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. -### Aggregator +## Aggregator + Aggregator plugins, on the other hand, are a bit more complicated. Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, minimum, maximum, or standard deviation. For this reason, all _aggregator_ diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md index cb0c31268c9a4..babccb54ba152 100644 --- a/docs/COMMANDS_AND_FLAGS.md +++ b/docs/COMMANDS_AND_FLAGS.md @@ -1,27 +1,27 @@ # Telegraf Commands & Flags -### Usage +## Usage -``` +```shell telegraf [commands] telegraf [flags] ``` -### Commands +## Commands |command|description| |--------|-----------------------------------------------| |`config` |print out full sample configuration to stdout| |`version`|print the version to stdout| -### Flags +## Flags |flag|description| |-------------------|------------| |`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| |`--config ` |configuration file to load| |`--config-directory ` |directory containing additional *.conf files| -|`--watch-config` |Telegraf will restart on local config changes.
Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.
Monitoring is off by default.| +|`--watch-config` |Telegraf will restart on local config changes. Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`. Monitoring is off by default.| |`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.| |`--debug` |turn on debug logging| |`--input-filter ` |filter the inputs to enable, separator is `:`| @@ -32,7 +32,7 @@ telegraf [flags] |`--pprof-addr
` |pprof address to listen on, don't activate pprof if empty| |`--processor-filter ` |filter the processors to enable, separator is `:`| |`--quiet` |run in quiet mode| -|`--section-filter` |filter config sections to output, separator is `:`
Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--section-filter` |filter config sections to output, separator is `:`. Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| |`--sample-config` |print out full sample configuration| |`--once` |enable once mode: gather metrics once, write them, and exit| |`--test` |enable test mode: gather metrics once and print them| @@ -40,7 +40,7 @@ telegraf [flags] |`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| |`--version` |display the version and exit| -### Examples +## Examples **Generate a telegraf config file:** @@ -55,7 +55,7 @@ telegraf [flags] `telegraf --config telegraf.conf --test` **Run telegraf with all plugins defined in config file:** - + `telegraf --config telegraf.conf` **Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9af88b669ea9f..25d10a90b1340 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,3 +1,5 @@ + + # Configuration Telegraf's configuration file is written using [TOML][] and is composed of @@ -5,9 +7,10 @@ three sections: [global tags][], [agent][] settings, and [plugins][]. View the default [telegraf.conf][] config file with all available plugins. -### Generating a Configuration File +## Generating a Configuration File A default config file can be generated by telegraf: + ```sh telegraf config > telegraf.conf ``` @@ -21,7 +24,7 @@ telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config [View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. -### Configuration Loading +## Configuration Loading The location of the configuration file can be set via the `--config` command line flag. @@ -34,7 +37,7 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for the main configuration file and `/etc/telegraf/telegraf.d` for the directory of configuration files. -### Environment Variables +## Environment Variables Environment variables can be used anywhere in the config file, simply surround them with `${}`. Replacement occurs before file parsing. For strings @@ -49,14 +52,17 @@ in the `/etc/default/telegraf` file. `/etc/default/telegraf`: For InfluxDB 1.x: -``` + +```shell USER="alice" INFLUX_URL="http://localhost:8086" INFLUX_SKIP_DATABASE_CREATION="true" INFLUX_PASSWORD="monkey123" ``` + For InfluxDB OSS 2: -``` + +```shell INFLUX_HOST="http://localhost:8086" # used to be 9999 INFLUX_TOKEN="replace_with_your_token" INFLUX_ORG="your_username" @@ -64,7 +70,8 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` For InfluxDB Cloud 2: -``` + +```shell # For AWS West (Oregon) INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls @@ -74,6 +81,7 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` `/etc/telegraf.conf`: + ```toml [global_tags] user = "${USER}" @@ -103,6 +111,7 @@ INFLUX_BUCKET="replace_with_your_bucket_name" The above files will produce the following effective configuration file to be parsed: + ```toml [global_tags] user = "alice" @@ -132,17 +141,18 @@ parsed: bucket = "replace_with_your_bucket_name" ``` -### Intervals +## Intervals Intervals are durations of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. 
+ ```toml [agent] interval = "10s" ``` -### Global Tags +## Global Tags Global tags can be specified in the `[global_tags]` table in key="value" format. All metrics that are gathered will be tagged with the tags specified. @@ -153,7 +163,7 @@ Global tags are overriden by tags set by plugins. dc = "us-east-1" ``` -### Agent +## Agent The agent table configures Telegraf and the defaults used across all plugins. @@ -209,7 +219,6 @@ The agent table configures Telegraf and the defaults used across all plugins. Name of the file to be logged to when using the "file" logtarget. If set to the empty string then logs are written to stderr. - - **logfile_rotation_interval**: The logfile will be rotated after the time interval specified. When set to 0 no time based rotation is performed. @@ -231,7 +240,7 @@ The agent table configures Telegraf and the defaults used across all plugins. - **omit_hostname**: If set to true, do no set the "host" tag in the telegraf agent. -### Plugins +## Plugins Telegraf plugins are divided into 4 types: [inputs][], [outputs][], [processors][], and [aggregators][]. @@ -287,6 +296,7 @@ emitted from the input plugin. #### Examples Use the name_suffix parameter to emit measurements with the name `cpu_total`: + ```toml [[inputs.cpu]] name_suffix = "_total" @@ -295,6 +305,7 @@ Use the name_suffix parameter to emit measurements with the name `cpu_total`: ``` Use the name_override parameter to emit measurements with the name `foobar`: + ```toml [[inputs.cpu]] name_override = "foobar" @@ -307,6 +318,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` > **NOTE**: With TOML, order matters. Parameters belong to the last defined > table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin > definition. + ```toml [[inputs.cpu]] percpu = false @@ -318,6 +330,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` Utilize `name_override`, `name_prefix`, or `name_suffix` config options to avoid measurement collisions when defining multiple plugins: + ```toml [[inputs.cpu]] percpu = false @@ -357,6 +370,7 @@ emitted from the output plugin. #### Examples Override flush parameters for a single output: + ```toml [agent] flush_interval = "10s" @@ -394,6 +408,7 @@ processor. If the order processors are applied matters you must set order on all involved processors: + ```toml [[processors.rename]] order = 1 @@ -445,6 +460,7 @@ aggregator. Collect and emit the min/max of the system load1 metric every 30s, dropping the originals. + ```toml [[inputs.system]] fieldpass = ["load1"] # collects system load1 metric. @@ -460,6 +476,7 @@ the originals. Collect and emit the min/max of the swap metrics every 30s, dropping the originals. The aggregator will not be applied to the system load metrics due to the `namepass` parameter. + ```toml [[inputs.swap]] @@ -475,14 +492,13 @@ to the `namepass` parameter. files = ["stdout"] ``` - -### Metric Filtering +## Metric Filtering Metric filtering can be configured per plugin on any input, output, processor, and aggregator plugin. Filters fall under two categories: Selectors and Modifiers. -#### Selectors +### Selectors Selector filters include or exclude entire metrics. When a metric is excluded from a Input or an Output plugin, the metric is dropped. If a metric is @@ -510,7 +526,7 @@ is tested on metrics after they have passed the `tagpass` test. defined at the *_end_* of the plugin definition, otherwise subsequent plugin config options will be interpreted as part of the tagpass/tagdrop tables. 
-#### Modifiers +### Modifiers Modifier filters remove tags and fields from a metric. If all fields are removed the metric is removed. @@ -536,9 +552,10 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns will be discarded from the metric. Any tag can be filtered including global tags and the agent `host` tag. -#### Filtering Examples +### Filtering Examples + +#### Using tagpass and tagdrop -##### Using tagpass and tagdrop: ```toml [[inputs.cpu]] percpu = true @@ -571,7 +588,8 @@ tags and the agent `host` tag. instance = ["isatap*", "Local*"] ``` -##### Using fieldpass and fielddrop: +#### Using fieldpass and fielddrop + ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] @@ -584,7 +602,8 @@ tags and the agent `host` tag. fieldpass = ["inodes*"] ``` -##### Using namepass and namedrop: +#### Using namepass and namedrop + ```toml # Drop all metrics about containers for kubelet [[inputs.prometheus]] @@ -597,7 +616,8 @@ tags and the agent `host` tag. namepass = ["rest_client_*"] ``` -##### Using taginclude and tagexclude: +#### Using taginclude and tagexclude + ```toml # Only include the "cpu" tag in the measurements for the cpu plugin. [[inputs.cpu]] @@ -610,7 +630,8 @@ tags and the agent `host` tag. tagexclude = ["fstype"] ``` -##### Metrics can be routed to different outputs using the metric name and tags: +#### Metrics can be routed to different outputs using the metric name and tags + ```toml [[outputs.influxdb]] urls = [ "http://localhost:8086" ] @@ -632,7 +653,7 @@ tags and the agent `host` tag. cpu = ["cpu0"] ``` -##### Routing metrics to different outputs based on the input. +#### Routing metrics to different outputs based on the input Metrics are tagged with `influxdb_database` in the input, which is then used to select the output. The tag is removed in the outputs before writing. @@ -656,7 +677,7 @@ select the output. The tag is removed in the outputs before writing. influxdb_database = "other" ``` -### Transport Layer Security (TLS) +## Transport Layer Security (TLS) Reference the detailed [TLS][] documentation. diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index 83759ed72bb63..f3dc0699ca2df 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -1,8 +1,8 @@ -### External Plugins +# External Plugins -[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside -of Telegraf that can run through an `execd` plugin. These external plugins allow for -more flexibility compared to internal Telegraf plugins. +[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside +of Telegraf that can run through an `execd` plugin. These external plugins allow for +more flexibility compared to internal Telegraf plugins. - External plugins can be written in any language (internal Telegraf plugins can only written in Go) - External plugins can access to libraries not written in Go @@ -11,7 +11,8 @@ more flexibility compared to internal Telegraf plugins. - You don't need to wait on the Telegraf team to publish your plugin and start working with it. - using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use -### External Plugin Guidelines +## External Plugin Guidelines + The guidelines of writing external plugins would follow those for our general [input](/docs/INPUTS.md), [output](/docs/OUTPUTS.md), [processor](/docs/PROCESSORS.md), and [aggregator](/docs/AGGREGATORS.md) plugins. 
Please reference the documentation on how to create these plugins written in Go. @@ -19,51 +20,55 @@ Please reference the documentation on how to create these plugins written in Go. _For listed [external plugins](/EXTERNAL_PLUGINS.md), the author of the external plugin is also responsible for the maintenance and feature development of external plugins. Expect to have users open plugin issues on its respective GitHub repository._ -#### Execd Go Shim +### Execd Go Shim + For Go plugins, there is a [Execd Go Shim](/plugins/common/shim/) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim allows anyone to build and run it as a separate app using one of the `execd`plugins: + - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) Follow the [Steps to externalize a plugin](/plugins/common/shim#steps-to-externalize-a-plugin) and [Steps to build and run your plugin](/plugins/common/shim#steps-to-build-and-run-your-plugin) to properly with the Execd Go Shim -#### Step-by-Step guidelines -This is a guide to help you set up your plugin to use it with `execd` -1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices: +### Step-by-Step guidelines + +This is a guide to help you set up your plugin to use it with `execd`: + +1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices: - [Input Plugins](/docs/INPUTS.md) - [Processor Plugins](/docs/PROCESSORS.md) - [Aggregator Plugins](/docs/AGGREGATORS.md) - [Output Plugins](/docs/OUTPUTS.md) 2. If your plugin is written in Go, include the steps for the [Execd Go Shim](/plugins/common/shim#steps-to-build-and-run-your-plugin) - 1. Move the project to an external repo, it's recommended to preserve the path - structure, (but not strictly necessary). eg if your plugin was at - `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu` - in the new repo. For a further example of what this might look like, take a - look at [ssoroka/rand](https://github.com/ssoroka/rand) or - [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn) - 1. Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder. - This will be the entrypoint to the plugin when run as a stand-alone program, and - it will call the shim code for you to make that happen. It's recommended to - have only one plugin per repo, as the shim is not designed to run multiple - plugins at the same time (it would vastly complicate things). - 1. Edit the main.go file to import your plugin. Within Telegraf this would have - been done in an all.go file, but here we don't split the two apart, and the change - just goes in the top of main.go. If you skip this step, your plugin will do nothing. - eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"` - 1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration - specific to your plugin. Note that this config file **must be separate from the - rest of the config for Telegraf, and must not be in a shared directory where - Telegraf is expecting to load all configs**. If Telegraf reads this config file - it will not know which plugin it relates to. 
Telegraf instead uses an execd config - block to look for this plugin. - 1. Add usage and development instructions in the homepage of your repository for running - your plugin with its respective `execd` plugin. Please refer to - [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) - for examples. Include the following steps: + - Move the project to an external repo, it's recommended to preserve the path + structure, (but not strictly necessary). eg if your plugin was at + `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu` + in the new repo. For a further example of what this might look like, take a + look at [ssoroka/rand](https://github.com/ssoroka/rand) or + [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn) + - Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder. + This will be the entrypoint to the plugin when run as a stand-alone program, and + it will call the shim code for you to make that happen. It's recommended to + have only one plugin per repo, as the shim is not designed to run multiple + plugins at the same time (it would vastly complicate things). + - Edit the main.go file to import your plugin. Within Telegraf this would have + been done in an all.go file, but here we don't split the two apart, and the change + just goes in the top of main.go. If you skip this step, your plugin will do nothing. + eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"` + - Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration + specific to your plugin. Note that this config file **must be separate from the + rest of the config for Telegraf, and must not be in a shared directory where + Telegraf is expecting to load all configs**. If Telegraf reads this config file + it will not know which plugin it relates to. Telegraf instead uses an execd config + block to look for this plugin. + - Add usage and development instructions in the homepage of your repository for running + your plugin with its respective `execd` plugin. Please refer to + [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) + for examples. Include the following steps: 1. How to download the release package for your platform or how to clone the binary for your external plugin 1. The commands to build your binary 1. Location to edit your `telegraf.conf` - 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), + 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd) - 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) - list. Please include the plugin name, link to the plugin repository and a short description of the plugin. + - Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) + list. Please include the plugin name, link to the plugin repository and a short description of the plugin. 
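As a rough illustration of the configuration step above (running your external plugin through `inputs.execd`), a minimal sketch might look like the following. The binary path, config path, and `-config` flag are placeholders for whatever your plugin's `main.go` actually accepts; check the plugin's own README for the exact invocation.

```toml
## Hypothetical example: run an externalized input plugin through execd.
## Both paths are placeholders for your own compiled binary and its
## plugin-specific config file (kept outside Telegraf's config directory).
[[inputs.execd]]
  command = ["/usr/local/bin/my-plugin", "-config", "/etc/my-plugin/plugin.conf"]
  signal = "none"
```

Processor and output plugins are wired up the same way through `processors.execd` and `outputs.execd` respectively.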
diff --git a/docs/FAQ.md b/docs/FAQ.md index 40a101fdf6fe1..c702a91564994 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -1,24 +1,23 @@ # Frequently Asked Questions -### Q: How can I monitor the Docker Engine Host from within a container? +## Q: How can I monitor the Docker Engine Host from within a container? You will need to setup several volume mounts as well as some environment variables: -``` + +```shell docker run --name telegraf \ - -v /:/hostfs:ro \ - -e HOST_ETC=/hostfs/etc \ - -e HOST_PROC=/hostfs/proc \ - -e HOST_SYS=/hostfs/sys \ - -e HOST_VAR=/hostfs/var \ - -e HOST_RUN=/hostfs/run \ - -e HOST_MOUNT_PREFIX=/hostfs \ - telegraf + -v /:/hostfs:ro \ + -e HOST_ETC=/hostfs/etc \ + -e HOST_PROC=/hostfs/proc \ + -e HOST_SYS=/hostfs/sys \ + -e HOST_VAR=/hostfs/var \ + -e HOST_RUN=/hostfs/run \ + -e HOST_MOUNT_PREFIX=/hostfs \ + telegraf ``` - -### Q: Why do I get a "no such host" error resolving hostnames that other -programs can resolve? +## Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve? Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution). This resolver behaves differently than the C library functions but is more @@ -29,16 +28,18 @@ that are unsupported by the pure Go resolver, you can switch to the cgo resolver. If running manually set: -``` + +```shell export GODEBUG=netdns=cgo ``` If running as a service add the environment variable to `/etc/default/telegraf`: -``` + +```shell GODEBUG=netdns=cgo ``` -### Q: How can I manage series cardinality? +## Q: How can I manage series cardinality? High [series cardinality][], when not properly managed, can cause high load on your database. Telegraf attempts to avoid creating series with high diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 679c24e287604..6f553b060aadb 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -1,4 +1,4 @@ -### Input Plugins +# Input Plugins This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows for operators to @@ -8,7 +8,7 @@ to create new ways of generating metrics. Plugin authorship is kept as simple as possible to promote people to develop and submit new inputs. -### Input Plugin Guidelines +## Input Plugin Guidelines - A plugin must conform to the [telegraf.Input][] interface. - Input Plugins should call `inputs.Add` in their `init` function to register @@ -25,7 +25,7 @@ and submit new inputs. Let's say you've written a plugin that emits metrics about processes on the current host. -### Input Plugin Example +## Input Plugin Example ```go package simple @@ -55,7 +55,7 @@ func (s *Simple) SampleConfig() string { // Init is for setup, and validating config. func (s *Simple) Init() error { - return nil + return nil } func (s *Simple) Gather(acc telegraf.Accumulator) error { @@ -75,9 +75,9 @@ func init() { ### Development -* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker +- Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment using docker-compose. -* ***[Optional]*** When developing a plugin, add a `dev` directory with a +- ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf` as well as any other supporting files, where sensible. 
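For a quick manual check once a new input compiles, a throwaway config plus a single `--test` run is often enough. This is only a sketch using the `simple` example from the guide above; it assumes you have registered and built that example into your Telegraf binary, since `simple` is not a shipped plugin.

```toml
# throwaway telegraf.conf exercising the example "simple" input from above
[[inputs.simple]]
  ok = true
```

```sh
telegraf --config telegraf.conf --test
```

The `--test` flag gathers once and prints the resulting metrics to stdout, which makes it easy to confirm the plugin emits what you expect before wiring up real outputs.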
diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md index b7af829588c8b..ef76332951ca9 100644 --- a/docs/INTEGRATION_TESTS.md +++ b/docs/INTEGRATION_TESTS.md @@ -1,61 +1,62 @@ # Integration Tests -To run our current integration test suite: +To run our current integration test suite: Running the integration tests requires several docker containers to be running. You can start the containers with: -``` + +```shell docker-compose up ``` To run only the integration tests use: -``` +```shell make test-integration ``` Use `make docker-kill` to stop the containers. -Contributing integration tests: +Contributing integration tests: - Add Integration to the end of the test name so it will be run with the above command. - Writes tests where no library is being used in the plugin - There is poor code coverage - It has dynamic code that only gets run at runtime eg: SQL -Current areas we have integration tests: +Current areas we have integration tests: | Area | What it does | |------------------------------------|-------------------------------------------| | Inputs: Aerospike | | | Inputs: Disque | | -| Inputs: Dovecot | | -| Inputs: Mcrouter | | -| Inputs: Memcached | | -| Inputs: Mysql | | -| Inputs: Opcua | | -| Inputs: Openldap | | -| Inputs: Pgbouncer | | -| Inputs: Postgresql | | -| Inputs: Postgresql extensible | | -| Inputs: Procstat / Native windows | | -| Inputs: Prometheus | | -| Inputs: Redis | | -| Inputs: Sqlserver | | -| Inputs: Win perf counters | | -| Inputs: Win services | | -| Inputs: Zookeeper | | -| Outputs: Cratedb / Postgres | | -| Outputs: Elasticsearch | | -| Outputs: Kafka | | -| Outputs: MQTT | | -| Outputs: Nats | | -| Outputs: NSQ | | +| Inputs: Dovecot | | +| Inputs: Mcrouter | | +| Inputs: Memcached | | +| Inputs: Mysql | | +| Inputs: Opcua | | +| Inputs: Openldap | | +| Inputs: Pgbouncer | | +| Inputs: Postgresql | | +| Inputs: Postgresql extensible | | +| Inputs: Procstat / Native windows | | +| Inputs: Prometheus | | +| Inputs: Redis | | +| Inputs: Sqlserver | | +| Inputs: Win perf counters | | +| Inputs: Win services | | +| Inputs: Zookeeper | | +| Outputs: Cratedb / Postgres | | +| Outputs: Elasticsearch | | +| Outputs: Kafka | | +| Outputs: MQTT | | +| Outputs: Nats | | +| Outputs: NSQ | | Areas we would benefit most from new integration tests: | Area | |------------------------------------| -| SNMP | -| MYSQL | -| SQLSERVER | +| SNMP | +| MYSQL | +| SQLSERVER | diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index db8383126ad68..b9baa69a9d3f4 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -1,10 +1,10 @@ -### Output Plugins +# Output Plugins This section is for developers who want to create a new output sink. Outputs are created in a similar manner as collection plugins, and their interface has similar constructs. -### Output Plugin Guidelines +## Output Plugin Guidelines - An output must conform to the [telegraf.Output][] interface. - Outputs should call `outputs.Add` in their `init` function to register @@ -17,7 +17,7 @@ similar constructs. - The `Description` function should say in one line what this output does. - Follow the recommended [Code Style][]. -### Output Plugin Example +## Output Plugin Example ```go package simpleoutput @@ -46,7 +46,7 @@ func (s *Simple) SampleConfig() string { // Init is for setup, and validating config. 
func (s *Simple) Init() error { - return nil + return nil } func (s *Simple) Connect() error { @@ -103,6 +103,7 @@ You should also add the following to your `SampleConfig()`: ## Flushing Metrics to Outputs Metrics are flushed to outputs when any of the following events happen: + - `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval - At least `metric_batch_size` count of metrics are waiting in the buffer - The telegraf process has received a SIGUSR1 signal From 779c1f0a5906a7e5e5dec2330a0dcef8768e10c5 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:25 -0700 Subject: [PATCH 065/133] chore: clean up all markdown lint errors in parser plugins (#10153) --- plugins/parsers/EXAMPLE_README.md | 9 +- plugins/parsers/collectd/README.md | 6 +- plugins/parsers/csv/README.md | 4 +- plugins/parsers/dropwizard/README.md | 156 +++++++++--------- plugins/parsers/form_urlencoded/README.md | 12 +- plugins/parsers/graphite/README.md | 4 +- plugins/parsers/grok/README.md | 29 ++-- plugins/parsers/influx/README.md | 3 +- plugins/parsers/json/README.md | 39 +++-- plugins/parsers/json_v2/README.md | 7 +- plugins/parsers/logfmt/README.md | 8 +- plugins/parsers/nagios/README.md | 2 +- .../parsers/prometheusremotewrite/README.md | 42 ++--- plugins/parsers/value/README.md | 3 +- plugins/parsers/wavefront/README.md | 2 +- plugins/parsers/xpath/README.md | 57 ++++--- 16 files changed, 208 insertions(+), 175 deletions(-) diff --git a/plugins/parsers/EXAMPLE_README.md b/plugins/parsers/EXAMPLE_README.md index b3c1bc2e29db7..4a8e6d7f73fc9 100644 --- a/plugins/parsers/EXAMPLE_README.md +++ b/plugins/parsers/EXAMPLE_README.md @@ -3,7 +3,7 @@ This description explains at a high level what the parser does and provides links to where additional information about the format can be found. -### Configuration +## Configuration This section contains the sample configuration for the parser. Since the configuration for a parser is not have a standalone plugin, use the `file` or @@ -24,22 +24,23 @@ configuration for a parser is not have a standalone plugin, use the `file` or example_option = "example_value" ``` -#### example_option +### example_option If an option requires a more expansive explanation than can be included inline in the sample configuration, it may be described here. -### Metrics +## Metrics The optional Metrics section contains details about how the parser converts input data into Telegraf metrics. -### Examples +## Examples The optional Examples section can show an example conversion from the input format using InfluxDB Line Protocol as the reference format. For line delimited text formats a diff may be appropriate: + ```diff - cpu|host=localhost|source=example.org|value=42 + cpu,host=localhost,source=example.org value=42 diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md index 8dbc052be145d..ad36775354610 100644 --- a/plugins/parsers/collectd/README.md +++ b/plugins/parsers/collectd/README.md @@ -17,7 +17,7 @@ Additional information including client setup can be found You can also change the path to the typesdb or add additional typesdb using `collectd_typesdb`. 
-### Configuration +## Configuration ```toml [[inputs.socket_listener]] @@ -43,9 +43,9 @@ You can also change the path to the typesdb or add additional typesdb using collectd_parse_multivalue = "split" ``` -### Example Output +## Example Output -``` +```text memory,type=memory,type_instance=buffered value=2520051712 1560455990829955922 memory,type=memory,type_instance=used value=3710791680 1560455990829955922 memory,type=memory,type_instance=buffered value=2520047616 1560455980830417318 diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index 192c9216b3a82..c1d727a37ca1a 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -94,7 +94,7 @@ or a format string in using the Go "reference time" which is defined to be the Consult the Go [time][time parse] package for details and additional examples on how to set the time format. -### Metrics +## Metrics One metric is created for each row with the columns added as fields. The type of the field is automatically determined based on the contents of the value. @@ -102,7 +102,7 @@ of the field is automatically determined based on the contents of the value. In addition to the options above, you can use [metric filtering][] to skip over columns and rows. -### Examples +## Examples Config: diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md index 436518a67e110..99d9af4cf56f9 100644 --- a/plugins/parsers/dropwizard/README.md +++ b/plugins/parsers/dropwizard/README.md @@ -5,7 +5,7 @@ The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] represe [templates]: /docs/TEMPLATE_PATTERN.md [dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/ -### Configuration +## Configuration ```toml [[inputs.file]] @@ -51,76 +51,75 @@ The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] represe # tag2 = "tags.tag2" ``` - -### Examples +## Examples A typical JSON of a dropwizard metric registry: ```json { - "version": "3.0.0", - "counters" : { - "measurement,tag1=green" : { - "count" : 1 - } - }, - "meters" : { - "measurement" : { - "count" : 1, - "m15_rate" : 1.0, - "m1_rate" : 1.0, - "m5_rate" : 1.0, - "mean_rate" : 1.0, - "units" : "events/second" - } - }, - "gauges" : { - "measurement" : { - "value" : 1 - } - }, - "histograms" : { - "measurement" : { - "count" : 1, - "max" : 1.0, - "mean" : 1.0, - "min" : 1.0, - "p50" : 1.0, - "p75" : 1.0, - "p95" : 1.0, - "p98" : 1.0, - "p99" : 1.0, - "p999" : 1.0, - "stddev" : 1.0 - } - }, - "timers" : { - "measurement" : { - "count" : 1, - "max" : 1.0, - "mean" : 1.0, - "min" : 1.0, - "p50" : 1.0, - "p75" : 1.0, - "p95" : 1.0, - "p98" : 1.0, - "p99" : 1.0, - "p999" : 1.0, - "stddev" : 1.0, - "m15_rate" : 1.0, - "m1_rate" : 1.0, - "m5_rate" : 1.0, - "mean_rate" : 1.0, - "duration_units" : "seconds", - "rate_units" : "calls/second" - } - } + "version": "3.0.0", + "counters" : { + "measurement,tag1=green" : { + "count" : 1 + } + }, + "meters" : { + "measurement" : { + "count" : 1, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "units" : "events/second" + } + }, + "gauges" : { + "measurement" : { + "value" : 1 + } + }, + "histograms" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0 + } + }, + "timers" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + 
"p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "duration_units" : "seconds", + "rate_units" : "calls/second" + } + } } ``` Would get translated into 4 different measurements: -``` +```text measurement,metric_type=counter,tag1=green count=1 measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 measurement,metric_type=gauge value=1 @@ -133,27 +132,28 @@ Eg. to parse the following JSON document: ```json { - "time" : "2017-02-22T14:33:03.662+02:00", - "tags" : { - "tag1" : "green", - "tag2" : "yellow" - }, - "metrics" : { - "counters" : { - "measurement" : { - "count" : 1 - } - }, - "meters" : {}, - "gauges" : {}, - "histograms" : {}, - "timers" : {} - } + "time" : "2017-02-22T14:33:03.662+02:00", + "tags" : { + "tag1" : "green", + "tag2" : "yellow" + }, + "metrics" : { + "counters" : { + "measurement" : { + "count" : 1 + } + }, + "meters" : {}, + "gauges" : {}, + "histograms" : {}, + "timers" : {} + } } ``` + and translate it into: -``` +```text measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 ``` diff --git a/plugins/parsers/form_urlencoded/README.md b/plugins/parsers/form_urlencoded/README.md index e3700f44e2311..7d61c0bdae693 100644 --- a/plugins/parsers/form_urlencoded/README.md +++ b/plugins/parsers/form_urlencoded/README.md @@ -1,13 +1,12 @@ # Form Urlencoded - The `form-urlencoded` data format parses `application/x-www-form-urlencoded` data, such as commonly used in the [query string][]. A common use case is to pair it with [http_listener_v2][] input plugin to parse request body or query params. -### Configuration +## Configuration ```toml [[inputs.http_listener_v2]] @@ -29,11 +28,12 @@ request body or query params. form_urlencoded_tag_keys = ["tag1"] ``` -### Examples +## Examples -#### Basic parsing +### Basic parsing Config: + ```toml [[inputs.http_listener_v2]] name_override = "mymetric" @@ -44,12 +44,14 @@ Config: ``` Request: + ```bash curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&field1=0.42&field2=42' ``` Output: -``` + +```text mymetric,tag1=foo field1=0.42,field2=42 ``` diff --git a/plugins/parsers/graphite/README.md b/plugins/parsers/graphite/README.md index 63d7c936ae819..4cc669185ca76 100644 --- a/plugins/parsers/graphite/README.md +++ b/plugins/parsers/graphite/README.md @@ -6,7 +6,7 @@ By default, the separator is left as `.`, but this can be changed using the `separator` argument. For more advanced options, Telegraf supports specifying [templates](#templates) to translate graphite buckets into Telegraf metrics. -### Configuration +## Configuration ```toml [[inputs.exec]] @@ -42,7 +42,7 @@ By default, the separator is left as `.`, but this can be changed using the ] ``` -#### templates +### templates Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for details. diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md index 80936a41dbf62..7a0d0ad661fa6 100644 --- a/plugins/parsers/grok/README.md +++ b/plugins/parsers/grok/README.md @@ -4,13 +4,12 @@ The grok data format parses line delimited data using a regular expression like language. The best way to get acquainted with grok patterns is to read the logstash docs, -which are available here: - https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html +which are available [here](https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html). 
The grok parser uses a slightly modified version of logstash "grok" patterns, with the format: -``` +```text %{[:][:]} ``` @@ -58,7 +57,7 @@ CUSTOM time layouts must be within quotes and be the representation of the "reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` To match a comma decimal point you can use a period in the pattern string. -See https://golang.org/pkg/time/#Parse for more details. +See [Goloang Time docs](https://golang.org/pkg/time/#Parse) for more details. Telegraf has many of its own [built-in patterns][] as well as support for most of the Logstash builtin patterns using [these Go compatible patterns][grok-patterns]. @@ -71,9 +70,10 @@ friendly pattern that is not fully compatible with the Logstash pattern. [grok-patterns]: https://github.com/vjeantet/grok/blob/master/patterns/grok-patterns If you need help building patterns to match your logs, -you will find the https://grokdebug.herokuapp.com application quite useful! +you will find the [Grok Debug](https://grokdebug.herokuapp.com) application quite useful! + +## Configuration -### Configuration ```toml [[inputs.file]] ## Files to parse each interval. @@ -121,11 +121,11 @@ you will find the https://grokdebug.herokuapp.com application quite useful! # grok_unique_timestamp = "auto" ``` -#### Timestamp Examples +### Timestamp Examples This example input and config parses a file using a custom timestamp conversion: -``` +```text 2017-02-21 13:10:34 value=42 ``` @@ -136,7 +136,7 @@ This example input and config parses a file using a custom timestamp conversion: This example input and config parses a file using a timestamp in unix time: -``` +```text 1466004605 value=42 1466004605.123456789 value=42 ``` @@ -148,7 +148,7 @@ This example input and config parses a file using a timestamp in unix time: This example parses a file using a built-in conversion and a custom pattern: -``` +```text Wed Apr 12 13:10:34 PST 2017 value=42 ``` @@ -162,7 +162,7 @@ Wed Apr 12 13:10:34 PST 2017 value=42 This example input and config parses a file using a custom timestamp conversion that doesn't match any specific standard: -``` +```text 21/02/2017 13:10:34 value=42 ``` @@ -192,7 +192,7 @@ syntax with `'''` may be useful. The following config examples will parse this input file: -``` +```text |42|\uD83D\uDC2F|'telegraf'| ``` @@ -208,6 +208,7 @@ backslash must be escaped, requiring us to escape the backslash a second time. We cannot use a literal TOML string for the pattern, because we cannot match a `'` within it. However, it works well for the custom pattern. + ```toml [[inputs.file]] grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] @@ -215,6 +216,7 @@ We cannot use a literal TOML string for the pattern, because we cannot match a ``` A multi-line literal string allows us to encode the pattern: + ```toml [[inputs.file]] grok_patterns = [''' @@ -251,7 +253,8 @@ are a few techniques that can help: - Avoid using patterns such as `%{DATA}` that will always match. 
- If possible, add `^` and `$` anchors to your pattern: - ``` + + ```toml [[inputs.file]] grok_patterns = ["^%{COMBINED_LOG_FORMAT}$"] ``` diff --git a/plugins/parsers/influx/README.md b/plugins/parsers/influx/README.md index 874bb279d5a77..b0624e21759ea 100644 --- a/plugins/parsers/influx/README.md +++ b/plugins/parsers/influx/README.md @@ -5,7 +5,7 @@ metrics are parsed directly into Telegraf metrics. [line protocol]: https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/ -### Configuration +## Configuration ```toml [[inputs.file]] @@ -17,4 +17,3 @@ metrics are parsed directly into Telegraf metrics. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` - diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index 682a0c62b56cb..48fd3234a69ef 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -4,9 +4,9 @@ The JSON data format parses a [JSON][json] object or an array of objects into metric fields. **NOTE:** All JSON numbers are converted to float fields. JSON strings and booleans are -ignored unless specified in the `tag_key` or `json_string_fields` options. +ignored unless specified in the `tag_key` or `json_string_fields` options. -### Configuration +## Configuration ```toml [[inputs.file]] @@ -73,7 +73,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. json_timezone = "" ``` -#### json_query +### json_query The `json_query` is a [GJSON][gjson] path that can be used to transform the JSON document before being parsed. The query is performed before any other @@ -85,7 +85,7 @@ Consult the GJSON [path syntax][gjson syntax] for details and examples, and consider using the [GJSON playground][gjson playground] for developing and debugging your query. -#### json_time_key, json_time_format, json_timezone +### json_time_key, json_time_format, json_timezone By default the current time will be used for all created metrics, to set the time using the JSON document you can use the `json_time_key` and @@ -106,10 +106,12 @@ to be UTC. To default to another timezone, or to local time, specify the [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. -### Examples +## Examples + +### Basic Parsing -#### Basic Parsing Config: + ```toml [[inputs.file]] files = ["example"] @@ -118,6 +120,7 @@ Config: ``` Input: + ```json { "a": 5, @@ -129,13 +132,15 @@ Input: ``` Output: -``` + +```text myjsonmetric a=5,b_c=6 ``` -#### Name, Tags, and String Fields +### Name, Tags, and String Fields Config: + ```toml [[inputs.file]] files = ["example"] @@ -146,6 +151,7 @@ Config: ``` Input: + ```json { "a": 5, @@ -159,16 +165,18 @@ Input: ``` Output: -``` + +```text my_json,my_tag_1=foo a=5,b_c=6,b_my_field="description" ``` -#### Arrays +### Arrays If the JSON data is an array, then each object within the array is parsed with the configured settings. Config: + ```toml [[inputs.file]] files = ["example"] @@ -178,6 +186,7 @@ Config: ``` Input: + ```json [ { @@ -198,16 +207,18 @@ Input: ``` Output: -``` + +```text file a=5,b_c=6 1136387040000000000 file a=7,b_c=8 1168527840000000000 ``` -#### Query +### Query The `json_query` option can be used to parse a subset of the document. 
Config: + ```toml [[inputs.file]] files = ["example"] @@ -218,6 +229,7 @@ Config: ``` Input: + ```json { "obj": { @@ -235,7 +247,8 @@ Input: ``` Output: -``` + +```text file,first=Dale last="Murphy",age=44 file,first=Roger last="Craig",age=68 file,first=Jane last="Murphy",age=47 diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index d1e2e9c407255..5ae80332e8ce1 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -1,6 +1,6 @@ # JSON Parser - Version 2 -This parser takes valid JSON input and turns it into line protocol. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. +This parser takes valid JSON input and turns it into line protocol. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: [gjson.dev/](https://gjson.dev). You can find multiple examples under the `testdata` folder. ## Configuration @@ -79,13 +79,13 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT Note that objects are handled separately, therefore if you provide a path that returns a object it will be ignored. You will need use the `object` config table to parse objects, because `field` and `tag` doesn't handle relationships between data. Each `field` and `tag` you define is handled as a separate data point. The notable difference between `field` and `tag`, is that `tag` values will always be type string while `field` can be multiple types. You can define the type of `field` to be any [type that line protocol supports](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/#data-types-and-format), which are: + * float * int * uint * string * bool - #### **field** Using this field configuration you can gather a non-array/non-object values. Note this acts as a global field when used with the `object` configuration, if you gather an array of values using `object` then the field gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. @@ -98,7 +98,6 @@ Using this field configuration you can gather a non-array/non-object values. Not Using this tag configuration you can gather a non-array/non-object values. Note this acts as a global tag when used with the `object` configuration, if you gather an array of values using `object` then the tag gathered will be added to each resulting line protocol without acknowledging its location in the original JSON. This is defined in TOML as an array table using double brackets. - * **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. 
@@ -193,7 +192,7 @@ Example configuration: Expected line protocol: -``` +```text file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past" file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",name="Bilbo",species="hobbit" diff --git a/plugins/parsers/logfmt/README.md b/plugins/parsers/logfmt/README.md index d3e8ab66f534f..4b19aa34752ea 100644 --- a/plugins/parsers/logfmt/README.md +++ b/plugins/parsers/logfmt/README.md @@ -4,7 +4,7 @@ The `logfmt` data format parses data in [logfmt] format. [logfmt]: https://brandur.org/logfmt -### Configuration +## Configuration ```toml [[inputs.file]] @@ -17,14 +17,14 @@ The `logfmt` data format parses data in [logfmt] format. data_format = "logfmt" ``` -### Metrics +## Metrics Each key/value pair in the line is added to a new metric as a field. The type of the field is automatically determined based on the contents of the value. -### Examples +## Examples -``` +```text - method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653 + logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i ``` diff --git a/plugins/parsers/nagios/README.md b/plugins/parsers/nagios/README.md index e9be6a0dd8873..2b9255db08ec6 100644 --- a/plugins/parsers/nagios/README.md +++ b/plugins/parsers/nagios/README.md @@ -2,7 +2,7 @@ The `nagios` data format parses the output of nagios plugins. -### Configuration +## Configuration ```toml [[inputs.exec]] diff --git a/plugins/parsers/prometheusremotewrite/README.md b/plugins/parsers/prometheusremotewrite/README.md index 6d2c17ef898dc..1a43b26ae73c8 100644 --- a/plugins/parsers/prometheusremotewrite/README.md +++ b/plugins/parsers/prometheusremotewrite/README.md @@ -2,7 +2,7 @@ Converts prometheus remote write samples directly into Telegraf metrics. It can be used with [http_listener_v2](/plugins/inputs/http_listener_v2). There are no additional configuration options for Prometheus Remote Write Samples. -### Configuration +## Configuration ```toml [[inputs.http_listener_v2]] @@ -16,31 +16,33 @@ Converts prometheus remote write samples directly into Telegraf metrics. 
It can data_format = "prometheusremotewrite" ``` -### Example Input -``` +## Example Input + +```json prompb.WriteRequest{ - Timeseries: []*prompb.TimeSeries{ - { - Labels: []*prompb.Label{ - {Name: "__name__", Value: "go_gc_duration_seconds"}, - {Name: "instance", Value: "localhost:9090"}, - {Name: "job", Value: "prometheus"}, - {Name: "quantile", Value: "0.99"}, - }, - Samples: []prompb.Sample{ - {Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, - }, - }, - }, - } + Timeseries: []*prompb.TimeSeries{ + { + Labels: []*prompb.Label{ + {Name: "__name__", Value: "go_gc_duration_seconds"}, + {Name: "instance", Value: "localhost:9090"}, + {Name: "job", Value: "prometheus"}, + {Name: "quantile", Value: "0.99"}, + }, + Samples: []prompb.Sample{ + {Value: 4.63, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + }, + }, + }, + } ``` -### Example Output -``` +## Example Output + +```text prometheus_remote_write,instance=localhost:9090,job=prometheus,quantile=0.99 go_gc_duration_seconds=4.63 1614889298859000000 ``` ## For alignment with the [InfluxDB v1.x Prometheus Remote Write Spec](https://docs.influxdata.com/influxdb/v1.8/supported_protocols/prometheus/#how-prometheus-metrics-are-parsed-in-influxdb) -- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. +- Use the [Starlark processor rename prometheus remote write script](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/testdata/rename_prometheus_remote_write.star) to rename the measurement name to the fieldname and rename the fieldname to value. diff --git a/plugins/parsers/value/README.md b/plugins/parsers/value/README.md index db184d4e8307a..990250a7f76ed 100644 --- a/plugins/parsers/value/README.md +++ b/plugins/parsers/value/README.md @@ -4,7 +4,7 @@ The "value" data format translates single values into Telegraf metrics. This is done by assigning a measurement name and setting a single field ("value") as the parsed metric. -### Configuration +## Configuration You **must** tell Telegraf what type of metric to collect by using the `data_type` configuration option. Available options are: @@ -33,4 +33,3 @@ name of the plugin. data_format = "value" data_type = "integer" # required ``` - diff --git a/plugins/parsers/wavefront/README.md b/plugins/parsers/wavefront/README.md index ab7c56eed2b2e..ef55fca1ac085 100644 --- a/plugins/parsers/wavefront/README.md +++ b/plugins/parsers/wavefront/README.md @@ -4,7 +4,7 @@ Wavefront Data Format is metrics are parsed directly into Telegraf metrics. For more information about the Wavefront Data Format see [here](https://docs.wavefront.com/wavefront_data_format.html). -### Configuration +## Configuration There are no additional configuration options for Wavefront Data Format line-protocol. diff --git a/plugins/parsers/xpath/README.md b/plugins/parsers/xpath/README.md index 09823bbacf982..bb3f4f60a7279 100644 --- a/plugins/parsers/xpath/README.md +++ b/plugins/parsers/xpath/README.md @@ -6,7 +6,8 @@ For supported XPath functions check [the underlying XPath library][xpath lib]. **NOTE:** The type of fields are specified using [XPath functions][xpath lib]. The only exception are *integer* fields that need to be specified in a `fields_int` section. 
-### Supported data formats +## Supported data formats + | name | `data_format` setting | comment | | --------------------------------------- | --------------------- | ------- | | [Extensible Markup Language (XML)][xml] | `"xml"` | | @@ -14,11 +15,14 @@ For supported XPath functions check [the underlying XPath library][xpath lib]. | [MessagePack][msgpack] | `"xpath_msgpack"` | | | [Protocol buffers][protobuf] | `"xpath_protobuf"` | [see additional parameters](protocol-buffers-additiona-settings)| -#### Protocol buffers additional settings +### Protocol buffers additional settings + For using the protocol-buffer format you need to specify a protocol buffer definition file (`.proto`) in `xpath_protobuf_file`, Furthermore, you need to specify which message type you want to use via `xpath_protobuf_type`. -### Configuration (explicit) +## Configuration (explicit) + In this configuration mode, you explicitly specify the field and tags you want to scrape out of your data. + ```toml [[inputs.file]] files = ["example.xml"] @@ -82,6 +86,7 @@ your query. Alternatively to the configuration above, fields can also be specified in a batch way. So contrary to specify the fields in a section, you can define a `name` and a `value` selector used to determine the name and value of the fields in the metric. + ```toml [[inputs.file]] files = ["example.xml"] @@ -137,11 +142,12 @@ metric. device = "string('the ultimate sensor')" ``` + *Please note*: The resulting fields are _always_ of type string! It is also possible to specify a mixture of the two alternative ways of specifying fields. -#### metric_selection (optional) +### metric_selection (optional) You can specify a [XPath][xpath] query to select a subset of nodes from the XML document, each used to generate a new metrics with the specified fields, tags etc. @@ -150,11 +156,11 @@ For relative queries in subsequent queries they are relative to the `metric_sele Specifying `metric_selection` is optional. If not specified all relative queries are relative to the root node of the XML document. -#### metric_name (optional) +### metric_name (optional) By specifying `metric_name` you can override the metric/measurement name with the result of the given [XPath][xpath] query. If not specified, the default metric name is used. -#### timestamp, timestamp_format (optional) +### timestamp, timestamp_format (optional) By default the current time will be used for all created metrics. To set the time from values in the XML document you can specify a [XPath][xpath] query in `timestamp` and set the format in `timestamp_format`. @@ -162,19 +168,19 @@ The `timestamp_format` can be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or an accepted [Go "reference time"][time const]. Consult the Go [time][time parse] package for details and additional examples on how to set the time format. If `timestamp_format` is omitted `unix` format is assumed as result of the `timestamp` query. -#### tags sub-section +### tags sub-section [XPath][xpath] queries in the `tag name = query` format to add tags to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. **NOTE:** Results of tag-queries will always be converted to strings. -#### fields_int sub-section +### fields_int sub-section [XPath][xpath] queries in the `field name = query` format to add integer typed fields to the metrics. The specified path can be absolute (starting with `/`) or relative. 
Relative paths use the currently selected node as reference. **NOTE:** Results of field_int-queries will always be converted to **int64**. The conversion will fail in case the query result is not convertible! -#### fields sub-section +### fields sub-section [XPath][xpath] queries in the `field name = query` format to add non-integer fields to the metrics. The specified path can be absolute (starting with `/`) or relative. Relative paths use the currently selected node as reference. @@ -183,8 +189,7 @@ If no conversion is performed in the query the field will be of type string. **NOTE: Path conversion functions will always succeed even if you convert a text to float!** - -#### field_selection, field_name, field_value (optional) +### field_selection, field_name, field_value (optional) You can specify a [XPath][xpath] query to select a set of nodes forming the fields of the metric. The specified path can be absolute (starting with `/`) or relative to the currently selected node. Each node selected by `field_selection` forms a new field within the metric. @@ -195,15 +200,16 @@ Specifying `field_selection` is optional. This is an alternative way to specify **NOTE: Path conversion functions will always succeed even if you convert a text to float!** -#### field_name_expansion (optional) +### field_name_expansion (optional) When *true*, field names selected with `field_selection` are expanded to a *path* relative to the *selected node*. This is necessary if we e.g. select all leaf nodes as fields and those leaf nodes do not have unique names. That is in case you have duplicate names in the fields you select you should set this to `true`. -### Examples +## Examples This `example.xml` file is used in the configuration examples below: + ```xml @@ -238,11 +244,12 @@ This `example.xml` file is used in the configuration examples below: ``` -#### Basic Parsing +### Basic Parsing This example shows the basic usage of the xml parser. Config: + ```toml [[inputs.file]] files = ["example.xml"] @@ -260,18 +267,20 @@ Config: ``` Output: -``` + +```text file,gateway=Main,host=Hugin seqnr=12i,ok=true 1598610830000000000 ``` In the *tags* definition the XPath function `substring-before()` is used to only extract the sub-string before the space. To get the integer value of `/Gateway/Sequence` we have to use the *fields_int* section as there is no XPath expression to convert node values to integers (only float). The `ok` field is filled with a boolean by specifying a query comparing the query result of `/Gateway/Status` with the string *ok*. Use the type conversions available in the XPath syntax to specify field types. -#### Time and metric names +### Time and metric names This is an example for using time and name of the metric from the XML document itself. Config: + ```toml [[inputs.file]] files = ["example.xml"] @@ -291,16 +300,19 @@ Config: ``` Output: -``` + +```text Status,gateway=Main,host=Hugin ok=true 1596294243000000000 ``` + Additionally to the basic parsing example, the metric name is defined as the name of the `/Gateway/Status` node and the timestamp is derived from the XML document instead of using the execution time. -#### Multi-node selection +### Multi-node selection For XML documents containing metrics for e.g. multiple devices (like `Sensor`s in the *example.xml*), multiple metrics can be generated using node selection. This example shows how to generate a metric for each *Sensor* in the example. 
Config: + ```toml [[inputs.file]] files = ["example.xml"] @@ -329,7 +341,8 @@ Config: ``` Output: -``` + +```text sensors,host=Hugin,name=Facility\ A consumers=3i,frequency=49.78,ok=true,power=123.4,temperature=20 1596294243000000000 sensors,host=Hugin,name=Facility\ B consumers=1i,frequency=49.78,ok=true,power=14.3,temperature=23.1 1596294243000000000 sensors,host=Hugin,name=Facility\ C consumers=0i,frequency=49.78,ok=false,power=0.02,temperature=19.7 1596294243000000000 @@ -337,11 +350,12 @@ sensors,host=Hugin,name=Facility\ C consumers=0i,frequency=49.78,ok=false,power= Using the `metric_selection` option we select all `Sensor` nodes in the XML document. Please note that all field and tag definitions are relative to these selected nodes. An exception is the timestamp definition which is relative to the root node of the XML document. -#### Batch field processing with multi-node selection +### Batch field processing with multi-node selection For XML documents containing metrics with a large number of fields or where the fields are not known before (e.g. an unknown set of `Variable` nodes in the *example.xml*), field selectors can be used. This example shows how to generate a metric for each *Sensor* in the example with fields derived from the *Variable* nodes. Config: + ```toml [[inputs.file]] files = ["example.xml"] @@ -363,7 +377,8 @@ Config: ``` Output: -``` + +```text sensors,host=Hugin,name=Facility\ A consumers=3,frequency=49.78,power=123.4,temperature=20 1596294243000000000 sensors,host=Hugin,name=Facility\ B consumers=1,frequency=49.78,power=14.3,temperature=23.1 1596294243000000000 sensors,host=Hugin,name=Facility\ C consumers=0,frequency=49.78,power=0.02,temperature=19.7 1596294243000000000 From 4df32096c946a55a5a436dda158c123bb6fc1f8e Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:31 -0700 Subject: [PATCH 066/133] chore: clean up all markdown lint errors in root directory (#10154) --- CHANGELOG.md | 1529 ++++++++++++++++++++++--------------------- CONTRIBUTING.md | 31 +- EXTERNAL_PLUGINS.md | 12 +- SECURITY.md | 2 +- 4 files changed, 817 insertions(+), 757 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ed54e1ff44f17..ef451b401d152 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,271 +1,269 @@ + + +# Change Log + ## v1.20.4 [2021-11-17] -#### Release Notes +### Release Notes - - [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 - - [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation +- [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 +- [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation Thank you to @zak-pawel for lots of linter fixes! 
- - [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* - - [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* - - [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* - - [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* - -#### Bugfixes - - - [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 - - [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI - - [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 - - [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up - - [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int - - [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd - - [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library - - [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" - - [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly - - [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 - - [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver - - [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs - - [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling - -#### Features -#### New Input Plugins -#### New Output Plugins -#### New External Plugins +- [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* +- [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* +- [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* +- [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +### Bugfixes + +- [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 +- [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI +- [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 +- [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up +- [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int +- [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd +- [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library +- [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" +- [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly +- 
[#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 +- [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver +- [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs +- [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling ## v1.20.3 [2021-10-27] -#### Release Notes - - - [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2 - -#### Bugfixes - - - [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 - - [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs - - [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps - - [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 - - [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation - - [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests - - [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library - - [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys - - [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD - - [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory - - [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin - - [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field - - [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kuberentes labels - - [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size - - [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook - - [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x relicaset - - [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 - - [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 - - [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 - - [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql`Fix inconsistent metric types in mysql - - [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible - - [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place - - [#9869](https://github.com/influxdata/telegraf/pull/9869) Update 
github.com/prometheus/common module from 0.26.0 to 0.31.1 - - [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 - - [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage - - [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 - -#### New External Plugins - - - [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka - - [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka +### Release Notes + +- [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2 + +### Bugfixes + +- [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 +- [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs +- [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps +- [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 +- [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation +- [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests +- [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library +- [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys +- [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD +- [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory +- [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin +- [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field +- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kuberentes labels +- [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size +- [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook +- [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x relicaset +- [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 +- [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 +- [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 +- [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql`Fix inconsistent metric types in mysql +- [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible +- [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` 
Move err check to correct place +- [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 +- [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 +- [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage +- [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +### New External Plugins + +- [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka +- [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka ## v1.20.2 [2021-10-07] -#### Bugfixes +### Bugfixes - - [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API - - [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields - - [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser - - [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 - - [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built +- [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API +- [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields +- [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser +- [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 +- [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built ## v1.20.1 [2021-10-06] -#### Bugfixes - - - [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 - - [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 - - [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 - - [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference - - [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging - - [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config - - [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags - - [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version - - [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing - - [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation - - [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client - - [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 - - 
[#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module - - [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 - - [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 - - [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 - -#### Features - - - [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field +### Bugfixes + +- [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 +- [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 +- [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 +- [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference +- [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging +- [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config +- [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags +- [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version +- [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing +- [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation +- [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client +- [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module +- [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 +- [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 +- [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +### Features + +- [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field ## v1.20.0 [2021-09-17] -#### Release Notes - - - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 - -#### Bugfixes - - - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 - - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests - - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 - - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 - - [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update 
timestamps and expiration time as new data arrives - - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds - - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 - - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version - - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value - - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. - - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 - - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query - - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 - - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats - - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 - - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 - - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names - - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 - - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 - - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error - - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging - - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 - - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module - - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak - - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting - -#### Features - - - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support - - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype - - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces - - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider - - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP - - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children - - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type - - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog - - [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page - - 
[#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag - - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins - - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support - - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name - - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser - - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance - - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url - - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status - - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) - - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 - -#### New Input Plugins - - - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs - - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection - - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input - - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin - -#### New Output Plugins - - - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output - - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output +### Release Notes + +- [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +### Bugfixes + +- [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 +- [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests +- [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 +- [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 +- [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives +- [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds +- [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 +- [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version +- [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value +- [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. 
+- [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 +- [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query +- [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 +- [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats +- [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 +- [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 +- [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names +- [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 +- [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 +- [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error +- [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging +- [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 +- [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module +- [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak +- [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting +### Features + +- [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support +- [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype +- [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces +- [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider +- [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP +- [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children +- [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type +- [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog +- [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page +- [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag +- [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins +- [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support +- [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name +- [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser +- [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance +- 
[#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url +- [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status +- [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) +- [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 + +### New Input Plugins + +- [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs +- [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection +- [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input +- [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin + +### New Output Plugins + +- [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output +- [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output ## v1.19.3 [2021-08-18] -#### Bugfixes - - - [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1 - - [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 - - [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 - - [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 - - [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery - - [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation - - [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores - - [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set - - [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection - - [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api - - [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki - - [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 - - [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error - - [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 - - [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path - - [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 - - [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 - - [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered +### Bugfixes + +- [#9639](https://github.com/influxdata/telegraf/pull/9639) Update 
sirupsen/logrus module from 1.7.0 to 1.8.1 +- [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 +- [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 +- [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery +- [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation +- [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores +- [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set +- [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection +- [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api +- [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki +- [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 +- [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error +- [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 +- [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path +- [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 +- [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 +- [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered ## v1.19.2 [2021-07-28] -#### Release Notes - - - [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 - -#### Bugfixes - - - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions - - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written - - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims - - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting - - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column - - [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* - - [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics - - [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name - - [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics - - [#9323](https://github.com/influxdata/telegraf/pull/9323) 
`inputs.x509_cert` Prevent x509_cert from hanging on UDP connection - - [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled - - [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure - - [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map - - [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication - - [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers - - [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles - - [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log - - [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 - - [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups - - [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly - -#### Features - - - [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified +### Release Notes + +- [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 + +### Bugfixes + +- [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions +- [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written +- [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims +- [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting +- [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column +- [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* +- [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics +- [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name +- [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics +- [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection +- [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled +- [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure +- [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map +- [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication +- [#9520](https://github.com/influxdata/telegraf/pull/9520) 
`parsers.json_v2` Add support for large uint64 and int64 numbers +- [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles +- [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log +- [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 +- [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups +- [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly + +### Features + +- [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified ## v1.19.1 [2021-07-07] -#### Bugfixes - - - [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified - - [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory - - [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic - - [#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic - - [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic - - [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression - - [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 - - [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error - - [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error - - [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 - - [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 - - [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 - - [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics - - [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify - - [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https - - [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 - - [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys - - [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support - - [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 - - [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* +### Bugfixes + +- [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified +- [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory +- [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic +- 
[#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic +- [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic +- [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression +- [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 +- [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error +- [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error +- [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 +- [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 +- [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 +- [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics +- [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify +- [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https +- [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 +- [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys +- [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support +- [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 +- [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* ## v1.19.0 [2021-06-17] -#### Release Notes +### Release Notes - Many linter fixes - thanks @zak-pawel and all! - [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5 -#### Bugfixes +### Bugfixes - [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4 - [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http: @@ -282,7 +280,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format - [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries -#### Features +### Features - [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL - [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output @@ -316,7 +314,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option - [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric -#### New Input Plugins +### New Input Plugins - [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble @@ -324,18 +322,18 @@ Thank you to @zak-pawel for lots of linter fixes! 
- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda - [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan -#### New Output Plugins +### New Output Plugins - [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia - [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio - [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov -#### New Parser Plugins +### New Parser Plugins - [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa - [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink -#### New External Plugins +### New External Plugins - [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon - [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard @@ -344,486 +342,486 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.18.3 [2021-05-20] -#### Release Notes - - - Added FreeBSD armv7 build - -#### Bugfixes +### Release Notes - - [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics - - [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 - - [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error - - [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query - - [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 - - [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs - - [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 - - [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 - - [#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 - - [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 - - [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 - - [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 - - [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 - - [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go +- Added FreeBSD armv7 build + +### Bugfixes + +- [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics +- [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 +- [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error +- [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from 
sqlServerRingBufferCPU query +- [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 +- [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs +- [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 +- [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 +- [#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 +- [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 +- [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 +- [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 +- [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 +- [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go -#### Features +### Features - - [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression +- [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression ## v1.18.2 [2021-04-28] -#### Bugfixes +### Bugfixes - - [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings - - [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo - - [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls - - [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write - - [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures - - [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner - - [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later - - [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name - - [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling +- [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings +- [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo +- [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls +- [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write +- [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures +- [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner +- [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later +- 
[#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name
+- [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling

## v1.18.1 [2021-04-07]

-#### Bugfixes
-
- - [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8
- - [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override
- - [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat
- - [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed
- - [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode
- - [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more effeciently
- - [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id
- - [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object
- - [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation
- - [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic
- - [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats
- - [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode
- - [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloadinlg
+### Bugfixes
+
+- [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8
+- [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override
+- [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat
+- [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed
+- [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode
+- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently
+- [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id
+- [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object
+- [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation
+- [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic
+- [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats
+- [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode
+- [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloading

## v1.18.0 [2021-03-17]

-#### Release Notes
-
- - Support Go version 1.16.2
- - Added support for code signing 
in Windows - -#### Bugfixes - - - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice - - [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling - - [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list - - [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin - - [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues - - [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count - - [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions - - [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin - - [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug - - [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types - - [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache - - [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser - - [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies - - [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. - - [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true - - [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL - - [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set - - [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file - -#### Features - - - [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin - - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality - - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy - - [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. 
namepass) - - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest - - [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols - - [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging - - [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin - - [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric - - [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url - - [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows - - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script - - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin - - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input - - [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON - - [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) - - [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients - - [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only - -#### New Inputs - - [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch - - [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog - - [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData - - [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey - - [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga - -#### New Outputs - - [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac - - [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @gkatzioura - - [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey - - [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb - -#### New Aggregators - - [Derivative Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative)- Contributed by @KarstenSchnitter - - [Quantile Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan - -#### New 
Processors - - [AWS EC2 Metadata Processor Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo - -#### New Parsers - - [XML Parser Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan - -#### New Serializers - - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox - -#### New External Plugins - - [GeoIP Processor Plugin ](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali - - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat - - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope +### Release Notes -## v1.17.3 [2021-02-17] +- Support Go version 1.16.2 +- Added support for code signing in Windows + +### Bugfixes + +- [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice +- [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling +- [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list +- [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin +- [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues +- [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count +- [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions +- [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin +- [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug +- [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types +- [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache +- [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser +- [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies +- [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. 
+- [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true +- [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL +- [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set +- [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file + +### Features -#### Bugfixes +- [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin +- [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality +- [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy +- [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. namepass) +- [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest +- [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols +- [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging +- [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin +- [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric +- [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url +- [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows +- [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script +- [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin +- [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input +- [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON +- [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) +- [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients +- [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only - - [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files - - [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 - - [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 - - [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value - - [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` 
inputs.x509_cert: Fix timeout issue - - [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt - - [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors - - [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper - - [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config - - [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline - - [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux - - [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version +### New Inputs +- [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch +- [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog +- [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData +- [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey +- [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga -## v1.17.2 [2021-01-28] +### New Outputs + +- [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac +- [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @gkatzioura +- [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey +- [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb + +### New Aggregators + +- [Derivative Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) - Contributed by @KarstenSchnitter +- [Quantile Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan + +### New Processors + +- [AWS EC2 Metadata Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo + +### New Parsers -#### Bugfixes +- [XML Parser Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan - - [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native - - [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function +### New Serializers + +- [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox +### New External Plugins + +- [GeoIP Processor Plugin](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali +- [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat +- 
[SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope + +## v1.17.3 [2021-02-17] + +### Bugfixes + +- [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files +- [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 +- [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 +- [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value +- [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue +- [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt +- [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors +- [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper +- [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config +- [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline +- [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux +- [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version + +## v1.17.2 [2021-01-28] + +### Bugfixes + +- [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native +- [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function ## v1.17.1 [2021-01-27] -#### Release Notes - - Included a few more changes that add configuration options to plugins as it's been while since the last release - - - [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool - - [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows - - [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible - - [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser - - [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout - - [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames - - [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C - - [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input - - [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin - - [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection - -#### Bugfixes - - - [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` 
[outputs.influxdb_v2] add exponential backoff, and respect client error responses - - [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors - - [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. - - [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value - - [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. - - [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin - - [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue - - [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 - - [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 - - [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 - - [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. - - [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json. - - [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. 
- - [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge - - [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses - - [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 - - [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start - -#### New External Plugins - - - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin +### Release Notes +Included a few more changes that add configuration options to plugins as it's been while since the last release + +- [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool +- [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows +- [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible +- [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser +- [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout +- [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames +- [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C +- [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input +- [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin +- [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection + +### Bugfixes + +- [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses +- [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors +- [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. +- [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value +- [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. 
+- [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin
+- [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue
+- [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8
+- [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2
+- [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1
+- [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists.
+- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurement name, or if it's blank, use the 'name' field of the event's json.
+- [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle.
+- [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge
+- [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses
+- [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0
+- [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start
+
+### New External Plugins
+
+- [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin

## v1.17.0 [2020-12-18]

-#### Release Notes
-
- - Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware.
- - New input plugins: Riemann-Protobuff Listener, Intel PowerStat
- - New output plugins: Yandex.Cloud monitoring, Logz.io
- - New parser plugin: Prometheus
- - New serializer: Prometheus remote write
-
-#### Bugfixes
-
- - [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter
- - [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. 
- - [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests - - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test - - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 - - [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition - - [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write - - [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there - - [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data - - [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser - - [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers - - [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits - - [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory - -#### Features - - - [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement - - [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin - - [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI - - [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries - - [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. 
- - [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers - - [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries - - [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call - - [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB - - [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] - - [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty - - [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin - - [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le - - [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) - - [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data - - [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input - - [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin - - [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input - - [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values - - [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes - - [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin - - [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 - - [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin - - [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin - - [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input - - [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics - - [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor - - [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added - - [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support - - -#### New Parser Plugins - - - [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus - -#### New Serializer Plugins - - - [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer - -#### New Input Plugins - - - [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff 
Listener - - [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin - -#### New Output Plugins - - - [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring - - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin +### Release Notes +- Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. +- New input plugins: Riemann-Protobuff Listener, Intel PowerStat +- New output plugins: Yandex.Cloud monitoring, Logz.io +- New parser plugin: Prometheus +- New serializer: Prometheus remote write + +### Bugfixes + +- [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter +- [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. +- [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests +- [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test +- [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 +- [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition +- [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write +- [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there +- [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data +- [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser +- [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers +- [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits +- [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory -## v1.16.3 [2020-12-01] +### Features + +- [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement +- [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin +- [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI +- [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries +- [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. 
+- [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers +- [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries +- [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call +- [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB +- [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] +- [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty +- [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin +- [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le +- [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) +- [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data +- [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input +- [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin +- [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input +- [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values +- [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes +- [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin +- [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 +- [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin +- [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin +- [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input +- [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics +- [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor +- [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added +- [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support + +### New Parser Plugins + +- [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus + +### New Serializer Plugins + +- [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer + +### New Input Plugins + +- [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff Listener +- 
[#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin + +### New Output Plugins + +- [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring +- [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin -#### Bugfixes - - - [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 - - [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 - - [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype - - [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name - - [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" - - [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency - - [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor - - [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column - - [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output - - [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor - - [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark - - [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list - - [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging - - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors - - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function +## v1.16.3 [2020-12-01] +### Bugfixes + +- [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. 
#8482 +- [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 +- [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype +- [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name +- [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" +- [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency +- [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor +- [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column +- [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output +- [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor +- [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark +- [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list +- [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging +- [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors +- [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function ## v1.16.2 [2020-11-13] -#### Bugfixes - - - [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). 
- - [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs - - [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) - - [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme - - [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes - - [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config - - [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests - - [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 - - [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 - - [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag - - [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding - - [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 - - [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test - - [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin +### Bugfixes + +- [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). +- [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs +- [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) +- [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme +- [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes +- [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config +- [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests +- [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 +- [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 +- [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag +- [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding +- [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 +- [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test +- [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin ## v1.16.1 [2020-10-28] -#### Release Notes - - - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +### Release Notes -#### Bugfixes +- 
[#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI - - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix - - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters - - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc - - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 - - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir - - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error - - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers +### Bugfixes +- [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix +- [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters +- [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc +- [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 +- [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir +- [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error +- [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers ## v1.16.0 [2020-10-21] -#### Release Notes - - - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd - - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release - - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor - -#### Features - - - [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" - - [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag - - [#7894](https://github.com/influxdata/telegraf/pull/7894) 
`inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter - - [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin - - [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type - - [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin - - [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents - - [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin - - [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option - - [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin - - [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin - - [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output - - [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric - - [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route - - [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark - - [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) - - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting - -#### Bugfixes - - - [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config - - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags - - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed - - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors - - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored - - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF - - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds - - 
[#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions - - [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c - - [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF - - [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target - - [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types - - [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth - - [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure - - [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol - - [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters - - [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using logging - - [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic - - [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan - - [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping - - [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset - - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression - - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 - - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform - - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - -#### New Input Plugins - - - [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair - - [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak - - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - - [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient - - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - - [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv - -#### New Output Plugins - - - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue - - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo - - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - 
Contributed by @piotrwest - -#### New External Plugins - - See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins - - - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos - - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +### Release Notes -## v1.15.4 [2020-10-20] +- New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) +- [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck +- [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 +- [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd +- [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release +- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor -#### Bugfixes +### Features - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - - [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging +- [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" +- [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag +- [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter +- [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin +- [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type +- [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin +- [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents +- [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin +- [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option +- [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin +- [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance 
counters to SqlServer input plugin +- [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output +- [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric +- [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route +- [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark +- [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) +- [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag +- [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) +- [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands +- [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 +- [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code +- [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting + +### Bugfixes + +- [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config +- [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags +- [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed +- [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors +- [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements +- [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored +- [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF +- [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds +- [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions +- [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c +- [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF +- [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target +- [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types +- [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth +- [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure +- [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol +- [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters +- [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using 
logging +- [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic +- [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan +- [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping +- [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset +- [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression +- [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 +- [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform +- [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd + +### New Input Plugins + +- [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair +- [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak +- [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode +- [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData +- [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient +- [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak +- [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv + +### New Output Plugins + +- [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue +- [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo +- [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest + +### New External Plugins + +See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins + +- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. +- [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos +- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. 
-## v1.15.3 [2020-09-11] +## v1.15.4 [2020-10-20] -#### Release Notes +### Bugfixes - - Many documentation updates - - New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd +- [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging -#### Bugfixes +## v1.15.3 [2020-09-11] + +### Release Notes - - [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition - - [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. update dockerfiles to Go 1.14 - - [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError - - [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration - - [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer - - [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 - - [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec - - [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 - - [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests - - [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor - - [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url - - [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf +- Many documentation updates +- New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) + +### Bugfixes + +- [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition +- [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. 
update dockerfiles to Go 1.14 +- [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError +- [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration +- [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer +- [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 +- [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec +- [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 +- [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests +- [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor +- [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url +- [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf ## v1.15.2 [2020-07-31] -#### Bug Fixes +### Bug Fixes - [#7905](https://github.com/influxdata/telegraf/issues/7905): Fix RPM /var/log/telegraf permissions - [#7880](https://github.com/influxdata/telegraf/issues/7880): Fix tail following on EOF ## v1.15.1 [2020-07-22] -#### Bug Fixes +### Bug Fixes - [#7877](https://github.com/influxdata/telegraf/pull/7877): Fix architecture in non-amd64 deb and rpm packages. ## v1.15.0 [2020-07-22] -#### Release Notes +### Release Notes - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. @@ -845,12 +843,12 @@ Thank you to @zak-pawel for lots of linter fixes! `/etc/telegraf/telegraf.conf.sample`. The tar and zip packages now include the version in the top level directory. -#### New Inputs +### New Inputs - [nginx_sts](/plugins/inputs/nginx_sts/README.md) - Contributed by @zdmytriv - [redfish](/plugins/inputs/redfish/README.md) - Contributed by @sarvanikonda -#### New Processors +### New Processors - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [execd](/plugins/processors/execd/README.md) - Contributed by @influxdata @@ -860,12 +858,12 @@ Thank you to @zak-pawel for lots of linter fixes! - [reverse_dns](/plugins/processors/reverse_dns/README.md) - Contributed by @influxdata - [starlark](/plugins/processors/starlark/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi - [execd](/plugins/outputs/execd/README.md) - Contributed by @influxdata -#### Features +### Features - [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors. - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. @@ -913,7 +911,7 @@ Thank you to @zak-pawel for lots of linter fixes! 
- [#7154](https://github.com/influxdata/telegraf/pull/7154): Add v3 metadata support to ecs input. - [#7792](https://github.com/influxdata/telegraf/pull/7792): Support utf-16 in file and tail inputs. -#### Bug Fixes +### Bug Fixes - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. @@ -932,7 +930,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14.5 [2020-06-30] -#### Bug Fixes +### Bug Fixes - [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve the performance of the procstat input. - [#7658](https://github.com/influxdata/telegraf/pull/7658): Fix ping exit code handling on non-Linux. @@ -944,7 +942,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14.4 [2020-06-09] -#### Bug Fixes +### Bug Fixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. - [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor. @@ -953,7 +951,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14.3 [2020-05-19] -#### Bug Fixes +### Bug Fixes - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. - [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor. @@ -962,7 +960,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14.2 [2020-04-28] -#### Bug Fixes +### Bug Fixes - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. @@ -976,7 +974,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14.1 [2020-04-14] -#### Bug Fixes +### Bug Fixes - [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. - [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. @@ -986,7 +984,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.14 [2020-03-26] -#### Release Notes +### Release Notes - In the `sqlserver` input, the `sqlserver_azurestats` measurement has been renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric @@ -995,7 +993,7 @@ Thank you to @zak-pawel for lots of linter fixes! - The `date` processor now uses the UTC timezone when creating its tag. In previous versions the local time was used. -#### New Inputs +### New Inputs - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen @@ -1007,17 +1005,17 @@ Thank you to @zak-pawel for lots of linter fixes! 
- [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI -#### New Processors +### New Processors - [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern - [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour -#### New Outputs +### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert -#### Features +### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. @@ -1056,7 +1054,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. - [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. -#### Bug Fixes +### Bug Fixes - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. @@ -1072,11 +1070,11 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.13.4 [2020-02-25] -#### Release Notes +### Release Notes - Official packages now built with Go 1.13.8. -#### Bug Fixes +### Bug Fixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. @@ -1088,7 +1086,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.13.3 [2020-02-04] -#### Bug Fixes +### Bug Fixes - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. @@ -1097,7 +1095,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.13.2 [2020-01-21] -#### Bug Fixes +### Bug Fixes - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. @@ -1109,7 +1107,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.13.1 [2020-01-08] -#### Bug Fixes +### Bug Fixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. @@ -1124,7 +1122,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.13 [2019-12-12] -#### Release Notes +### Release Notes - Official packages built with Go 1.13.5. This affects the minimum supported version on several platforms, most notably requiring Windows 7 (2008 R2) or @@ -1136,7 +1134,7 @@ Thank you to @zak-pawel for lots of linter fixes! passthrough metrics will be unchanged. Refer to the `prometheus` input for details about the mapping. 
-#### New Inputs +### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston @@ -1145,15 +1143,15 @@ Thank you to @zak-pawel for lots of linter fixes! - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream - [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer -#### New Processors +### New Processors - [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt -#### New Aggregators +### New Aggregators - [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata -#### Features +### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. @@ -1195,7 +1193,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. - [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. -#### Bug Fixes +### Bug Fixes - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. @@ -1212,7 +1210,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12.6 [2019-11-19] -#### Bug Fixes +### Bug Fixes - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. @@ -1221,7 +1219,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12.5 [2019-11-12] -#### Bug Fixes +### Bug Fixes - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. @@ -1235,11 +1233,11 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12.4 [2019-10-23] -#### Release Notes +### Release Notes - Official packages built with Go 1.12.12. -#### Bug Fixes +### Bug Fixes - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. - [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. @@ -1247,7 +1245,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12.3 [2019-10-07] -#### Bug Fixes +### Bug Fixes - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. @@ -1259,7 +1257,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12.2 [2019-09-24] -#### Bug Fixes +### Bug Fixes - [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. @@ -1269,7 +1267,7 @@ Thank you to @zak-pawel for lots of linter fixes! 
## v1.12.1 [2019-09-10] -#### Bug Fixes +### Bug Fixes - [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. - [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. @@ -1282,14 +1280,14 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.12 [2019-09-03] -#### Release Notes +### Release Notes - The cluster health related fields in the elasticsearch input have been split out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. -#### New Inputs +### New Inputs - [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu @@ -1299,22 +1297,22 @@ Thank you to @zak-pawel for lots of linter fixes! - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao -#### New Parsers +### New Parsers - [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev -#### New Processors +### New Processors - [date](/plugins/processors/date/README.md) - Contributed by @influxdata - [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata - [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo -#### Features +### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. @@ -1366,7 +1364,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. - [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. @@ -1383,7 +1381,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11.5 [2019-08-27] -#### Bug Fixes +### Bug Fixes - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. @@ -1396,7 +1394,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11.4 [2019-08-06] -#### Bug Fixes +### Bug Fixes - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. - [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. @@ -1404,7 +1402,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11.3 [2019-07-23] -#### Bug Fixes +### Bug Fixes - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. 
- [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. @@ -1417,7 +1415,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11.2 [2019-07-09] -#### Bug Fixes +### Bug Fixes - [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. - [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. @@ -1428,7 +1426,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11.1 [2019-06-25] -#### Bug Fixes +### Bug Fixes - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. @@ -1442,7 +1440,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.11 [2019-06-11] -#### Release Notes +### Release Notes - The `uptime_format` field in the system input has been deprecated, use the `uptime` field instead. @@ -1450,7 +1448,7 @@ Thank you to @zak-pawel for lots of linter fixes! requires `GetMetricData` permissions instead of `GetMetricStatistics`. The `units` tag is not available from this API and is no longer collected. -#### New Inputs +### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx @@ -1460,20 +1458,20 @@ Thank you to @zak-pawel for lots of linter fixes! - [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje -#### New Aggregators +### New Aggregators - [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto -#### New Outputs +### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo - [health](/plugins/outputs/health/README.md) - Contributed by @influxdata -#### New Serializers +### New Serializers - [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck -#### Features +### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. @@ -1505,7 +1503,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. - [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. @@ -1531,7 +1529,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.10.4 [2019-05-14] -#### Bug Fixes +### Bug Fixes - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. @@ -1545,20 +1543,20 @@ Thank you to @zak-pawel for lots of linter fixes! 
## v1.10.3 [2019-04-16] -#### Bug Fixes +### Bug Fixes - [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. - [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. ## v1.10.2 [2019-04-02] -#### Release Notes +### Release Notes - String fields no longer have leading and trailing quotation marks removed in the grok parser. If you are capturing quoted strings you may need to update the patterns. -#### Bug Fixes +### Bug Fixes - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. @@ -1578,7 +1576,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.10.1 [2019-03-19] -#### Bug Fixes +### Bug Fixes - [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. @@ -1590,7 +1588,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.10 [2019-03-05] -#### New Inputs +### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata @@ -1601,16 +1599,16 @@ Thank you to @zak-pawel for lots of linter fixes! - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 - [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 -#### New Outputs +### New Outputs - [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye -#### New Serializers +### New Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller - [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno -#### Features +### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. @@ -1648,7 +1646,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. - [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. -#### Bug Fixes +### Bug Fixes - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. @@ -1664,7 +1662,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9.5 [2019-02-26] -#### Bug Fixes +### Bug Fixes - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. @@ -1678,7 +1676,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9.4 [2019-02-05] -#### Bug Fixes +### Bug Fixes - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. 
- [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. @@ -1687,7 +1685,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9.3 [2019-01-22] -#### Bug Fixes +### Bug Fixes - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. @@ -1698,7 +1696,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9.2 [2019-01-08] -#### Bug Fixes +### Bug Fixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. @@ -1717,7 +1715,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9.1 [2018-12-11] -#### Bug Fixes +### Bug Fixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. @@ -1732,7 +1730,7 @@ Thank you to @zak-pawel for lots of linter fixes! ## v1.9 [2018-11-20] -#### Release Notes +### Release Notes - The `http_listener` input plugin has been renamed to `influxdb_listener` and use of the original name is deprecated. The new name better describes the @@ -1750,7 +1748,7 @@ Thank you to @zak-pawel for lots of linter fixes! the new option `max_undelivered_messages` to limit the number of outstanding unwritten metrics. -#### New Inputs +### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe @@ -1759,11 +1757,11 @@ Thank you to @zak-pawel for lots of linter fixes! - [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment -#### New Outputs +### New Outputs - [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment -#### Features +### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. @@ -1786,7 +1784,7 @@ Thank you to @zak-pawel for lots of linter fixes! - [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. - [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. -#### Bug Fixes +### Bug Fixes - [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. - [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. @@ -2170,7 +2168,6 @@ Thank you to @zak-pawel for lots of linter fixes! - The new `http` input configured with `data_format = "json"` can perform the same task as the, now deprecated, `httpjson` input. - ### New Inputs - [http](./plugins/inputs/http/README.md) - Thanks to @grange74 @@ -2289,6 +2286,7 @@ Thank you to @zak-pawel for lots of linter fixes! 
## v1.5 [2017-12-14] ### New Plugins + - [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno - [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv - [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge @@ -2619,7 +2617,7 @@ machines. Telegraf < 1.3: -``` +```text # field_name value active+clean 123 active+clean+scrubbing 3 @@ -2627,7 +2625,7 @@ active+clean+scrubbing 3 Telegraf >= 1.3: -``` +```text # field_name value tag count 123 state=active+clean count 3 state=active+clean+scrubbing @@ -2937,7 +2935,7 @@ that pertain to node vs. namespace statistics. This means that the default github_webhooks config: -``` +```toml # A Github Webhook Event collector [[inputs.github_webhooks]] ## Address and port to host Webhook listener on @@ -2946,7 +2944,7 @@ This means that the default github_webhooks config: should now look like: -``` +```toml # A Webhooks Event collector [[inputs.webhooks]] ## Address and port to host Webhook listener on @@ -3001,7 +2999,7 @@ consistent with the behavior of `collection_jitter`. - [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! - [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. - [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. -- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified +- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL `http://localhost:15672` if not specified - [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. - [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified - [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. @@ -3099,8 +3097,8 @@ to "stdout". ### Release Notes -- **Breaking change** in jolokia plugin. See -https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md +- **Breaking change** in jolokia plugin. See the +[jolokia README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md) for updated configuration. The plugin will now support proxy mode and will make POST requests. @@ -3205,14 +3203,16 @@ It is not included on the report path. This is necessary for reporting host disk ## v0.12.1 [2016-04-14] ### Release Notes + - Breaking change in the dovecot input plugin. See Features section below. -- Graphite output templates are now supported. See -https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +- Graphite output templates are now supported. See the +[Output Formats README](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite) - Possible breaking change for the librato and graphite outputs. Telegraf will no longer insert field names when the field is simply named `value`. This is because the `value` field is redundant in the graphite/librato context. 
### Features + - [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra! - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. @@ -3225,6 +3225,7 @@ because the `value` field is redundant in the graphite/librato context. - [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin. ### Bug Fixes + - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) - [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! - [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! @@ -3233,6 +3234,7 @@ because the `value` field is redundant in the graphite/librato context. ## v0.12.0 [2016-04-05] ### Features + - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension @@ -3252,6 +3254,7 @@ because the `value` field is redundant in the graphite/librato context. - [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bug Fixes + - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. @@ -3266,21 +3269,23 @@ because the `value` field is redundant in the graphite/librato context. ## v0.11.1 [2016-03-17] ### Release Notes + - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features + - [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bug Fixes + - [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix - [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic ## v0.11.0 [2016-03-15] -### Release Notes - ### Features + - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! @@ -3298,6 +3303,7 @@ because the `value` field is redundant in the graphite/librato context. - [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics. 
### Bug Fixes + - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! @@ -3313,15 +3319,18 @@ because the `value` field is redundant in the graphite/librato context. ## v0.10.4.1 ### Release Notes + - Bug in the build script broke deb and rpm packages. ### Bug Fixes + - [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken - [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken ## v0.10.4 [2016-02-24] ### Release Notes + - The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose. - There are also now namedrop/namepass parameters for passing/dropping based @@ -3329,6 +3338,7 @@ on the metric _name_. - Experimental windows builds now available. ### Features + - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! @@ -3336,12 +3346,14 @@ on the metric _name_. - [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath! ### Bug Fixes + - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. - [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters. ## v0.10.3 [2016-02-18] ### Release Notes + - Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and @@ -3358,6 +3370,7 @@ points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section. ### Features + - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! @@ -3372,6 +3385,7 @@ and is in the `[agent]` config section. - [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes! ### Bug Fixes + - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. @@ -3381,6 +3395,7 @@ and is in the `[agent]` config section. ## v0.10.2 [2016-02-04] ### Release Notes + - Statsd timing measurements are now aggregated into a single measurement with fields. - Graphite output now inserts tags into the bucket in alphabetical order. @@ -3390,6 +3405,7 @@ doing the opposite of what it claimed to do (yikes). 
It's been replaced by `insecure_skip_verify` ### Features + - [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse! - [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type. - [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch! @@ -3399,6 +3415,7 @@ doing the opposite of what it claimed to do (yikes). It's been replaced by - [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support ### Bug Fixes + - [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements. - [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working. - [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong. @@ -3420,6 +3437,7 @@ for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from. ### Features + - [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! @@ -3444,6 +3462,7 @@ specifying a docker endpoint to get metrics from. - [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso! ### Bug Fixes + - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! @@ -3456,6 +3475,7 @@ specifying a docker endpoint to get metrics from. ## v0.10.0 [2016-01-12] ### Release Notes + - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf` - **breaking change** `plugins` have been renamed to `inputs`. This was done because @@ -3476,13 +3496,14 @@ instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). -### Packaging change note: +### Packaging change note RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup. ### Features + - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags - Added ability to specify per-plugin measurement suffix and prefix. @@ -3494,17 +3515,20 @@ configurations overwritten by the upgrade. There is a backup stored at ## v0.2.5 [unreleased] ### Features + - [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! - [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! 
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bug Fixes + - [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! - [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features + - [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! - [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! - [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters @@ -3515,12 +3539,14 @@ configurations overwritten by the upgrade. There is a backup stored at - [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bug Fixes + - [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue - [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. ## v0.2.3 [2015-11-30] ### Release Notes + - **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`. and most of the config option names have changed. This only affects the kafka consumer _plugin_ (not the @@ -3530,7 +3556,7 @@ functional. - Plugins can now be specified as a list, and multiple plugin instances of the same type can be specified, like this: -``` +```toml [[inputs.cpu]] percpu = false totalcpu = true @@ -3545,6 +3571,7 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features + - [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! - [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. - [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! @@ -3552,21 +3579,25 @@ same type can be specified, like this: - [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bug Fixes + - [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. - [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] ### Release Notes + - 0.2.1 has a bug where all lists within plugins get duplicated, this includes lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bug Fixes + - [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] ### Release Notes + - Telegraf will no longer use docker-compose for "long" unit test, it has been changed to just run docker commands in the Makefile. See `make docker-run` and `make docker-kill`. `make test` will still run all unit tests with docker. @@ -3579,6 +3610,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and same type. ### Features + - [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive! - [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! - [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! @@ -3591,6 +3623,7 @@ same type. 
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! ### Bug Fixes + - [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin. - [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements. - [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes @@ -3599,6 +3632,7 @@ same type. ## v0.2.0 [2015-10-27] ### Release Notes + - The -test flag will now only output 2 collections for plugins that need it - There is a new agent configuration option: `flush_interval`. This option tells Telegraf how often to flush data to InfluxDB and other output sinks. For example, @@ -3615,6 +3649,7 @@ be controlled via the `round_interval` and `flush_jitter` config options. - Telegraf will now retry metric flushes twice ### Features + - [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info - [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini - [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin @@ -3639,6 +3674,7 @@ of metrics collected and from how many inputs. - [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham! ### Bug Fixes + - [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! - [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! - [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! @@ -3651,6 +3687,7 @@ of metrics collected and from how many inputs. ## v0.1.9 [2015-09-22] ### Release Notes + - InfluxDB output config change: `url` is now `urls`, and is a list. Config files will still be backwards compatible if only `url` is specified. - The -test flag will now output two metric collections @@ -3672,6 +3709,7 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features + - [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support - [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! - [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! @@ -3682,6 +3720,7 @@ re-added in a "verbose" mode if there is demand for it. and filtering when specifying a config file. ### Bug Fixes + - [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug @@ -3697,10 +3736,12 @@ and filtering when specifying a config file. ## v0.1.8 [2015-09-04] ### Release Notes + - Telegraf will now write data in UTC at second precision by default - Now using Go 1.5 to build telegraf ### Features + - [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. 
Thanks @KPACHbIuLLIAnO4 - [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes @@ -3714,6 +3755,7 @@ and filtering when specifying a config file. ## v0.1.7 [2015-08-28] ### Features + - [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. - [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. @@ -3723,6 +3765,7 @@ and filtering when specifying a config file. - Indent the toml config file for readability ### Bug Fixes + - [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. - [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. - [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! @@ -3731,11 +3774,13 @@ and filtering when specifying a config file. ## v0.1.6 [2015-08-20] ### Features + - [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! - [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies - [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bug Fixes + - [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility - [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! - [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! @@ -3744,6 +3789,7 @@ and filtering when specifying a config file. ## v0.1.5 [2015-08-13] ### Features + - [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! - [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! - [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! @@ -3762,6 +3808,7 @@ and filtering when specifying a config file. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! ### Bug Fixes + - [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users - [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes - [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama @@ -3771,31 +3818,37 @@ and filtering when specifying a config file. ## v0.1.4 [2015-07-09] ### Features + - [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! ### Bug Fixes + - [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! - [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! ## v0.1.3 [2015-07-05] ### Features + - [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! - [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham! ### Bug Fixes + - [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! 
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! ## v0.1.2 [2015-07-01] ### Features + - [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! - [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. - [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! - [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bug Fixes + - [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. - [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! - [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d5732dcbfa1d1..60ac643ac7689 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -### Contributing +# Contributing 1. [Sign the CLA][cla]. 2. Open a [new issue][] to discuss the changes you would like to make. This is @@ -16,30 +16,32 @@ **Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because github will use the pull request title if there are multiple commits, but if there is only one commit it will use it instead. -#### When will your contribution get released? +## When will your contribution get released? + We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your fix is a bug fix, it will be released in the next patch release after it is merged to master. If your release is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December. -#### Contributing an External Plugin +## Contributing an External Plugin Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. -#### Security Vulnerability Reporting +## Security Vulnerability Reporting + InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our -open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about -security vulnerability reporting, +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). 
-### GoDoc +## GoDoc Public interfaces for inputs, outputs, processors, aggregators, metrics, and the accumulator can be found in the GoDoc: [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) -### Common development tasks +## Common development tasks **Adding a dependency:** @@ -52,7 +54,7 @@ Telegraf uses Go modules. Assuming you can already build the project, run this i Before opening a pull request you should run the linter checks and the short tests. -``` +```shell make check make test ``` @@ -63,24 +65,27 @@ make test Running the integration tests requires several docker containers to be running. You can start the containers with: -``` + +```shell docker-compose up ``` To run only the integration tests use: -``` +```shell make test-integration ``` To run the full test suite use: -``` + +```shell make test-all ``` Use `make docker-kill` to stop the containers. ### For more developer resources + - [Code Style][codestyle] - [Deprecation][deprecation] - [Logging][logging] @@ -90,7 +95,7 @@ Use `make docker-kill` to stop the containers. - [Packaging][packaging] - [Profiling][profiling] - [Reviews][reviews] -- [Sample Config][sample config] +- [Sample Config][sample config] [cla]: https://www.influxdata.com/legal/cla/ [new issue]: https://github.com/influxdata/telegraf/issues/new/choose diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 0de5ae47949d9..baa3ff1daf114 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -1,18 +1,19 @@ # External Plugins -This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). -Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. +This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). +Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. Pull requests welcome. ## Inputs + - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) - [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - [rand](https://github.com/ssoroka/rand) - Generate random numbers -- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). 
+- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels @@ -27,8 +28,9 @@ Pull requests welcome. - [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS ## Outputs + - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. ## Processors - - [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. - + +- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. diff --git a/SECURITY.md b/SECURITY.md index 1d74711aa9079..5b72cf8634467 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,5 +2,5 @@ ## Reporting a Vulnerability -InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). From d5d1f310da033b49b45c5582d65d08b7a23f5fc3 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:38 -0700 Subject: [PATCH 067/133] chore: clean up all markdown lint errors in common plugins (#10155) --- plugins/common/shim/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/common/shim/README.md b/plugins/common/shim/README.md index 5453c90a4d548..e58249608ae48 100644 --- a/plugins/common/shim/README.md +++ b/plugins/common/shim/README.md @@ -4,6 +4,7 @@ The goal of this _shim_ is to make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This allows anyone to build and run it as a separate app using one of the execd plugins: + - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) @@ -56,8 +57,8 @@ execd plugins: Refer to the execd plugin readmes for more information. -## Congratulations! +## Congratulations You've done it! Consider publishing your plugin to github and open a Pull Request back to the Telegraf repo letting us know about the availability of your -[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). \ No newline at end of file +[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). 
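The shim README above describes running an extracted plugin as a stand-alone binary through one of the `execd` plugins. As a rough illustration, a plugin compiled with the shim might be wired into Telegraf with a configuration like the sketch below; the binary path and its flag are hypothetical placeholders, and the option names assume the settings documented for the `inputs.execd` plugin.

```toml
# Minimal sketch: run an externally-compiled input plugin via inputs.execd.
# The binary path and the -config flag are hypothetical placeholders.
[[inputs.execd]]
  ## Command to start the plugin binary built with the execd shim.
  command = ["/usr/local/bin/myplugin", "-config", "/etc/telegraf/myplugin.conf"]

  ## How Telegraf signals the plugin to collect; "none" assumes the plugin
  ## gathers on its own interval (option name taken from the execd input docs).
  signal = "none"
```
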
From 97826bdc73252c232924fc2db013b00301259e0a Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:45:44 -0700 Subject: [PATCH 068/133] chore: clean up all markdown lint errors in second half of docs directory (#10156) --- docs/PROCESSORS.md | 74 ++++++++++++------------ docs/PROFILING.md | 3 +- docs/README.md | 2 +- docs/SQL_DRIVERS_INPUT.md | 9 ++- docs/TEMPLATE_PATTERN.md | 23 ++++---- docs/TLS.md | 39 +++++++------ docs/WINDOWS_SERVICE.md | 14 +++-- docs/developers/CODE_STYLE.md | 3 +- docs/developers/DEPRECATION.md | 18 ++++-- docs/developers/LOGGING.md | 6 +- docs/developers/METRIC_FORMAT_CHANGES.md | 7 +++ docs/developers/PACKAGING.md | 9 ++- docs/developers/PROFILING.md | 31 ++++++---- docs/developers/SAMPLE_CONFIG.md | 12 +++- docs/maintainers/LABELS.md | 9 +-- docs/maintainers/PULL_REQUESTS.md | 17 +++--- docs/maintainers/RELEASES.md | 10 ++++ 17 files changed, 173 insertions(+), 113 deletions(-) diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 30b2c643de8f6..44def8c9273bf 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -1,8 +1,8 @@ -### Processor Plugins +# Processor Plugins This section is for developers who want to create a new processor plugin. -### Processor Plugin Guidelines +## Processor Plugin Guidelines * A processor must conform to the [telegraf.Processor][] interface. * Processors should call `processors.Add` in their `init` function to register @@ -12,13 +12,13 @@ This section is for developers who want to create a new processor plugin. * The `SampleConfig` function should return valid toml that describes how the processor can be configured. This is include in the output of `telegraf config`. -- The `SampleConfig` function should return valid toml that describes how the +* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this processor does. -- Follow the recommended [Code Style][]. +* Follow the recommended [Code Style][]. -### Processor Plugin Example +## Processor Plugin Example ```go package printer @@ -26,47 +26,47 @@ package printer // printer.go import ( - "fmt" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) type Printer struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` ` func (p *Printer) SampleConfig() string { - return sampleConfig + return sampleConfig } func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." + return "Print all metrics that pass through this filter." } // Init is for setup, and validating config. func (p *Printer) Init() error { - return nil + return nil } func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - fmt.Println(metric.String()) - } - return in + for _, metric := range in { + fmt.Println(metric.String()) + } + return in } func init() { - processors.Add("printer", func() telegraf.Processor { - return &Printer{} - }) + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) } ``` -### Streaming Processors +## Streaming Processors Streaming processors are a new processor type available to you. 
They are particularly useful to implement processor types that use background processes @@ -84,7 +84,7 @@ Some differences from classic Processors: * Processors should call `processors.AddStreaming` in their `init` function to register themselves. See below for a quick example. -### Streaming Processor Example +## Streaming Processor Example ```go package printer @@ -92,30 +92,30 @@ package printer // printer.go import ( - "fmt" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) type Printer struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` ` func (p *Printer) SampleConfig() string { - return sampleConfig + return sampleConfig } func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." + return "Print all metrics that pass through this filter." } // Init is for setup, and validating config. func (p *Printer) Init() error { - return nil + return nil } // Start is called once when the plugin starts; it is only called once per @@ -135,13 +135,13 @@ func (p *Printer) Start(acc telegraf.Accumulator) error { // Metrics you don't want to pass downstream should have metric.Drop() called, // rather than simply omitting the acc.AddMetric() call func (p *Printer) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { - // print! - fmt.Println(metric.String()) - // pass the metric downstream, or metric.Drop() it. - // Metric will be dropped if this function returns an error. - acc.AddMetric(metric) + // print! + fmt.Println(metric.String()) + // pass the metric downstream, or metric.Drop() it. + // Metric will be dropped if this function returns an error. + acc.AddMetric(metric) - return nil + return nil } // Stop gives you an opportunity to gracefully shut down the processor. @@ -154,9 +154,9 @@ func (p *Printer) Stop() error { } func init() { - processors.AddStreaming("printer", func() telegraf.StreamingProcessor { - return &Printer{} - }) + processors.AddStreaming("printer", func() telegraf.StreamingProcessor { + return &Printer{} + }) } ``` diff --git a/docs/PROFILING.md b/docs/PROFILING.md index a0851c8f18b12..428158e690576 100644 --- a/docs/PROFILING.md +++ b/docs/PROFILING.md @@ -6,7 +6,7 @@ By default, the profiling is turned off. To enable profiling you need to specify address to config parameter `pprof-addr`, for example: -``` +```shell telegraf --config telegraf.conf --pprof-addr localhost:6060 ``` @@ -21,4 +21,3 @@ or to look at a 30-second CPU profile: `go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30` To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser. - diff --git a/docs/README.md b/docs/README.md index 99320dee95588..431118259ebce 100644 --- a/docs/README.md +++ b/docs/README.md @@ -21,4 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md -[nightlies]: /docs/NIGHTLIES.md \ No newline at end of file +[nightlies]: /docs/NIGHTLIES.md diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 81049fcee9f99..6a187d0fa0c08 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -5,7 +5,7 @@ might change between versions. 
Please check the driver documentation for availab database | driver | aliases | example DSN | comment ---------------------| ------------------------------------------------------| --------------- | -------------------------------------------------------------------------------------- | ------- -CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres
pgx | see _postgres_ driver | uses PostgresQL driver +CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information @@ -16,28 +16,35 @@ TiDB | [tidb](https://github.com/go-sql-driver/mysql) | m ## Comments ### Driver aliases + Some database drivers are supported though another driver (e.g. CockroachDB). For other databases we provide a more obvious name (e.g. postgres) compared to the driver name. For all of those drivers you might use an _alias_ name during configuration. ### Example data-source-name DSN + The given examples are just that, so please check the driver documentation for the exact format and available options and parameters. Please note that the format of a DSN might also change between driver version. ### Type conversions + Telegraf relies on type conversion of the database driver and/or the golang sql framework. In case you find any problem, please open an issue! ## Help + If nothing seems to work, you might find help in the telegraf forum or in the chat. ### The documentation is wrong + Please open an issue or even better send a pull-request! ### I found a bug + Please open an issue or even better send a pull-request! ### My database is not supported + We currently cannot support CGO drivers in telegraf! Please check if a **pure Go** driver for the [golang sql framework](https://golang.org/pkg/database/sql/) exists. If you found such a driver, please let us know by opening an issue or even better by sending a pull-request! diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md index 42a5abea56f30..74443a24bbd2a 100644 --- a/docs/TEMPLATE_PATTERN.md +++ b/docs/TEMPLATE_PATTERN.md @@ -4,7 +4,8 @@ Template patterns are a mini language that describes how a dot delimited string should be mapped to and from [metrics][]. A template has the form: -``` + +```text "host.mytag.mytag.measurement.measurement.field*" ``` @@ -25,9 +26,9 @@ can also be specified multiple times. **NOTE:** `measurement` must be specified in your template. **NOTE:** `field*` cannot be used in conjunction with `measurement*`. -### Examples +## Examples -#### Measurement & Tag Templates +### Measurement & Tag Templates The most basic template is to specify a single transformation to apply to all incoming metrics. So the following template: @@ -40,7 +41,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text us.west.cpu.load 100 => cpu.load,region=us.west value=100 ``` @@ -55,7 +56,7 @@ templates = [ ] ``` -#### Field Templates +### Field Templates The field keyword tells Telegraf to give the metric that field name. So the following template: @@ -69,7 +70,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.percent.eu-east 100 => cpu_usage,region=eu-east idle_percent=100 ``` @@ -86,12 +87,12 @@ templates = [ which would result in the following Graphite -> Telegraf transformation. 
-``` +```text cpu.usage.eu-east.idle.percentage 100 => cpu_usage,region=eu-east idle_percentage=100 ``` -#### Filter Templates +### Filter Templates Users can also filter the template(s) to use based on the name of the bucket, using glob matching, like so: @@ -105,7 +106,7 @@ templates = [ which would result in the following transformation: -``` +```text cpu.load.eu-east 100 => cpu_load,region=eu-east value=100 @@ -113,7 +114,7 @@ mem.cached.localhost 256 => mem_cached,host=localhost value=256 ``` -#### Adding Tags +### Adding Tags Additional tags can be added to a metric that don't exist on the received metric. You can add additional tags by specifying them after the pattern. @@ -128,7 +129,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.eu-east 100 => cpu_usage,region=eu-east,datacenter=1a idle=100 ``` diff --git a/docs/TLS.md b/docs/TLS.md index 74b2512f1e59d..133776b7faf73 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -5,9 +5,10 @@ possible, plugins will provide the standard settings described below. With the exception of the advanced configuration available TLS settings will be documented in the sample configuration. -### Client Configuration +## Client Configuration For client TLS support we have the following options: + ```toml ## Root certificates for verifying server certificates encoded in PEM format. # tls_ca = "/etc/telegraf/ca.pem" @@ -52,23 +53,23 @@ for the interest of brevity. ## Define list of allowed ciphers suites. If not defined the default ciphers ## supported by Go will be used. ## ex: tls_cipher_suites = [ -## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", -## "TLS_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_RSA_WITH_AES_128_CBC_SHA", -## "TLS_RSA_WITH_AES_256_CBC_SHA" +## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +## "TLS_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_RSA_WITH_AES_128_CBC_SHA", +## "TLS_RSA_WITH_AES_256_CBC_SHA" ## ] # tls_cipher_suites = [] @@ -80,6 +81,7 @@ for the interest of brevity. 
``` Cipher suites for use with `tls_cipher_suites`: + - `TLS_RSA_WITH_RC4_128_SHA` - `TLS_RSA_WITH_3DES_EDE_CBC_SHA` - `TLS_RSA_WITH_AES_128_CBC_SHA` @@ -107,6 +109,7 @@ Cipher suites for use with `tls_cipher_suites`: - `TLS_CHACHA20_POLY1305_SHA256` TLS versions for use with `tls_min_version` or `tls_max_version`: + - `TLS10` - `TLS11` - `TLS12` diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index b0b6ee5adf358..fe77a16bf7475 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -9,29 +9,31 @@ the general steps to set it up. 3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf` 4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""): - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install ``` 5. Edit the configuration file to meet your needs 6. To check that it works, run: - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test ``` 7. To start collecting data, run: - ``` + ```shell > net start telegraf ``` ## Config Directory You can also specify a `--config-directory` for the service to use: + 1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d` 2. Include the `--config-directory` option when registering the service: - ``` + + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d ``` @@ -54,7 +56,7 @@ filtering options. However, if you do need to run multiple telegraf instances on a single system, you can install the service with the `--service-name` and `--service-display-name` flags to give the services unique names: -``` +```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1" > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" ``` @@ -64,7 +66,7 @@ on a single system, you can install the service with the `--service-name` and When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded. Check event log for an error reported by `telegraf` service in case of Telegraf service reports failure on its start: Event Viewer->Windows Logs->Application -**Troubleshooting common error #1067** +### common error #1067 When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start diff --git a/docs/developers/CODE_STYLE.md b/docs/developers/CODE_STYLE.md index 1bbb2b14d84c4..61485aa8c8f98 100644 --- a/docs/developers/CODE_STYLE.md +++ b/docs/developers/CODE_STYLE.md @@ -1,7 +1,8 @@ # Code Style + Code is required to be formatted using `gofmt`, this covers most code style requirements. It is also highly recommended to use `goimports` to automatically order imports. -Please try to keep lines length under 80 characters, the exact number of +Please try to keep lines length under 80 characters, the exact number of characters is not strict but it generally helps with readability. 
diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md index a3da79a5ac8e8..fe262eeed4bd2 100644 --- a/docs/developers/DEPRECATION.md +++ b/docs/developers/DEPRECATION.md @@ -1,4 +1,5 @@ # Deprecation + Deprecation is the primary tool for making changes in Telegraf. A deprecation indicates that the community should move away from using a feature, and documents that the feature will be removed in the next major update (2.0). @@ -36,14 +37,17 @@ Add the deprecation warning to the plugin's README: Log a warning message if the plugin is used. If the plugin is a ServiceInput, place this in the `Start()` function, for regular Input's log it only the first time the `Gather` function is called. + ```go log.Println("W! [inputs.logparser] The logparser plugin is deprecated in 1.10. " + - "Please use the tail plugin with the grok data_format as a replacement.") + "Please use the tail plugin with the grok data_format as a replacement.") ``` + ## Deprecate options Mark the option as deprecated in the sample config, include the deprecation version and any replacement. + ```toml ## Broker URL ## deprecated in 1.7; use the brokers option @@ -54,17 +58,19 @@ In the plugins configuration struct, mention that the option is deprecated: ```go type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers + URL string `toml:"url"` // deprecated in 1.7; use brokers } ``` Finally, use the plugin's `Init() error` method to display a log message at warn level. The message should include the offending configuration option and any suggested replacement: + ```go func (a *AMQPConsumer) Init() error { - if p.URL != "" { - p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") - } - return nil + if p.URL != "" { + p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") + } + + return nil } ``` diff --git a/docs/developers/LOGGING.md b/docs/developers/LOGGING.md index 60de15699a6e8..e009968c4df36 100644 --- a/docs/developers/LOGGING.md +++ b/docs/developers/LOGGING.md @@ -8,12 +8,13 @@ need to be specified for each log call. ```go type MyPlugin struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } ``` You can then use this Logger in the plugin. Use the method corresponding to the log level of the message. + ```go p.Log.Errorf("Unable to write to file: %v", err) ``` @@ -22,6 +23,7 @@ p.Log.Errorf("Unable to write to file: %v", err) In other sections of the code it is required to add the log level and module manually: + ```go log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) ``` @@ -37,6 +39,7 @@ support setting the log level on a per module basis, it is especially important to not over do it with debug logging. If the plugin is listening on a socket, log a message with the address of the socket: + ```go p.log.InfoF("Listening on %s://%s", protocol, l.Addr()) ``` @@ -59,6 +62,7 @@ normal on some systems. The log level is indicated by a single character at the start of the log message. Adding this prefix is not required when using the Plugin Logger. 
+ - `D!` Debug - `I!` Info - `W!` Warning diff --git a/docs/developers/METRIC_FORMAT_CHANGES.md b/docs/developers/METRIC_FORMAT_CHANGES.md index 32bfe0a2db5a7..7d6477c253aca 100644 --- a/docs/developers/METRIC_FORMAT_CHANGES.md +++ b/docs/developers/METRIC_FORMAT_CHANGES.md @@ -3,14 +3,17 @@ When making changes to an existing input plugin, care must be taken not to change the metric format in ways that will cause trouble for existing users. This document helps developers understand how to make metric format changes safely. ## Changes can cause incompatibilities + If the metric format changes, data collected in the new format can be incompatible with data in the old format. Database queries designed around the old format may not work with the new format. This can cause application failures. Some metric format changes don't cause incompatibilities. Also, some unsafe changes are necessary. How do you know what changes are safe and what to do if your change isn't safe? ## Guidelines + The main guideline is just to keep compatibility in mind when making changes. Often developers are focused on making a change that fixes their particular problem and they forget that many people use the existing code and will upgrade. When you're coding, keep existing users and applications in mind. ### Renaming, removing, reusing + Database queries refer to the metric and its tags and fields by name. Any Telegraf code change that changes those names has the potential to break an existing query. Similarly, removing tags or fields can break queries. Changing the meaning of an existing tag value or field value or reusing an existing one in a new way isn't safe. Although queries that use these tags/field may not break, they will not work as they did before the change. @@ -18,9 +21,11 @@ Changing the meaning of an existing tag value or field value or reusing an exist Adding a field doesn't break existing queries. Queries that select all fields and/or tags (like "select * from") will return an extra series but this is often useful. ### Performance and storage + Time series databases can store large amounts of data but many of them don't perform well on high cardinality data. If a metric format change includes a new tag that holds high cardinality data, database performance could be reduced enough to cause existing applications not to work as they previously did. Metric format changes that dramatically increase the number of tags or fields of a metric can increase database storage requirements unexpectedly. Both of these types of changes are unsafe. ### Make unsafe changes opt-in + If your change has the potential to seriously affect existing users, the change must be opt-in. To do this, add a plugin configuration setting that lets the user select the metric format. Make the setting's default value select the old metric format. When new users add the plugin they can choose the new format and get its benefits. When existing users upgrade, their config files won't have the new setting so the default will ensure that there is no change. When adding a setting, avoid using a boolean and consider instead a string or int for future flexibility. A boolean can only handle two formats but a string can handle many. For example, compare use_new_format=true and features=["enable_foo_fields"]; the latter is much easier to extend and still very descriptive. 
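As an illustration of the opt-in approach described above, a plugin could expose the new metric format behind a feature list rather than a boolean. The plugin name and option values below are made up for the example and are not real Telegraf settings.

```toml
# Hypothetical opt-in for a metric format change.
# "features" and "enable_foo_fields" are illustrative names only.
[[inputs.example]]
  ## Opt in to the new metric format. Omitting this line keeps the old
  ## format, so existing users are unaffected when they upgrade.
  features = ["enable_foo_fields"]
```
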
@@ -28,6 +33,7 @@ When adding a setting, avoid using a boolean and consider instead a string or in If you want to encourage existing users to use the new format you can log a warning once on startup when the old format is selected. The warning should tell users in a gentle way that they can upgrade to a better metric format. If it doesn't make sense to maintain multiple metric formats forever, you can change the default on a major release or even remove the old format completely. See [[Deprecation]] for details. ### Utility + Changes should be useful to many or most users. A change that is only useful for a small number of users may not accepted, even if it's off by default. ## Summary table @@ -39,4 +45,5 @@ Changes should be useful to many or most users. A change that is only useful fo | field | unsafe | unsafe | ok as long as it's useful for existing users and is worth the added space | ## References + InfluxDB Documentation: "Schema and data layout" diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index 000479c94ce42..b8d4d1739f0b2 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -21,12 +21,14 @@ building the rpm/deb as it is less system dependent. Pull the CI images from quay, the version corresponds to the version of Go that is used to build the binary: -``` + +```shell docker pull quay.io/influxdb/telegraf-ci:1.9.7 ``` Start a shell in the container: -``` + +```shell docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash ``` @@ -42,6 +44,7 @@ From within the container: * Change `include_packages` to change what package you want, run `make help` to see possible values From the host system, copy the build artifacts out of the container: -``` + +```shell docker cp romantic_ptolemy:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm . ``` diff --git a/docs/developers/PROFILING.md b/docs/developers/PROFILING.md index 81cdf1980304d..c1f02e4080d4c 100644 --- a/docs/developers/PROFILING.md +++ b/docs/developers/PROFILING.md @@ -1,20 +1,24 @@ # Profiling + This article describes how to collect performance traces and memory profiles from Telegraf. If you are submitting this for an issue, please include the version.txt generated below. Use the `--pprof-addr` option to enable the profiler, the easiest way to do this may be to add this line to `/etc/default/telegraf`: -``` + +```shell TELEGRAF_OPTS="--pprof-addr localhost:6060" ``` Restart Telegraf to activate the profile address. -#### Trace Profile +## Trace Profile + Collect a trace during the time where the performance issue is occurring. 
This example collects a 10 second trace and runs for 10 seconds: -``` + +```shell curl 'http://localhost:6060/debug/pprof/trace?seconds=10' > trace.bin telegraf --version > version.txt go env GOOS GOARCH >> version.txt @@ -22,34 +26,41 @@ go env GOOS GOARCH >> version.txt The `trace.bin` and `version.txt` files can be sent in for analysis or, if desired, you can analyze the trace with: -``` + +```shell go tool trace trace.bin ``` -#### Memory Profile +## Memory Profile + Collect a heap memory profile: -``` + +```shell curl 'http://localhost:6060/debug/pprof/heap' > mem.prof telegraf --version > version.txt go env GOOS GOARCH >> version.txt ``` Analyze: -``` + +```shell $ go tool pprof mem.prof (pprof) top5 ``` -#### CPU Profile +## CPU Profile + Collect a 30s CPU profile: -``` + +```shell curl 'http://localhost:6060/debug/pprof/profile' > cpu.prof telegraf --version > version.txt go env GOOS GOARCH >> version.txt ``` Analyze: -``` + +```shell go tool pprof cpu.prof (pprof) top5 ``` diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md index d0969212fecb2..2f67535de54b2 100644 --- a/docs/developers/SAMPLE_CONFIG.md +++ b/docs/developers/SAMPLE_CONFIG.md @@ -5,13 +5,15 @@ The sample config file is generated from a results of the `SampleConfig()` and You can generate a full sample config: -``` + +```shell telegraf config ``` You can also generate the config for a particular plugin using the `-usage` option: -``` + +```shell telegraf --usage influxdb ``` @@ -21,6 +23,7 @@ In the config file we use 2-space indention. Since the config is [TOML](https://github.com/toml-lang/toml) the indention has no meaning. Documentation is double commented, full sentences, and ends with a period. + ```toml ## This text describes what an the exchange_type option does. # exchange_type = "topic" @@ -29,14 +32,15 @@ Documentation is double commented, full sentences, and ends with a period. Try to give every parameter a default value whenever possible. If an parameter does not have a default or must frequently be changed then have it uncommented. + ```toml ## Brokers are the AMQP brokers to connect to. brokers = ["amqp://localhost:5672"] ``` - Options where the default value is usually sufficient are normally commented out. The commented out value is the default. + ```toml ## What an exchange type is. # exchange_type = "topic" @@ -44,6 +48,7 @@ out. The commented out value is the default. If you want to show an example of a possible setting filled out that is different from the default, show both: + ```toml ## Static routing key. Used when no routing_tag is set or as a fallback ## when the tag specified in routing tag is not found. @@ -53,6 +58,7 @@ different from the default, show both: Unless parameters are closely related, add a space between them. Usually parameters is closely related have a single description. + ```toml ## If true, queue will be declared as an exclusive queue. # queue_exclusive = false diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md index 1ee6cc7517c74..5b8b8bb216796 100644 --- a/docs/maintainers/LABELS.md +++ b/docs/maintainers/LABELS.md @@ -26,6 +26,7 @@ For bugs you may want to add `panic`, `regression`, or `upstream` to provide further detail. 
Summary of Labels: + | Label | Description | Purpose | | --- | ----------- | ---| | `area/*` | These labels each corresponding to a plugin or group of plugins that can be added to identify the affected plugin or group of plugins | categorization | @@ -40,9 +41,9 @@ Summary of Labels: | `good first issue` | This is a smaller issue suited for getting started in Telegraf, Golang, and contributing to OSS | community | | `help wanted` | Request for community participation, code, contribution | community | | `need more info` | Issue triaged but outstanding questions remain | community | -| `performance` | Issues or PRs that address performance issues | categorization| +| `performance` | Issues or PRs that address performance issues | categorization| | `platform/*` | Issues that only apply to one platform | categorization | -| `plugin/*` | 1. Request for new * plugins 2. Issues/PRs that are related to * plugins | categorization | +| `plugin/*` | Request for new plugins and issues/PRs that are related to plugins | categorization | | `ready for final review` | Pull request has been reviewed and/or tested by multiple users and is ready for a final review | triage | | `rfc` | Request for comment - larger topics of discussion that are looking for feedback | community | | `support` |Telegraf questions, may be directed to community site or slack | triage | @@ -66,7 +67,3 @@ We close issues for the following reasons: | `closed/not-reproducible` | Given the information we have we can't reproduce the issue | | `closed/out-of-scope` | The feature request is out of scope for Telegraf - highly unlikely to be worked on | | `closed/question` | This issue is a support question, directed to community site or slack | - - - - diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md index 90c49fd5af689..5a627d4cc29ec 100644 --- a/docs/maintainers/PULL_REQUESTS.md +++ b/docs/maintainers/PULL_REQUESTS.md @@ -2,7 +2,7 @@ ## Before Review -Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The +Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The only exemption would be non-copyrightable changes such as fixing a typo. Check that all tests are passing. Due to intermittent errors in the CI tests @@ -36,13 +36,15 @@ history and this method allows us to normalize commit messages as well as simplifies backporting. ### Rewriting the commit message + After selecting "Squash and Merge" you may need to rewrite the commit message. Usually the body of the commit messages should be cleared as well, unless it -is well written and applies to the entire changeset. -- Use imperative present tense for the first line of the message: - - Use "Add tests for" (instead of "I added tests for" or "Adding tests for") -- The default merge commit messages include the PR number at the end of the -commit message, keep this in the final message. +is well written and applies to the entire changeset. + +- Use imperative present tense for the first line of the message: + - Use "Add tests for" (instead of "I added tests for" or "Adding tests for") +- The default merge commit messages include the PR number at the end of the +commit message, keep this in the final message. - If applicable mention the plugin in the message. **Example Enhancement:** @@ -59,7 +61,8 @@ commit message, keep this in the final message. If required, backport the patch and the changelog update to the current release branch. 
Usually this can be done by cherry picking the commits: -``` + +```shell git cherry-pick -x aaaaaaaa bbbbbbbb ``` diff --git a/docs/maintainers/RELEASES.md b/docs/maintainers/RELEASES.md index 3c05cdf968715..7eb2522cfd0e8 100644 --- a/docs/maintainers/RELEASES.md +++ b/docs/maintainers/RELEASES.md @@ -3,21 +3,25 @@ ## Release Branch On master, update `etc/telegraf.conf` and commit: + ```sh ./telegraf config > etc/telegraf.conf ``` Create the new release branch: + ```sh git checkout -b release-1.15 ``` Push the changes: + ```sh git push origin release-1.15 master ``` Update next version strings on master: + ```sh git checkout master echo 1.16.0 > build_version.txt @@ -29,6 +33,7 @@ Release candidates are created only for new minor releases (ex: 1.15.0). Tags are created but some of the other tasks, such as adding a changelog entry are skipped. Packages are added to the github release page and posted to community but are not posted to package repos or docker hub. + ```sh git checkout release-1.15 git commit --allow-empty -m "Telegraf 1.15.0-rc1" @@ -40,6 +45,7 @@ git push origin release-1.15 v1.15.0-rc1 On master, set the release date in the changelog and cherry-pick the change back: + ```sh git checkout master vi CHANGELOG.md @@ -52,6 +58,7 @@ Double check that the changelog was applied as desired, or fix it up and amend the change before pushing. Tag the release: + ```sh git checkout release-1.8 # This just improves the `git show 1.8.0` output @@ -61,6 +68,7 @@ git tag -s v1.8.0 -m "Telegraf 1.8.0" Check that the version was set correctly, the tag can always be altered if a mistake is made but only before you push it to Github: + ```sh make ./telegraf --version @@ -69,6 +77,7 @@ Telegraf v1.8.0 (git: release-1.8 aaaaaaaa) When you push a branch with a tag to Github, CircleCI will be triggered to build the packages. + ```sh git push origin master release-1.8 v1.8.0 ``` @@ -82,6 +91,7 @@ Update apt and yum repositories hosted at repos.influxdata.com. Update the package signatures on S3, these are used primarily by the docker images. 
Update docker image [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker): + ```sh cd influxdata-docker git co master From 4605c977daf3083d759149be321b0d535a2564d9 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:47:11 -0700 Subject: [PATCH 069/133] chore: clean up all markdown lint errors in processor plugins (#10157) --- plugins/processors/clone/README.md | 2 +- plugins/processors/converter/README.md | 6 +- plugins/processors/date/README.md | 10 ++- plugins/processors/dedup/README.md | 4 +- plugins/processors/defaults/README.md | 6 +- plugins/processors/enum/README.md | 7 +- plugins/processors/execd/README.md | 100 +++++++++++------------ plugins/processors/filepath/README.md | 33 ++++---- plugins/processors/ifname/README.md | 4 +- plugins/processors/override/README.md | 2 +- plugins/processors/parser/README.md | 15 ++-- plugins/processors/pivot/README.md | 4 +- plugins/processors/port_name/README.md | 4 +- plugins/processors/printer/README.md | 4 +- plugins/processors/regex/README.md | 9 +- plugins/processors/rename/README.md | 6 +- plugins/processors/reverse_dns/README.md | 6 +- plugins/processors/s2geo/README.md | 4 +- plugins/processors/starlark/README.md | 22 ++--- plugins/processors/strings/README.md | 43 ++++++---- plugins/processors/tag_limit/README.md | 4 +- plugins/processors/template/README.md | 7 +- plugins/processors/topk/README.md | 30 +++---- plugins/processors/unpivot/README.md | 5 +- 24 files changed, 181 insertions(+), 156 deletions(-) diff --git a/plugins/processors/clone/README.md b/plugins/processors/clone/README.md index 7ae33d36b235c..837f2d8fc1070 100644 --- a/plugins/processors/clone/README.md +++ b/plugins/processors/clone/README.md @@ -22,7 +22,7 @@ created. A typical use-case is gathering metrics once and cloning them to simulate having several hosts (modifying ``host`` tag). -### Configuration: +## Configuration ```toml # Apply metric modifications using override semantics. diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index 46a2e2ec6390a..96d4546ddf1d2 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -11,7 +11,8 @@ will overwrite one another. **Note on large strings being converted to numeric types:** When converting a string value to a numeric type, precision may be lost if the number is too large. The largest numeric type this plugin supports is `float64`, and if a string 'number' exceeds its size limit, accuracy may be lost. -### Configuration +## Configuration + ```toml # Convert values to another metric value type [[processors.converter]] @@ -46,6 +47,7 @@ will overwrite one another. ### Example Convert `port` tag to a string field: + ```toml [[processors.converter]] [processors.converter.tags] @@ -58,6 +60,7 @@ Convert `port` tag to a string field: ``` Convert all `scboard_*` fields to an integer: + ```toml [[processors.converter]] [processors.converter.fields] @@ -70,6 +73,7 @@ Convert all `scboard_*` fields to an integer: ``` Rename the measurement from a tag value: + ```toml [[processors.converter]] [processors.converter.tags] diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index 9a093fe0e86db..8720c45e5534a 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -5,11 +5,12 @@ Use the `date` processor to add the metric timestamp as a human readable tag. A common use is to add a tag that can be used to group by month or year. 
A few example usecases include: + 1) consumption data for utilities on per month basis 2) bandwidth capacity per month 3) compare energy production or sales on a yearly or monthly basis -### Configuration +## Configuration ```toml [[processors.date]] @@ -37,16 +38,17 @@ A few example usecases include: # timezone = "UTC" ``` -#### timezone +### timezone On Windows, only the `Local` and `UTC` zones are available by default. To use other timezones, set the `ZONEINFO` environment variable to the location of [`zoneinfo.zip`][zoneinfo]: -``` + +```text set ZONEINFO=C:\zoneinfo.zip ``` -### Example +## Example ```diff - throughput lower=10i,upper=1000i,mean=500i 1560540094000000000 diff --git a/plugins/processors/dedup/README.md b/plugins/processors/dedup/README.md index d0b516c274cf4..4c8338f89069d 100644 --- a/plugins/processors/dedup/README.md +++ b/plugins/processors/dedup/README.md @@ -2,7 +2,7 @@ Filter metrics whose field values are exact repetitions of the previous values. -### Configuration +## Configuration ```toml [[processors.dedup]] @@ -10,7 +10,7 @@ Filter metrics whose field values are exact repetitions of the previous values. dedup_interval = "600s" ``` -### Example +## Example ```diff - cpu,cpu=cpu0 time_idle=42i,time_guest=1i diff --git a/plugins/processors/defaults/README.md b/plugins/processors/defaults/README.md index 55a7eeb46e326..35da9d425c09f 100644 --- a/plugins/processors/defaults/README.md +++ b/plugins/processors/defaults/README.md @@ -10,7 +10,8 @@ There are three cases where this processor will insert a configured default fiel Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration + ```toml ## Set default fields on your metric(s) when they are nil or empty [[processors.defaults]] @@ -22,7 +23,8 @@ Telegraf minimum version: Telegraf 1.15.0 is_error = true ``` -### Example +## Example + Ensure a _status\_code_ field with _N/A_ is inserted in the metric when one is not set in the metric by default: ```toml diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index 0aecaaa430474..873eff1c180e0 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -9,7 +9,7 @@ used for all values, which are not contained in the value_mappings. The processor supports explicit configuration of a destination tag or field. By default the source tag or field is overwritten. -### Configuration: +## Configuration ```toml [[processors.enum]] @@ -25,7 +25,7 @@ source tag or field is overwritten. dest = "status_code" ## Default value to be used for all values not contained in the mapping - ## table. When unset and no match is found, the original field will remain + ## table. When unset and no match is found, the original field will remain ## unmodified and the destination tag or field will not be created. # default = 0 @@ -36,7 +36,7 @@ source tag or field is overwritten. red = 3 ``` -### Example: +## Example ```diff - xyzzy status="green" 1502489900000000000 @@ -44,6 +44,7 @@ source tag or field is overwritten. ``` With unknown value and no default set: + ```diff - xyzzy status="black" 1502489900000000000 + xyzzy status="black" 1502489900000000000 diff --git a/plugins/processors/execd/README.md b/plugins/processors/execd/README.md index 6f8d376a01171..aec4b58126ffb 100644 --- a/plugins/processors/execd/README.md +++ b/plugins/processors/execd/README.md @@ -9,7 +9,7 @@ Program output on standard error is mirrored to the telegraf log. 
Telegraf minimum version: Telegraf 1.15.0 -### Caveats +## Caveats - Metrics with tracking will be considered "delivered" as soon as they are passed to the external process. There is currently no way to match up which metric @@ -20,7 +20,7 @@ Telegraf minimum version: Telegraf 1.15.0 the requirement that it is serialize-parse symmetrical and does not lose any critical type data. -### Configuration: +## Configuration ```toml [[processors.execd]] @@ -33,9 +33,9 @@ Telegraf minimum version: Telegraf 1.15.0 # restart_delay = "10s" ``` -### Example +## Example -#### Go daemon example +### Go daemon example This go daemon reads a metric from stdin, multiplies the "count" field by 2, and writes the metric back out. @@ -44,55 +44,55 @@ and writes the metric back out. package main import ( - "fmt" - "os" + "fmt" + "os" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/serializers" ) func main() { - parser := influx.NewStreamParser(os.Stdin) - serializer, _ := serializers.NewInfluxSerializer() - - for { - metric, err := parser.Next() - if err != nil { - if err == influx.EOF { - return // stream ended - } - if parseErr, isParseError := err.(*influx.ParseError); isParseError { - fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr) - os.Exit(1) - } - fmt.Fprintf(os.Stderr, "ERR %v\n", err) - os.Exit(1) - } - - c, found := metric.GetField("count") - if !found { - fmt.Fprintf(os.Stderr, "metric has no count field\n") - os.Exit(1) - } - switch t := c.(type) { - case float64: - t *= 2 - metric.AddField("count", t) - case int64: - t *= 2 - metric.AddField("count", t) - default: - fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c) - os.Exit(1) - } - b, err := serializer.Serialize(metric) - if err != nil { - fmt.Fprintf(os.Stderr, "ERR %v\n", err) - os.Exit(1) - } - fmt.Fprint(os.Stdout, string(b)) - } + parser := influx.NewStreamParser(os.Stdin) + serializer, _ := serializers.NewInfluxSerializer() + + for { + metric, err := parser.Next() + if err != nil { + if err == influx.EOF { + return // stream ended + } + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr) + os.Exit(1) + } + fmt.Fprintf(os.Stderr, "ERR %v\n", err) + os.Exit(1) + } + + c, found := metric.GetField("count") + if !found { + fmt.Fprintf(os.Stderr, "metric has no count field\n") + os.Exit(1) + } + switch t := c.(type) { + case float64: + t *= 2 + metric.AddField("count", t) + case int64: + t *= 2 + metric.AddField("count", t) + default: + fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c) + os.Exit(1) + } + b, err := serializer.Serialize(metric) + if err != nil { + fmt.Fprintf(os.Stderr, "ERR %v\n", err) + os.Exit(1) + } + fmt.Fprint(os.Stdout, string(b)) + } } ``` @@ -103,7 +103,7 @@ to run it, you'd build the binary using go, eg `go build -o multiplier.exe main. 
command = ["multiplier.exe"] ``` -#### Ruby daemon +### Ruby daemon - See [Ruby daemon](./examples/multiplier_line_protocol/multiplier_line_protocol.rb) diff --git a/plugins/processors/filepath/README.md b/plugins/processors/filepath/README.md index ab3454dcb0c11..c1bb85e8327d6 100644 --- a/plugins/processors/filepath/README.md +++ b/plugins/processors/filepath/README.md @@ -1,3 +1,5 @@ + + # Filepath Processor Plugin The `filepath` processor plugin maps certain go functions from [path/filepath](https://golang.org/pkg/path/filepath/) @@ -24,7 +26,7 @@ If you plan to apply multiple transformations to the same `tag`/`field`, bear in Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[processors.filepath]] @@ -58,9 +60,9 @@ Telegraf minimum version: Telegraf 1.15.0 # tag = "path" ``` -### Considerations +## Considerations -#### Clean +### Clean Even though `clean` is provided a standalone function, it is also invoked when using the `rel` and `dirname` functions, so there is no need to use it along with them. @@ -83,14 +85,14 @@ Is equivalent to: tag = "path" ``` -#### ToSlash +### ToSlash The effects of this function are only noticeable on Windows platforms, because of the underlying golang implementation. -### Examples +## Examples + +### Basename -#### Basename - ```toml [[processors.filepath]] [[processors.filepath.basename]] @@ -102,7 +104,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric,path="ajob.log" duration_seconds=134 1587920425000000000 ``` -#### Dirname +### Dirname ```toml [[processors.filepath]] @@ -116,7 +118,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000 ``` -#### Stem +### Stem ```toml [[processors.filepath]] @@ -129,7 +131,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric,path="ajob" duration_seconds=134 1587920425000000000 ``` -#### Clean +### Clean ```toml [[processors.filepath]] @@ -142,7 +144,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 ``` -#### Rel +### Rel ```toml [[processors.filepath]] @@ -156,7 +158,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000 ``` -#### ToSlash +### ToSlash ```toml [[processors.filepath]] @@ -169,7 +171,7 @@ The effects of this function are only noticeable on Windows platforms, because o + my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 ``` -### Processing paths from tail plugin +## Processing paths from tail plugin This plugin can be used together with the [tail input plugn](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) to make modifications @@ -181,9 +183,9 @@ Scenario: written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds` * We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a tag - * We are interested in the filename without its extensions, since it might be enough information for plotting our + * We are interested in the filename without its extensions, since it might be enough information for plotting our execution times in a dashboard - * Just in case, we don't 
want to override the original path (if for some reason we end up having duplicates we might + * Just in case, we don't want to override the original path (if for some reason we end up having duplicates we might want this information) For this purpose, we will use the `tail` input plugin, the `grok` parser plugin and the `filepath` processor. @@ -199,7 +201,6 @@ For this purpose, we will use the `tail` input plugin, the `grok` parser plugin [[processors.filepath.stem]] tag = "path" dest = "stempath" - ``` The resulting output for a job taking 70 seconds for the mentioned log file would look like: diff --git a/plugins/processors/ifname/README.md b/plugins/processors/ifname/README.md index d68899db40a53..d22c4de911094 100644 --- a/plugins/processors/ifname/README.md +++ b/plugins/processors/ifname/README.md @@ -4,7 +4,7 @@ The `ifname` plugin looks up network interface names using SNMP. Telegraf minimum version: Telegraf 1.15.0 -### Configuration: +## Configuration ```toml [[processors.ifname]] @@ -66,7 +66,7 @@ Telegraf minimum version: Telegraf 1.15.0 # cache_ttl = "8h" ``` -### Example processing: +## Example Example config: diff --git a/plugins/processors/override/README.md b/plugins/processors/override/README.md index 174663e2b25ab..ff454b9f74a33 100644 --- a/plugins/processors/override/README.md +++ b/plugins/processors/override/README.md @@ -21,7 +21,7 @@ Use-case of this plugin encompass ensuring certain tags or naming conventions are adhered to irrespective of input plugin configurations, e.g. by `taginclude`. -### Configuration: +## Configuration ```toml # Apply metric modifications using override semantics. diff --git a/plugins/processors/parser/README.md b/plugins/processors/parser/README.md index 134bbb59e6f07..7fe74b39f9119 100644 --- a/plugins/processors/parser/README.md +++ b/plugins/processors/parser/README.md @@ -4,6 +4,7 @@ This plugin parses defined fields containing the specified data format and creates new metrics based on the contents of the field. ## Configuration + ```toml [[processors.parser]] ## The name of the fields whose value will be parsed. @@ -23,7 +24,7 @@ creates new metrics based on the contents of the field. data_format = "influx" ``` -### Example: +## Example ```toml [[processors.parser]] @@ -32,14 +33,14 @@ creates new metrics based on the contents of the field. data_format = "logfmt" ``` -**Input**: -``` +### Input + +```text syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",procid="6629",severity_code=6i,timestamp=1533848508138040000i,version=1i ``` -**Output**: -``` +### Output + +```text syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,log_id="09p7QbOG000",lvl="info",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",msg="Executing query",procid="6629",query="SHOW DATABASES",service="query",severity_code=6i,timestamp=1533848508138040000i,ts="2018-08-09T21:01:48.137963Z",version=1i ``` - - diff --git a/plugins/processors/pivot/README.md b/plugins/processors/pivot/README.md index b3eb06fd3f7da..25738232adfc3 100644 --- a/plugins/processors/pivot/README.md +++ b/plugins/processors/pivot/README.md @@ -8,7 +8,7 @@ formats. To perform the reverse operation use the [unpivot] processor. 
-### Configuration +## Configuration ```toml [[processors.pivot]] @@ -18,7 +18,7 @@ To perform the reverse operation use the [unpivot] processor. value_key = "value" ``` -### Example +## Example ```diff - cpu,cpu=cpu0,name=time_idle value=42i diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md index 3629aff84e90a..34ae2f63d65f0 100644 --- a/plugins/processors/port_name/README.md +++ b/plugins/processors/port_name/README.md @@ -8,7 +8,7 @@ If the source was found in tag, the service name will be added as a tag. If the Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[processors.port_name]] @@ -30,7 +30,7 @@ Telegraf minimum version: Telegraf 1.15.0 # protocol_field = "proto" ``` -### Example +## Example ```diff - measurement,port=80 field=123 1560540094000000000 diff --git a/plugins/processors/printer/README.md b/plugins/processors/printer/README.md index 9a79e16fd3c9b..453690275da7e 100644 --- a/plugins/processors/printer/README.md +++ b/plugins/processors/printer/README.md @@ -2,13 +2,13 @@ The printer processor plugin simple prints every metric passing through it. -### Configuration: +## Configuration ```toml # Print all metrics that pass through this filter. [[processors.printer]] ``` -### Tags: +## Tags No tags are applied by this processor. diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md index 578ed13d067c6..da7482713917b 100644 --- a/plugins/processors/regex/README.md +++ b/plugins/processors/regex/README.md @@ -6,7 +6,7 @@ For tags transforms, if `append` is set to `true`, it will append the transforma For metrics transforms, `key` denotes the element that should be transformed. Furthermore, `result_key` allows control over the behavior applied in case the resulting `tag` or `field` name already exists. -### Configuration: +## Configuration ```toml [[processors.regex]] @@ -74,11 +74,12 @@ For metrics transforms, `key` denotes the element that should be transformed. Fu # replacement = "${1}" ``` -### Tags: +## Tags No tags are applied by this processor. -### Example Output: -``` +## Example + +```text nginx_requests,verb=GET,resp_code=2xx request="/api/search/?category=plugins&q=regex&sort=asc",method="/search/",category="plugins",referrer="-",ident="-",http_version=1.1,agent="UserAgent",client_ip="127.0.0.1",auth="-",resp_bytes=270i 1519652321000000000 ``` diff --git a/plugins/processors/rename/README.md b/plugins/processors/rename/README.md index cc3c61a940640..461d18f46e979 100644 --- a/plugins/processors/rename/README.md +++ b/plugins/processors/rename/README.md @@ -2,7 +2,7 @@ The `rename` processor renames measurements, fields, and tags. -### Configuration: +## Configuration ```toml [[processors.rename]] @@ -24,11 +24,11 @@ The `rename` processor renames measurements, fields, and tags. dest = "max" ``` -### Tags: +## Tags No tags are applied by this processor, though it can alter them by renaming. -### Example processing: +## Example ```diff - network_interface_throughput,hostname=backend.example.com lower=10i,upper=1000i,mean=500i 1502489900000000000 diff --git a/plugins/processors/reverse_dns/README.md b/plugins/processors/reverse_dns/README.md index c8aa0bfdb58e6..f23482beddc26 100644 --- a/plugins/processors/reverse_dns/README.md +++ b/plugins/processors/reverse_dns/README.md @@ -5,7 +5,7 @@ IPs in them. 
Telegraf minimum version: Telegraf 1.15.0 -### Configuration: +## Configuration ```toml [[processors.reverse_dns]] @@ -55,9 +55,7 @@ Telegraf minimum version: Telegraf 1.15.0 ## processors.converter after this one, specifying the order attribute. ``` - - -### Example processing: +## Example example config: diff --git a/plugins/processors/s2geo/README.md b/plugins/processors/s2geo/README.md index d48947fe67c99..26547203d0259 100644 --- a/plugins/processors/s2geo/README.md +++ b/plugins/processors/s2geo/README.md @@ -4,7 +4,7 @@ Use the `s2geo` processor to add tag with S2 cell ID token of specified [cell le The tag is used in `experimental/geo` Flux package functions. The `lat` and `lon` fields values should contain WGS-84 coordinates in decimal degrees. -### Configuration +## Configuration ```toml [[processors.s2geo]] @@ -20,7 +20,7 @@ The `lat` and `lon` fields values should contain WGS-84 coordinates in decimal d # cell_level = 9 ``` -### Example +## Example ```diff - mta,area=llir,id=GO505_20_2704,status=1 lat=40.878738,lon=-72.517572 1560540094 diff --git a/plugins/processors/starlark/README.md b/plugins/processors/starlark/README.md index 9ca231c5aeb8b..c1391c8c242df 100644 --- a/plugins/processors/starlark/README.md +++ b/plugins/processors/starlark/README.md @@ -14,7 +14,7 @@ functions. Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[processors.starlark]] @@ -25,7 +25,7 @@ Telegraf minimum version: Telegraf 1.15.0 ## Source of the Starlark script. source = ''' def apply(metric): - return metric + return metric ''' ## File containing a Starlark script. @@ -39,7 +39,7 @@ def apply(metric): # debug_mode = true ``` -### Usage +## Usage The Starlark code should contain a function called `apply` that takes a metric as its single argument. The function will be called with each metric, and can @@ -47,7 +47,7 @@ return `None`, a single metric, or a list of metrics. ```python def apply(metric): - return metric + return metric ``` For a list of available types and functions that can be used in the code, see @@ -90,7 +90,8 @@ While Starlark is similar to Python, there are important differences to note: - It is not possible to open files or sockets. - These common keywords are **not supported** in the Starlark grammar: - ``` + + ```text as finally nonlocal assert from raise class global try @@ -102,10 +103,10 @@ While Starlark is similar to Python, there are important differences to note: The ability to load external scripts other than your own is pretty limited. The following libraries are available for loading: -* json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/json). -* log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. -* math: `load("math.star", "math")` provides [the following functions and constants](https://pkg.go.dev/go.starlark.net/lib/math). See [math.star](/plugins/processors/starlark/testdata/math.star) for an example. 
-* time: `load("time.star", "time")` provides the following functions: `time.from_timestamp()`, `time.is_valid_timezone()`, `time.now()`, `time.parse_duration()`, `time.parseTime()`, `time.time()`. See [time_date.star](/plugins/processors/starlark/testdata/time_date.star), [time_duration.star](/plugins/processors/starlark/testdata/time_duration.star) and/or [time_timestamp.star](/plugins/processors/starlark/testdata/time_timestamp.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/time). +- json: `load("json.star", "json")` provides the following functions: `json.encode()`, `json.decode()`, `json.indent()`. See [json.star](/plugins/processors/starlark/testdata/json.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/json). +- log: `load("logging.star", "log")` provides the following functions: `log.debug()`, `log.info()`, `log.warn()`, `log.error()`. See [logging.star](/plugins/processors/starlark/testdata/logging.star) for an example. +- math: `load("math.star", "math")` provides [the following functions and constants](https://pkg.go.dev/go.starlark.net/lib/math). See [math.star](/plugins/processors/starlark/testdata/math.star) for an example. +- time: `load("time.star", "time")` provides the following functions: `time.from_timestamp()`, `time.is_valid_timezone()`, `time.now()`, `time.parse_duration()`, `time.parseTime()`, `time.time()`. See [time_date.star](/plugins/processors/starlark/testdata/time_date.star), [time_duration.star](/plugins/processors/starlark/testdata/time_duration.star) and/or [time_timestamp.star](/plugins/processors/starlark/testdata/time_timestamp.star) for an example. For more details about the functions, please refer to [the documentation of this library](https://pkg.go.dev/go.starlark.net/lib/time). If you would like to see support for something else here, please open an issue. @@ -167,7 +168,7 @@ def apply(metric): **How can I save values across multiple calls to the script?** -Telegraf freezes the global scope, which prevents it from being modified, except for a special shared global dictionary +Telegraf freezes the global scope, which prevents it from being modified, except for a special shared global dictionary named `state`, this can be used by the `apply` function. See an example of this in [compare with previous metric](/plugins/processors/starlark/testdata/compare_metrics.star) @@ -194,6 +195,7 @@ def apply(metric): def failing(metric): json.decode("non-json-content") ``` + **How to reuse the same script but with different parameters?** In case you have a generic script that you would like to reuse for different instances of the plugin, you can use constants as input parameters of your script. diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md index e0fcec9103151..e735742c96559 100644 --- a/plugins/processors/strings/README.md +++ b/plugins/processors/strings/README.md @@ -3,6 +3,7 @@ The `strings` plugin maps certain go string functions onto measurement, tag, and field values. Values can be modified in place or stored in another key. 
Implemented functions are: + - lowercase - uppercase - titlecase @@ -22,9 +23,9 @@ Specify the `measurement`, `tag`, `tag_key`, `field`, or `field_key` that you wa If you'd like to apply the change to every `tag`, `tag_key`, `field`, `field_key`, or `measurement`, use the value `"*"` for each respective field. Note that the `dest` field will be ignored if `"*"` is used. -If you'd like to apply multiple processings to the same `tag_key` or `field_key`, note the process order stated above. See [Example 2]() for an example. +If you'd like to apply multiple processings to the same `tag_key` or `field_key`, note the process order stated above. See the second example below for an example. -### Configuration: +## Configuration ```toml [[processors.strings]] @@ -87,16 +88,16 @@ If you'd like to apply multiple processings to the same `tag_key` or `field_key` # replacement = "" ``` -#### Trim, TrimLeft, TrimRight +### Trim, TrimLeft, TrimRight The `trim`, `trim_left`, and `trim_right` functions take an optional parameter: `cutset`. This value is a string containing the characters to remove from the value. -#### TrimPrefix, TrimSuffix +### TrimPrefix, TrimSuffix The `trim_prefix` and `trim_suffix` functions remote the given `prefix` or `suffix` respectively from the string. -#### Replace +### Replace The `replace` function does a substring replacement across the entire string to allow for different conventions between various input and output @@ -106,8 +107,10 @@ Can also be used to eliminate unneeded chars that were in metrics. If the entire name would be deleted, it will refuse to perform the operation and keep the old name. -### Example -**Config** +## Example + +A sample configuration: + ```toml [[processors.strings]] [[processors.strings.lowercase]] @@ -122,18 +125,22 @@ the operation and keep the old name. dest = "cs-host_normalised" ``` -**Input** -``` +Sample input: + +```text iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",http_version=1.1 1519652321000000000 ``` -**Output** -``` +Sample output: + +```text iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",http_version=1.1,cs-host_normalised="MIXEDCASE_HOST" 1519652321000000000 ``` -### Example 2 -**Config** +### Second Example + +A sample configuration: + ```toml [[processors.strings]] [[processors.strings.lowercase]] @@ -145,12 +152,14 @@ iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",http_version=1. new = "_" ``` -**Input** -``` +Sample input: + +```text iis_log,URI-Stem=/API/HealthCheck http_version=1.1 1519652321000000000 ``` -**Output** -``` +Sample output: + +```text iis_log,uri_stem=/API/HealthCheck http_version=1.1 1519652321000000000 ``` diff --git a/plugins/processors/tag_limit/README.md b/plugins/processors/tag_limit/README.md index b287f0f8d4df3..8f8d399f5a84b 100644 --- a/plugins/processors/tag_limit/README.md +++ b/plugins/processors/tag_limit/README.md @@ -8,7 +8,7 @@ This can be useful when dealing with output systems (e.g. Stackdriver) that impose hard limits on the number of tags/labels per metric or where high levels of cardinality are computationally and/or financially expensive. -### Configuration +## Configuration ```toml [[processors.tag_limit]] @@ -19,7 +19,7 @@ levels of cardinality are computationally and/or financially expensive. 
keep = ["environment", "region"] ``` -### Example +## Example ```diff + throughput month=Jun,environment=qa,region=us-east1,lower=10i,upper=1000i,mean=500i 1560540094000000000 diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md index 348dae096b0c2..b18189f95b2fb 100644 --- a/plugins/processors/template/README.md +++ b/plugins/processors/template/README.md @@ -10,7 +10,7 @@ timestamp using the [interface in `/template_metric.go`](template_metric.go). Read the full [Go Template Documentation][]. -### Configuration +## Configuration ```toml [[processors.template]] @@ -23,9 +23,10 @@ Read the full [Go Template Documentation][]. template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' ``` -### Example +## Example Combine multiple tags to create a single tag: + ```toml [[processors.template]] tag = "topic" @@ -38,6 +39,7 @@ Combine multiple tags to create a single tag: ``` Add measurement name as a tag: + ```toml [[processors.template]] tag = "measurement" @@ -50,6 +52,7 @@ Add measurement name as a tag: ``` Add the year as a tag, similar to the date processor: + ```toml [[processors.template]] tag = "year" diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index cfcb0b2176d38..6d509bbd02fd3 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -4,17 +4,18 @@ The TopK processor plugin is a filter designed to get the top series over a peri This processor goes through these steps when processing a batch of metrics: - 1. Groups measurements in buckets based on their tags and name - 2. Every N seconds, for each bucket, for each selected field: aggregate all the measurements using a given aggregation function (min, sum, mean, etc) and the field. - 3. For each computed aggregation: order the buckets by the aggregation, then returns all measurements in the top `K` buckets +1. Groups measurements in buckets based on their tags and name +2. Every N seconds, for each bucket, for each selected field: aggregate all the measurements using a given aggregation function (min, sum, mean, etc) and the field. +3. For each computed aggregation: order the buckets by the aggregation, then returns all measurements in the top `K` buckets Notes: - * The deduplicates metrics - * The name of the measurement is always used when grouping it - * Depending on the amount of metrics on each bucket, more than `K` series may be returned - * If a measurement does not have one of the selected fields, it is dropped from the aggregation -### Configuration: +* The deduplicates metrics +* The name of the measurement is always used when grouping it +* Depending on the amount of metrics on each bucket, more than `K` series may be returned +* If a measurement does not have one of the selected fields, it is dropped from the aggregation + +## Configuration ```toml [[processors.topk]] @@ -60,18 +61,18 @@ Notes: # add_aggregate_fields = [] ``` -### Tags: +### Tags This processor does not add tags by default. But the setting `add_groupby_tag` will add a tag if set to anything other than "" - -### Fields: +### Fields This processor does not add fields by default. But the settings `add_rank_fields` and `add_aggregation_fields` will add one or several fields if set to anything other than "" - ### Example -**Config** + +Below is an example configuration: + ```toml [[processors.topk]] period = 20 @@ -80,7 +81,8 @@ This processor does not add fields by default. 
But the settings `add_rank_fields fields = ["cpu_usage"] ``` -**Output difference with topk** +Output difference with topk: + ```diff < procstat,pid=2088,process_name=Xorg cpu_usage=7.296576662282613 1546473820000000000 < procstat,pid=2780,process_name=ibus-engine-simple cpu_usage=0 1546473820000000000 diff --git a/plugins/processors/unpivot/README.md b/plugins/processors/unpivot/README.md index beee6c276608a..375456c40c128 100644 --- a/plugins/processors/unpivot/README.md +++ b/plugins/processors/unpivot/README.md @@ -4,7 +4,7 @@ You can use the `unpivot` processor to rotate a multi field series into single v To perform the reverse operation use the [pivot] processor. -### Configuration +## Configuration ```toml [[processors.unpivot]] @@ -14,7 +14,7 @@ To perform the reverse operation use the [pivot] processor. value_key = "value" ``` -### Example +## Example ```diff - cpu,cpu=cpu0 time_idle=42i,time_user=43i @@ -23,4 +23,3 @@ To perform the reverse operation use the [pivot] processor. ``` [pivot]: /plugins/processors/pivot/README.md - From c172df21a4d0f50e2ff1ef905a050c31746869a6 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:47:23 -0700 Subject: [PATCH 070/133] chore: clean up all markdown lint errors in serializer plugins (#10158) --- plugins/serializers/EXAMPLE_README.md | 11 ++++++----- plugins/serializers/carbon2/README.md | 10 +++++----- plugins/serializers/graphite/README.md | 14 +++++++------- plugins/serializers/influx/README.md | 5 +++-- plugins/serializers/json/README.md | 6 ++++-- plugins/serializers/msgpack/README.md | 14 ++++++-------- plugins/serializers/nowmetric/README.md | 13 +++++++------ plugins/serializers/prometheus/README.md | 15 ++++++++------- .../serializers/prometheusremotewrite/README.md | 8 ++++---- plugins/serializers/splunkmetric/README.md | 10 +++++++++- plugins/serializers/wavefront/README.md | 10 +++++----- 11 files changed, 64 insertions(+), 52 deletions(-) diff --git a/plugins/serializers/EXAMPLE_README.md b/plugins/serializers/EXAMPLE_README.md index 11965c07f1236..7190a7f97b624 100644 --- a/plugins/serializers/EXAMPLE_README.md +++ b/plugins/serializers/EXAMPLE_README.md @@ -1,9 +1,9 @@ -# Example +# Example README This description explains at a high level what the serializer does and provides links to where additional information about the format can be found. -### Configuration +## Configuration This section contains the sample configuration for the serializer. Since the configuration for a serializer is not have a standalone plugin, use the `file` @@ -24,22 +24,23 @@ or `http` outputs as the base config. data_format = "example" ``` -#### example_option +### example_option If an option requires a more expansive explanation than can be included inline in the sample configuration, it may be described here. -### Metrics +## Metrics The optional Metrics section contains details about how the serializer converts Telegraf metrics into output. -### Example +## Example The optional Example section can show an example conversion to the output format using InfluxDB Line Protocol as the reference format. 
For line delimited text formats a diff may be appropriate: + ```diff - cpu,host=localhost,source=example.org value=42 + cpu|host=localhost|source=example.org|value=42 diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md index 3ad54a1699d3a..b1fdf56c5ab90 100644 --- a/plugins/serializers/carbon2/README.md +++ b/plugins/serializers/carbon2/README.md @@ -30,7 +30,7 @@ The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 f Standard form: -``` +```text metric=name field=field_1 host=foo 30 1234567890 metric=name field=field_2 host=foo 4 1234567890 metric=name field=field_N host=foo 59 1234567890 @@ -51,7 +51,7 @@ after the `_`. This is the behavior of `carbon2_format = "metric_includes_field"` which would make the above example look like: -``` +```text metric=name_field_1 host=foo 30 1234567890 metric=name_field_2 host=foo 4 1234567890 metric=name_field_N host=foo 59 1234567890 @@ -62,7 +62,7 @@ metric=name_field_N host=foo 59 1234567890 In order to sanitize the metric name one can specify `carbon2_sanitize_replace_char` in order to replace the following characters in the metric name: -``` +```text !@#$%^&*()+`'\"[]{};<>,?/\\|= ``` @@ -78,13 +78,13 @@ There will be a `metric` tag that represents the name of the metric and a `field If we take the following InfluxDB Line Protocol: -``` +```text weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890 ``` after serializing in Carbon2, the result would be: -``` +```text metric=weather field=temperature location=us-midwest season=summer 82 1234567890 metric=weather field=wind location=us-midwest season=summer 100 1234567890 ``` diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md index f68765c54ae31..141bb76b3fa81 100644 --- a/plugins/serializers/graphite/README.md +++ b/plugins/serializers/graphite/README.md @@ -5,7 +5,7 @@ template pattern or tag support method. You can select between the two methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support method is used, otherwise the [Template Pattern](templates) is used. -### Configuration +## Configuration ```toml [[outputs.file]] @@ -41,7 +41,7 @@ method is used, otherwise the [Template Pattern](templates) is used. # graphite_separator = "." ``` -#### graphite_tag_support +### graphite_tag_support When the `graphite_tag_support` option is enabled, the template pattern is not used. Instead, tags are encoded using @@ -52,14 +52,17 @@ added in Graphite 1.1. The `metric_path` is a combination of the optional The tag `name` is reserved by Graphite, any conflicting tags and will be encoded as `_name`. **Example Conversion**: -``` + +```text cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 => cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 ``` + With set option `graphite_separator` to "_" -``` + +```text cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 => cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 @@ -72,7 +75,4 @@ When in `strict` mode Telegraf uses the same rules as metrics when not using tag When in `compatible` mode Telegraf allows more characters through, and is based on the Graphite specification: >Tag names must have a length >= 1 and may contain any ascii characters except `;!^=`. 
Tag values must also have a length >= 1, they may contain any ascii characters except `;` and the first character must not be `~`. UTF-8 characters may work for names and values, but they are not well tested and it is not recommended to use non-ascii characters in metric names or tags. Metric names get indexed under the special tag name, if a metric name starts with one or multiple ~ they simply get removed from the derived tag value because the ~ character is not allowed to be in the first position of the tag value. If a metric name consists of no other characters than ~, then it is considered invalid and may get dropped. - - - [templates]: /docs/TEMPLATE_PATTERN.md diff --git a/plugins/serializers/influx/README.md b/plugins/serializers/influx/README.md index d21ead8758f38..eae2cb4f0573e 100644 --- a/plugins/serializers/influx/README.md +++ b/plugins/serializers/influx/README.md @@ -4,7 +4,7 @@ The `influx` data format outputs metrics into [InfluxDB Line Protocol][line protocol]. This is the recommended format unless another format is required for interoperability. -### Configuration +## Configuration ```toml [[outputs.file]] @@ -32,10 +32,11 @@ for interoperability. influx_uint_support = false ``` -### Metrics +## Metrics Conversion is direct taking into account some limitations of the Line Protocol format: + - Float fields that are `NaN` or `Inf` are skipped. - Trailing backslash `\` characters are removed from tag keys and values. - Tags with a key or value that is the empty string are skipped. diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index b33875578272a..2bbe8dad95082 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -2,7 +2,7 @@ The `json` output data format converts metrics into JSON documents. -### Configuration +## Configuration ```toml [[outputs.file]] @@ -28,9 +28,10 @@ The `json` output data format converts metrics into JSON documents. #json_timestamp_format = "" ``` -### Examples: +## Examples Standard form: + ```json { "fields": { @@ -50,6 +51,7 @@ Standard form: When an output plugin needs to emit multiple metrics at one time, it may use the batch format. The use of batch format is determined by the plugin, reference the documentation for the specific plugin. + ```json { "metrics": [ diff --git a/plugins/serializers/msgpack/README.md b/plugins/serializers/msgpack/README.md index 5607cc64c05bc..13130b1823742 100644 --- a/plugins/serializers/msgpack/README.md +++ b/plugins/serializers/msgpack/README.md @@ -1,14 +1,12 @@ -# MessagePack: +# MessagePack -MessagePack is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON. +[MessagePack](https://msgpack.org) is an efficient binary serialization format. It lets you exchange data among multiple languages like JSON. -https://msgpack.org - -### Format Definitions: +## Format Definitions Output of this format is MessagePack binary representation of metrics that have identical structure of the below JSON. -``` +```json { "name":"cpu", "time": , // https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type @@ -28,7 +26,7 @@ Output of this format is MessagePack binary representation of metrics that have MessagePack has it's own timestamp representation. You can find additional informations from [MessagePack specification](https://github.com/msgpack/msgpack/blob/master/spec.md#timestamp-extension-type). 
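As a purely illustrative sketch (not part of Telegraf itself), a consumer written in Go could read metrics emitted in this format with a third-party MessagePack library such as `github.com/vmihailenco/msgpack/v5`; the file name below is a placeholder:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/vmihailenco/msgpack/v5"
)

func main() {
	// Hypothetical file written by the file output with data_format = "msgpack".
	f, err := os.Open("metrics.msgpack")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	// The output is a stream of MessagePack maps shaped like the JSON structure
	// shown above, so decode them one at a time until the stream ends.
	dec := msgpack.NewDecoder(f)
	for {
		var m map[string]interface{}
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break
			}
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Printf("%v %v %v %v\n", m["name"], m["tags"], m["fields"], m["time"])
	}
}
```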
-### MessagePack Configuration: +## MessagePack Configuration There are no additional configuration options for MessagePack format. @@ -42,4 +40,4 @@ There are no additional configuration options for MessagePack format. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "msgpack" -``` \ No newline at end of file +``` diff --git a/plugins/serializers/nowmetric/README.md b/plugins/serializers/nowmetric/README.md index c1bc22cbe0227..f782f25da8abd 100644 --- a/plugins/serializers/nowmetric/README.md +++ b/plugins/serializers/nowmetric/README.md @@ -7,8 +7,8 @@ If you're using the HTTP output, this serializer knows how to batch the metrics [ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html - An example event looks like: + ```javascript [{ "metric_type": "Disk C: % Free Space", @@ -22,6 +22,7 @@ An example event looks like: "source": “Telegraf” }] ``` + ## Using with the HTTP output To send this data to a ServiceNow MID Server with Web Server extension activated, you can use the HTTP output, there are some custom headers that you need to add to manage the MID Web Server authorization, here's a sample config for an HTTP output: @@ -53,7 +54,7 @@ To send this data to a ServiceNow MID Server with Web Server extension activated ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "nowmetric" - + ## Additional HTTP headers [outputs.http.headers] # # Should be set manually to "application/json" for json data_format @@ -61,13 +62,13 @@ To send this data to a ServiceNow MID Server with Web Server extension activated Accept = "application/json" ``` -Starting with the London release, you also need to explicitly create event rule to allow binding of metric events to host CIs. - -https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html +Starting with the [London release](https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html +), +you also need to explicitly create event rule to allow binding of metric events to host CIs. ## Using with the File output -You can use the file output to output the payload in a file. +You can use the file output to output the payload in a file. In this case, just add the following section to your telegraf config file ```toml diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md index 446def0b46d77..5bf509c97cb43 100644 --- a/plugins/serializers/prometheus/README.md +++ b/plugins/serializers/prometheus/README.md @@ -13,8 +13,7 @@ also update their expiration time based on the most recently received data. If incoming metrics stop updating specific buckets or quantiles but continue reporting others every bucket/quantile will continue to exist. - -### Configuration +## Configuration ```toml [[outputs.file]] @@ -52,18 +51,20 @@ Prometheus labels are produced for each tag. **Note:** String fields are ignored and do not produce Prometheus metrics. 
-### Example +## Example -**Example Input** -``` +### Example Input + +```text cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000 cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000 cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000 cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000 ``` -**Example Output** -``` +### Example Output + +```text # HELP cpu_time_guest Telegraf collected metric # TYPE cpu_time_guest counter cpu_time_guest{cpu="cpu0"} 9582.54 diff --git a/plugins/serializers/prometheusremotewrite/README.md b/plugins/serializers/prometheusremotewrite/README.md index a0dc4a8deb03b..f44f95203fa46 100644 --- a/plugins/serializers/prometheusremotewrite/README.md +++ b/plugins/serializers/prometheusremotewrite/README.md @@ -9,21 +9,21 @@ somewhat, but not fully, mitigated by using outputs that support writing in "batch format". When using histogram and summary types, it is recommended to use only the `prometheus_client` output. -### Configuration +## Configuration ```toml [[outputs.http]] ## URL is the address to send metrics to url = "https://cortex/api/prom/push" - + ## Optional TLS Config tls_ca = "/etc/telegraf/ca.pem" tls_cert = "/etc/telegraf/cert.pem" tls_key = "/etc/telegraf/key.pem" - + ## Data format to output. data_format = "prometheusremotewrite" - + [outputs.http.headers] Content-Type = "application/x-protobuf" Content-Encoding = "snappy" diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index ba2170d9c4707..feeabbacaaf04 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -8,6 +8,7 @@ If you're using the HTTP output, this serializer knows how to batch the metrics [splunk-format]: http://dev.splunk.com/view/event-collector/SP-CAAAFDN#json An example event looks like: + ```javascript { "time": 1529708430, @@ -22,7 +23,9 @@ An example event looks like: } } ``` + In the above snippet, the following keys are dimensions: + * cpu * dc * user @@ -53,6 +56,7 @@ you can send all of your CPU stats in one JSON struct, an example event looks li } } ``` + In order to enable this mode, there's a new option `splunkmetric_multimetric` that you set in the appropriate output module you plan on using. ## Using with the HTTP output @@ -100,15 +104,18 @@ to manage the HEC authorization, here's a sample config for an HTTP output: ``` ## Overrides + You can override the default values for the HEC token you are using by adding additional tags to the config file. The following aspects of the token can be overridden with tags: + * index * source You can either use `[global_tags]` or using a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md). Such as this example which overrides the index just on the cpu metric: + ```toml [[inputs.cpu]] percpu = false @@ -122,6 +129,7 @@ Such as this example which overrides the index just on the cpu metric: You can use the file output when running telegraf on a machine with a Splunk forwarder. 
A sample event when `hec_routing` is false (or unset) looks like: + ```javascript { "_value": 0.6, @@ -132,6 +140,7 @@ A sample event when `hec_routing` is false (or unset) looks like: "time": 1529708430 } ``` + Data formatted in this manner can be ingested with a simple `props.conf` file that looks like this: @@ -183,4 +192,3 @@ Splunk supports only numeric field values, so serializer would silently drop met unhealthy = 2 none = 3 ``` - diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md index 3b72d95b4914c..3ab0fa3979fd1 100644 --- a/plugins/serializers/wavefront/README.md +++ b/plugins/serializers/wavefront/README.md @@ -2,7 +2,7 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). -### Configuration +## Configuration ```toml [[outputs.file]] @@ -22,7 +22,7 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro data_format = "wavefront" ``` -### Metrics +## Metrics A Wavefront metric is equivalent to a single field value of a Telegraf measurement. The Wavefront metric name will be: `.` @@ -30,17 +30,17 @@ If a prefix is specified it will be honored. Only boolean and numeric metrics will be serialized, all other types will generate an error. -### Example +## Example The following Telegraf metric -``` +```text cpu,cpu=cpu0,host=testHost user=12,idle=88,system=0 1234567890 ``` will serialize into the following Wavefront metrics -``` +```text "cpu.user" 12.000000 1234567890 source="testHost" "cpu"="cpu0" "cpu.idle" 88.000000 1234567890 source="testHost" "cpu"="cpu0" "cpu.system" 0.000000 1234567890 source="testHost" "cpu"="cpu0" From 0d8d118319d4de8223803b621a3427a420cfc4d6 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:47:33 -0700 Subject: [PATCH 071/133] chore: clean up all markdown lint errors in output plugins (#10159) --- plugins/outputs/amon/README.md | 2 +- plugins/outputs/amqp/README.md | 10 +- .../outputs/application_insights/README.md | 15 +- plugins/outputs/azure_data_explorer/README.md | 190 +++++++++--------- plugins/outputs/azure_monitor/README.md | 55 +++-- plugins/outputs/bigquery/README.md | 13 +- plugins/outputs/cloud_pubsub/README.md | 9 +- plugins/outputs/cloudwatch/README.md | 31 +-- plugins/outputs/cloudwatch_logs/README.md | 43 ++-- plugins/outputs/cratedb/README.md | 1 - plugins/outputs/datadog/README.md | 5 +- plugins/outputs/discard/README.md | 2 +- plugins/outputs/dynatrace/README.md | 8 +- plugins/outputs/elasticsearch/README.md | 18 +- plugins/outputs/exec/README.md | 6 +- plugins/outputs/execd/README.md | 4 +- plugins/outputs/file/README.md | 2 +- plugins/outputs/graphite/README.md | 2 +- plugins/outputs/graylog/README.md | 2 +- plugins/outputs/health/README.md | 7 +- plugins/outputs/http/README.md | 4 +- plugins/outputs/influxdb/README.md | 5 +- plugins/outputs/influxdb_v2/README.md | 6 +- plugins/outputs/instrumental/README.md | 2 +- plugins/outputs/kafka/README.md | 7 +- plugins/outputs/kinesis/README.md | 17 +- plugins/outputs/librato/README.md | 2 +- plugins/outputs/logzio/README.md | 8 +- plugins/outputs/loki/README.md | 4 +- plugins/outputs/mongodb/README.md | 12 +- plugins/outputs/mqtt/README.md | 15 +- plugins/outputs/newrelic/README.md | 7 +- plugins/outputs/nsq/README.md | 2 +- plugins/outputs/opentelemetry/README.md | 7 +- plugins/outputs/opentsdb/README.md | 48 +++-- plugins/outputs/prometheus_client/README.md | 4 +- plugins/outputs/riemann/README.md | 
20 +- plugins/outputs/sensu/README.md | 46 ++--- plugins/outputs/signalfx/README.md | 3 +- plugins/outputs/sql/README.md | 2 +- plugins/outputs/stackdriver/README.md | 4 +- plugins/outputs/sumologic/README.md | 12 +- plugins/outputs/syslog/README.md | 5 +- plugins/outputs/timestream/README.md | 25 +-- plugins/outputs/warp10/README.md | 4 +- plugins/outputs/wavefront/README.md | 41 ++-- plugins/outputs/websocket/README.md | 2 +- .../outputs/yandex_cloud_monitoring/README.md | 5 +- 48 files changed, 385 insertions(+), 359 deletions(-) diff --git a/plugins/outputs/amon/README.md b/plugins/outputs/amon/README.md index 3860e4371a50c..57ecf2e185f99 100644 --- a/plugins/outputs/amon/README.md +++ b/plugins/outputs/amon/README.md @@ -6,4 +6,4 @@ for the account. If the point value being sent cannot be converted to a float64, the metric is skipped. -Metrics are grouped by converting any `_` characters to `.` in the Point Name. \ No newline at end of file +Metrics are grouped by converting any `_` characters to `.` in the Point Name. diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index 04715f8e352ad..1c164a10ec21e 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -5,10 +5,12 @@ This plugin writes to a AMQP 0-9-1 Exchange, a prominent implementation of this This plugin does not bind the exchange to a queue. For an introduction to AMQP see: -- https://www.rabbitmq.com/tutorials/amqp-concepts.html -- https://www.rabbitmq.com/getstarted.html -### Configuration: +- [amqp: concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html) +- [rabbitmq: getting started](https://www.rabbitmq.com/getstarted.html) + +## Configuration + ```toml # Publishes metrics to an AMQP broker [[outputs.amqp]] @@ -107,7 +109,7 @@ For an introduction to AMQP see: # data_format = "influx" ``` -#### Routing +### Routing If `routing_tag` is set, and the tag is defined on the metric, the value of the tag is used as the routing key. Otherwise the value of `routing_key` is diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index b23f1affef06f..4beeb1ec85b82 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -2,12 +2,13 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azure.microsoft.com/en-us/services/application-insights/). -### Configuration: +## Configuration + ```toml [[outputs.application_insights]] ## Instrumentation key of the Application Insights resource. instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx" - + ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints # endpoint_url = "https://dc.services.visualstudio.com/v2/track" @@ -26,21 +27,21 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur # "ai.cloud.roleInstance" = "kubernetes_pod_name" ``` - -### Metric Encoding: +## Metric Encoding For each field an Application Insights Telemetry record is created named based on the measurement name and field. 
- **Example:** Create the telemetry records `foo_first` and `foo_second`: -``` + +```text foo,host=a first=42,second=43 1525293034000000000 ``` In the special case of a single field named `value`, a single telemetry record is created named using only the measurement name **Example:** Create a telemetry record `bar`: -``` + +```text bar,host=a value=42 1525293034000000000 ``` diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index db2aba469d292..96193f7fc7f65 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -1,14 +1,14 @@ # Azure Data Explorer output plugin -This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). Azure Data Explorer is a distributed, columnar store, purpose built for any type of logs, metrics and time series data. -## Pre-requisites: +## Pre-requisites + - [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal) - VM/compute or container to host Telegraf - it could be hosted locally where an app/service to be monitored is deployed or remotely on a dedicated monitoring compute/container. - -## Configuration: +## Configuration ```toml [[outputs.azure_data_explorer]] @@ -23,16 +23,16 @@ Azure Data Explorer is a distributed, columnar store, purpose built for any type ## Timeout for Azure Data Explorer operations # timeout = "20s" - - ## Type of metrics grouping used when pushing to Azure Data Explorer. - ## Default is "TablePerMetric" for one table per different metric. + + ## Type of metrics grouping used when pushing to Azure Data Explorer. + ## Default is "TablePerMetric" for one table per different metric. ## For more information, please check the plugin README. # metrics_grouping_type = "TablePerMetric" - + ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). # table_name = "" - ## Creates tables and relevant mapping if set to true(default). + ## Creates tables and relevant mapping if set to true(default). ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. # create_tables = true ``` @@ -47,60 +47,59 @@ The plugin will group the metrics by the metric name, and will send each group o The table name will match the `name` property of the metric, this means that the name of the metric should comply with the Azure Data Explorer table naming constraints in case you plan to add a prefix to the metric name. - ### SingleTable -The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command). - +The plugin will send all the metrics received to a single Azure Data Explorer table. 
The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command).

## Tables Schema

The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command generated by the plugin would be like the following:

-```
+
+```text
.create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime)
```

The corresponding table mapping would be like the following:

-```
+
+```text
.create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'
```

-**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. 
+**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands.

## Authentication

### Supported Authentication Methods

-This plugin provides several types of authentication. The plugin will check the existence of several specific environment variables, and consequently will choose the right method. 
-These methods are: 
+This plugin provides several types of authentication. The plugin will check the existence of several specific environment variables, and consequently will choose the right method.
+These methods are:

1. AAD Application Tokens (Service Principals with secrets or certificates).

For guidance on how to create and register an App in Azure Active Directory check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application), and for more information on the Service Principals check [this article](https://docs.microsoft.com/en-us/azure/active-directory/develop/app-objects-and-service-principals).

+2. AAD User Tokens
-2. AAD User Tokens
- - Allows Telegraf to authenticate like a user. This method is mainly used
- for development purposes only.
+ - Allows Telegraf to authenticate like a user. This method is mainly used for development purposes only.

3. Managed Service Identity (MSI) token
- - If you are running Telegraf from Azure VM or infrastructure, then this is the prefered authentication method.
+
+ - If you are running Telegraf from Azure VM or infrastructure, then this is the preferred authentication method.

[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects

-Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will
-allow the plugin to create the required tables and ingest data into it.
+Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will allow the plugin to create the required tables and ingest data into it.
If `create_tables=false` then the designated principal only needs the `Database Ingestor` role at least. - -### Configurations of the chosen Authentication Method +### Configurations of the chosen Authentication Method The plugin will authenticate using the first available of the following configurations, **it's important to understand that the assessment, and consequently choosing the authentication method, will happen in order as below**: 1. **Client Credentials**: Azure AD Application ID and Secret. - + Set the following environment variables: - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. @@ -130,73 +129,78 @@ following configurations, **it's important to understand that the assessment, an [msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview [arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview - ## Querying data collected in Azure Data Explorer + Examples of data transformations and queries that would be useful to gain insights - -1. **Data collected using SQL input plugin** - - Sample SQL metrics data - - - name | tags | timestamp | fields - -----|------|-----------|------- - sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149} - sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464} - - - Since collected metrics object is of complex type so "fields" and "tags" are stored as dynamic data type, multiple ways to query this data- - - - **Query JSON attributes directly**: Azure Data Explorer provides an ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in following way - - ``` - Tablename - | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7 - ``` - ``` - Tablename - | distinct tostring(tags.database_name) - ``` - **Note** - This approach could have performance impact in case of large volumes of data, use belwo mentioned approach for such cases. - - - **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using update policy. This is the recommended performant way for querying over large volumes of data compared to querying directly over JSON attributes. 
- - ``` - // Function to transform data - .create-or-alter function Transform_TargetTableName() { - SourceTableName - | mv-apply fields on (extend key = tostring(bag_keys(fields)[0])) - | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp - } - - // Create destination table with above query's results schema (if it doesn't exist already) - .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0 - - // Apply update policy on destination table - .alter table TargetTableName policy update - @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]' - ``` - -2. **Data collected using syslog input plugin** - - Sample syslog data - - - name | tags | timestamp | fields - -----|------|-----------|------- - syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1} - syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1} - - There are multiple ways to flatten dynamic columns using 'extend' or 'bag_unpack' operator. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' - - - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - This is the recommended approach compared to 'bag_unpack' as it is faster and robust. Even if schema changes, it will not break queries or dashboards. - ``` - Tablenmae - | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid= tolong(fields.procid), severity_code=toint(fields.severity_code), - SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version= todouble(fields.version), - appname= tostring(tags.appname), facility= tostring(tags.facility),host= tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity) - | project-away fields, tags - ``` - - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. This method could lead to issues if source schema changes as its dynamically expanding columns. 
- ``` - Tablename - | evaluate bag_unpack(tags, columnsConflict='replace_source') - | evaluate bag_unpack(fields, columnsConflict='replace_source') - ``` +### Using SQL input plugin + +Sample SQL metrics data - + +name | tags | timestamp | fields +-----|------|-----------|------- +sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149} +sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464} + +Since collected metrics object is of complex type so "fields" and "tags" are stored as dynamic data type, multiple ways to query this data- + +1. Query JSON attributes directly: Azure Data Explorer provides an ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in following way: + + ```text + Tablename + | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7 + ``` + + ```text + Tablename + | distinct tostring(tags.database_name) + ``` + + **Note** - This approach could have performance impact in case of large volumes of data, use belwo mentioned approach for such cases. + +1. Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using update policy. 
This is the recommended performant way for querying over large volumes of data compared to querying directly over JSON attributes: + + ```json + // Function to transform data + .create-or-alter function Transform_TargetTableName() { + SourceTableName + | mv-apply fields on (extend key = tostring(bag_keys(fields)[0])) + | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp + } + + // Create destination table with above query's results schema (if it doesn't exist already) + .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0 + + // Apply update policy on destination table + .alter table TargetTableName policy update + @'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]' + ``` + +### Using syslog input plugin + +Sample syslog data - + +name | tags | timestamp | fields +-----|------|-----------|------- +syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1} +syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1} + +There are multiple ways to flatten dynamic columns using 'extend' or 'bag_unpack' operator. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()' + +- Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - This is the recommended approach compared to 'bag_unpack' as it is faster and robust. Even if schema changes, it will not break queries or dashboards. + + ```text + Tablenmae + | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid= tolong(fields.procid), severity_code=toint(fields.severity_code), + SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version= todouble(fields.version), + appname= tostring(tags.appname), facility= tostring(tags.facility),host= tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity) + | project-away fields, tags + ``` + +- Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. This method could lead to issues if source schema changes as its dynamically expanding columns. + + ```text + Tablename + | evaluate bag_unpack(tags, columnsConflict='replace_source') + | evaluate bag_unpack(fields, columnsConflict='replace_source') + ``` diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 9d835c1eb6f4b..8f7bbb9cbfd33 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -14,7 +14,7 @@ metric is written as the Azure Monitor metric name. All field values are written as a summarized set that includes: min, max, sum, count. Tags are written as a dimension on each Azure Monitor metric. 
-### Configuration: +## Configuration ```toml [[outputs.azure_monitor]] @@ -47,12 +47,12 @@ written as a dimension on each Azure Monitor metric. # endpoint_url = "https://monitoring.core.usgovcloudapi.net" ``` -### Setup +## Setup 1. [Register the `microsoft.insights` resource provider in your Azure subscription][resource provider]. -2. If using Managed Service Identities to authenticate an Azure VM, +1. If using Managed Service Identities to authenticate an Azure VM, [enable system-assigned managed identity][enable msi]. -2. Use a region that supports Azure Monitor Custom Metrics, +1. Use a region that supports Azure Monitor Custom Metrics, For regions with Custom Metrics support, an endpoint will be available with the format `https://.monitoring.azure.com`. @@ -75,17 +75,18 @@ This plugin uses one of several different types of authenticate methods. The preferred authentication methods are different from the *order* in which each authentication is checked. Here are the preferred authentication methods: -1. Managed Service Identity (MSI) token - - This is the preferred authentication method. Telegraf will automatically - authenticate using this method when running on Azure VMs. +1. Managed Service Identity (MSI) token: This is the preferred authentication method. Telegraf will automatically authenticate using this method when running on Azure VMs. 2. AAD Application Tokens (Service Principals) - - Primarily useful if Telegraf is writing metrics for other resources. + + * Primarily useful if Telegraf is writing metrics for other resources. [More information][principal]. - - A Service Principal or User Principal needs to be assigned the `Monitoring + * A Service Principal or User Principal needs to be assigned the `Monitoring Metrics Publisher` role on the resource(s) metrics will be emitted against. + 3. AAD User Tokens (User Principals) - - Allows Telegraf to authenticate like a user. It is best to use this method + + * Allows Telegraf to authenticate like a user. It is best to use this method for development. [principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects @@ -93,30 +94,28 @@ authentication is checked. Here are the preferred authentication methods: The plugin will authenticate using the first available of the following configurations: -1. **Client Credentials**: Azure AD Application ID and Secret. - - Set the following environment variables: +1. **Client Credentials**: Azure AD Application ID and Secret. Set the following environment variables: - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. + * `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + * `AZURE_CLIENT_ID`: Specifies the app client ID to use. + * `AZURE_CLIENT_SECRET`: Specifies the app secret to use. -2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. +1. **Client Certificate**: Azure AD Application ID and X.509 Certificate. - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. - - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. + * `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + * `AZURE_CLIENT_ID`: Specifies the app client ID to use. 
+ * `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. + * `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. -3. **Resource Owner Password**: Azure AD User and Password. This grant type is +1. **Resource Owner Password**: Azure AD User and Password. This grant type is *not recommended*, use device login instead if you need interactive login. - - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. - - `AZURE_CLIENT_ID`: Specifies the app client ID to use. - - `AZURE_USERNAME`: Specifies the username to use. - - `AZURE_PASSWORD`: Specifies the password to use. + * `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + * `AZURE_CLIENT_ID`: Specifies the app client ID to use. + * `AZURE_USERNAME`: Specifies the username to use. + * `AZURE_PASSWORD`: Specifies the password to use. -4. **Azure Managed Service Identity**: Delegate credential management to the +1. **Azure Managed Service Identity**: Delegate credential management to the platform. Requires that code is running in Azure, e.g. on a VM. All configuration is handled by Azure. See [Azure Managed Service Identity][msi] for more details. Only available when using the [Azure Resource Manager][arm]. @@ -127,7 +126,7 @@ following configurations: **Note: As shown above, the last option (#4) is the preferred way to authenticate when running Telegraf on Azure VMs. -### Dimensions +## Dimensions Azure Monitor only accepts values with a numeric type. The plugin will drop fields with a string type by default. The plugin can set all string type fields diff --git a/plugins/outputs/bigquery/README.md b/plugins/outputs/bigquery/README.md index 9515711d50a75..8ca265cc05ea0 100644 --- a/plugins/outputs/bigquery/README.md +++ b/plugins/outputs/bigquery/README.md @@ -1,11 +1,11 @@ # Google BigQuery Output Plugin -This plugin writes to the [Google Cloud BigQuery](https://cloud.google.com/bigquery) and requires [authentication](https://cloud.google.com/bigquery/docs/authentication) +This plugin writes to the [Google Cloud BigQuery](https://cloud.google.com/bigquery) and requires [authentication](https://cloud.google.com/bigquery/docs/authentication) with Google Cloud using either a service account or user credentials. Be aware that this plugin accesses APIs that are [chargeable](https://cloud.google.com/bigquery/pricing) and might incur costs. -### Configuration +## Configuration ```toml [[outputs.bigquery]] @@ -21,17 +21,19 @@ Be aware that this plugin accesses APIs that are [chargeable](https://cloud.goog ## Character to replace hyphens on Metric name # replace_hyphen_to = "_" ``` + Requires `project` to specify where BigQuery entries will be persisted. Requires `dataset` to specify under which BigQuery dataset the corresponding metrics tables reside. -Each metric should have a corresponding table to BigQuery. +Each metric should have a corresponding table to BigQuery. The schema of the table on BigQuery: + * Should contain the field `timestamp` which is the timestamp of a telegraph metrics * Should contain the metric's tags with the same name and the column type should be set to string. * Should contain the metric's fields with the same name and the column type should match the field type. -### Restrictions +## Restrictions Avoid hyphens on BigQuery tables, underlying SDK cannot handle streaming inserts to Table with hyphens. 
@@ -41,6 +43,7 @@ In case of a metric with hyphen by default hyphens shall be replaced with unders This can be altered using the `replace_hyphen_to` configuration property. Available data type options are: + * integer * float or long * string @@ -50,5 +53,5 @@ All field naming restrictions that apply to BigQuery should apply to the measure Tables on BigQuery should be created beforehand and they are not created during persistence -Pay attention to the column `timestamp` since it is reserved upfront and cannot change. +Pay attention to the column `timestamp` since it is reserved upfront and cannot change. If partitioning is required make sure it is applied beforehand. diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md index d3d5e2fe30063..6274e1dac62cc 100644 --- a/plugins/outputs/cloud_pubsub/README.md +++ b/plugins/outputs/cloud_pubsub/README.md @@ -3,8 +3,7 @@ The GCP PubSub plugin publishes metrics to a [Google Cloud PubSub][pubsub] topic as one of the supported [output data formats][]. - -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage cloud_pubsub`. @@ -24,9 +23,9 @@ generate it using `telegraf --usage cloud_pubsub`. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" ## Optional. If true, will send all metrics per write in one PubSub message. diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index 56436c3c58d73..ff62726de6657 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -1,4 +1,4 @@ -## Amazon CloudWatch Output for Telegraf +# Amazon CloudWatch Output for Telegraf This plugin will send metrics to Amazon CloudWatch. @@ -6,13 +6,14 @@ This plugin will send metrics to Amazon CloudWatch. This plugin uses a credential chain for Authentication with the CloudWatch API endpoint. In the following order the plugin will attempt to authenticate. + 1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified -2. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) -3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes -4. Shared profile from `profile` attribute -5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) -6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) -7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +1. Explicit credentials from `access_key`, `secret_key`, and `token` attributes +1. Shared profile from `profile` attribute +1. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +1. 
[Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
+1. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)

If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If left empty, the current timestamp will be used.

@@ -31,6 +32,7 @@ must be configured.

The region is the Amazon region that you wish to connect to. Examples include but are not limited to:
+
* us-west-1
* us-west-2
* us-east-1

@@ -43,13 +45,14 @@ The namespace used for AWS CloudWatch metrics.

### write_statistics

-If you have a large amount of metrics, you should consider to send statistic 
-values instead of raw metrics which could not only improve performance but 
-also save AWS API cost. If enable this flag, this plugin would parse the required 
-[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet) 
-(count, min, max, and sum) and send them to CloudWatch. You could use `basicstats` 
-aggregator to calculate those fields. If not all statistic fields are available, 
+If you have a large amount of metrics, you should consider sending statistic
+values instead of raw metrics, which could not only improve performance but
+also save AWS API cost. If you enable this flag, this plugin will parse the required
+[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet)
+(count, min, max, and sum) and send them to CloudWatch. You could use `basicstats`
+aggregator to calculate those fields. If not all statistic fields are available,
all fields would still be sent as raw metrics.

### high_resolution_metrics
-Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision)
\ No newline at end of file
+
+Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision)
diff --git a/plugins/outputs/cloudwatch_logs/README.md b/plugins/outputs/cloudwatch_logs/README.md
index ab745d877ff9c..9898f9e842c54 100644
--- a/plugins/outputs/cloudwatch_logs/README.md
+++ b/plugins/outputs/cloudwatch_logs/README.md
@@ -1,4 +1,4 @@
-## Amazon CloudWatch Logs Output for Telegraf
+# Amazon CloudWatch Logs Output for Telegraf

This plugin will send logs to Amazon CloudWatch.

@@ -6,21 +6,24 @@ This plugin uses a credential chain for Authentication with the CloudWatch Logs
API endpoint. In the following order the plugin will attempt to authenticate.
+
1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified
-2. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
-3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
-4. Shared profile from `profile` attribute
-5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables)
-6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file)
-7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html)
+1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
+1. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
+1. Shared profile from `profile` attribute
+1. 
[Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +1. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +1. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) + +The IAM user needs the following permissions (see this [reference](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/permissions-reference-cwl.html) for more): -The IAM user needs the following permissions ( https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/permissions-reference-cwl.html): -- `logs:DescribeLogGroups` - required for check if configured log group exist -- `logs:DescribeLogStreams` - required to view all log streams associated with a log group. -- `logs:CreateLogStream` - required to create a new log stream in a log group.) -- `logs:PutLogEvents` - required to upload a batch of log events into log stream. +- `logs:DescribeLogGroups` - required for check if configured log group exist +- `logs:DescribeLogStreams` - required to view all log streams associated with a log group. +- `logs:CreateLogStream` - required to create a new log stream in a log group.) +- `logs:PutLogEvents` - required to upload a batch of log events into log stream. ## Config + ```toml [[outputs.cloudwatch_logs]] ## The region is the Amazon region that you wish to connect to. @@ -50,7 +53,7 @@ The IAM user needs the following permissions ( https://docs.aws.amazon.com/Amazo #role_session_name = "" #profile = "" #shared_credential_file = "" - + ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the ## default. @@ -59,24 +62,24 @@ The IAM user needs the following permissions ( https://docs.aws.amazon.com/Amazo ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place - log_group = "my-group-name" - + log_group = "my-group-name" + ## Log stream in log group ## Either log group name or reference to metric attribute, from which it can be parsed: ## tag: or field:. If log stream is not exist, it will be created. - ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) + ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) log_stream = "tag:location" - + ## Source of log data - metric name ## specify the name of the metric, from which the log data should be retrieved. ## I.e., if you are using docker_log plugin to stream logs from container, then ## specify log_data_metric_name = "docker_log" log_data_metric_name = "docker_log" - + ## Specify from which metric attribute the log data should be retrieved: ## tag: or field:. 
## I.e., if you are using docker_log plugin to stream logs from container, then - ## specify log_data_source = "field:message" + ## specify log_data_source = "field:message" log_data_source = "field:message" -``` \ No newline at end of file +``` diff --git a/plugins/outputs/cratedb/README.md b/plugins/outputs/cratedb/README.md index 11214092d26c2..63a9ba4f96fb4 100644 --- a/plugins/outputs/cratedb/README.md +++ b/plugins/outputs/cratedb/README.md @@ -6,7 +6,6 @@ This plugin writes to [CrateDB](https://crate.io/) via its [PostgreSQL protocol] The plugin requires a table with the following schema. - ```sql CREATE TABLE my_metrics ( "hash_id" LONG INDEX OFF, diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index f9dd3fb0ef922..dc709449b081b 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -3,8 +3,7 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an `apikey` which can be obtained [here][apikey] for the account. - -### Configuration +## Configuration ```toml [[outputs.datadog]] @@ -21,7 +20,7 @@ This plugin writes to the [Datadog Metrics API][metrics] and requires an # http_proxy_url = "http://localhost:8888" ``` -### Metrics +## Metrics Datadog metric names are formed by joining the Telegraf metric name and the field key with a `.` character. diff --git a/plugins/outputs/discard/README.md b/plugins/outputs/discard/README.md index e1c70b742450a..c86d389fa15b0 100644 --- a/plugins/outputs/discard/README.md +++ b/plugins/outputs/discard/README.md @@ -3,7 +3,7 @@ This output plugin simply drops all metrics that are sent to it. It is only meant to be used for testing purposes. -### Configuration: +## Configuration ```toml # Send metrics to nowhere at all diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index f25b8708942d6..2776fa23eb169 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -34,13 +34,13 @@ Note: The name and identifier of the host running Telegraf will be added as a di If you run the Telegraf agent on a host or VM without a OneAgent you will need to configure the environment API endpoint to send the metrics to and an API token for security. -You will also need to configure an API token for secure access. Find out how to create a token in the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with +You will also need to configure an API token for secure access. Find out how to create a token in the [Dynatrace documentation](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/) or simply navigate to **Settings > Integration > Dynatrace API** in your Dynatrace environment and create a token with Dynatrace API and create a new token with 'Ingest metrics' (`metrics.ingest`) scope enabled. It is recommended to limit Token scope to only this permission. 
-The endpoint for the Dynatrace Metrics API v2 is +The endpoint for the Dynatrace Metrics API v2 is -* on Dynatrace Managed: `https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest` -* on Dynatrace SaaS: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest` +- on Dynatrace Managed: `https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest` +- on Dynatrace SaaS: `https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest` ```toml [[outputs.dynatrace]] diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 2616ff1a64d08..41001ee89e282 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -4,7 +4,7 @@ This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Ela It supports Elasticsearch releases from 5.x up to 7.x. -### Elasticsearch indexes and templates +## Elasticsearch indexes and templates ### Indexes per time-frame @@ -12,12 +12,12 @@ This plugin can manage indexes per time-frame, as commonly done in other tools w The timestamp of the metric collected will be used to decide the index destination. -For more information about this usage on Elasticsearch, check https://www.elastic.co/guide/en/elasticsearch/guide/master/time-based.html#index-per-timeframe +For more information about this usage on Elasticsearch, check [the docs](https://www.elastic.co/guide/en/elasticsearch/guide/master/time-based.html#index-per-timeframe). ### Template management Index templates are used in Elasticsearch to define settings and mappings for the indexes and how the fields should be analyzed. -For more information on how this works, see https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html +For more information on how this works, see [the docs](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html). This plugin can create a working template for use with telegraf metrics. It uses Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields. If the template specified already exists, it will not overwrite unless you configure this plugin to do so. Thus you can customize this template after its creation if necessary. @@ -98,7 +98,7 @@ Example of an index template created by telegraf on Elasticsearch 5.x: ``` -### Example events: +### Example events This plugin will format the events in the following way: @@ -144,7 +144,7 @@ This plugin will format the events in the following way: } ``` -### Configuration +## Configuration ```toml [[outputs.elasticsearch]] @@ -201,7 +201,7 @@ This plugin will format the events in the following way: force_document_id = false ``` -#### Permissions +### Permissions If you are using authentication within your Elasticsearch cluster, you need to create a account and create a role with at least the manage role in the @@ -210,7 +210,7 @@ connect to your Elasticsearch cluster and send logs to your cluster. After that, you need to add "create_indice" and "write" permission to your specific index pattern. -#### Required parameters: +### Required parameters * `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance. * `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame. @@ -225,7 +225,7 @@ index pattern. Additionally, you can specify dynamic index names by using tags with the notation ```{{tag_name}}```. 
This will store the metrics with different tag values in different indices. If the tag does not exist in a particular metric, the `default_tag_value` will be used instead. -#### Optional parameters: +### Optional parameters * `timeout`: Elasticsearch client timeout, defaults to "5s" if not set. * `enable_sniffer`: Set to true to ask Elasticsearch a list of all cluster nodes, thus it is not necessary to list all nodes in the urls config option. @@ -237,7 +237,7 @@ Additionally, you can specify dynamic index names by using tags with the notatio * `overwrite_template`: Set to true if you want telegraf to overwrite an existing template. * `force_document_id`: Set to true will compute a unique hash from as sha256(concat(timestamp,measurement,series-hash)),enables resend or update data withoud ES duplicated documents. -### Known issues +## Known issues Integer values collected that are bigger than 2^63 and smaller than 1e21 (or in this exact same window of their negative counterparts) are encoded by golang JSON encoder in decimal format and that is not fully supported by Elasticsearch dynamic field mapping. This causes the metrics with such values to be dropped in case a field mapping has not been created yet on the telegraf index. If that's the case you will see an exception on Elasticsearch side like this: diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md index 7e19b9a8475c6..60b4ac385b72d 100644 --- a/plugins/outputs/exec/README.md +++ b/plugins/outputs/exec/README.md @@ -4,13 +4,15 @@ This plugin sends telegraf metrics to an external application over stdin. The command should be defined similar to docker's `exec` form: - ["executable", "param1", "param2"] +```text +["executable", "param1", "param2"] +``` On non-zero exit stderr will be logged at error level. For better performance, consider execd, which runs continuously. -### Configuration +## Configuration ```toml [[outputs.exec]] diff --git a/plugins/outputs/execd/README.md b/plugins/outputs/execd/README.md index 8569c1033fcea..5b2124625565e 100644 --- a/plugins/outputs/execd/README.md +++ b/plugins/outputs/execd/README.md @@ -4,7 +4,7 @@ The `execd` plugin runs an external program as a daemon. Telegraf minimum version: Telegraf 1.15.0 -### Configuration: +## Configuration ```toml [[outputs.execd]] @@ -22,7 +22,7 @@ Telegraf minimum version: Telegraf 1.15.0 data_format = "influx" ``` -### Example +## Example see [examples][] diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index 45d0ac1556c47..2e6a12d976e39 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -2,7 +2,7 @@ This plugin writes telegraf metrics to files -### Configuration +## Configuration ```toml [[outputs.file]] diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md index b6b36cfcab4a0..ddd85278fad8f 100644 --- a/plugins/outputs/graphite/README.md +++ b/plugins/outputs/graphite/README.md @@ -6,7 +6,7 @@ via raw TCP. For details on the translation between Telegraf Metrics and Graphite output, see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) -### Configuration: +## Configuration ```toml # Configuration for Graphite server to send metrics to diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 96e290b09f5a6..d596021487665 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -4,7 +4,7 @@ This plugin writes to a Graylog instance using the "[GELF][]" format. 
[GELF]: https://docs.graylog.org/en/3.1/pages/gelf.html#gelf-payload-specification -### Configuration: +## Configuration ```toml [[outputs.graylog]] diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md index 0a56d51928ff5..a88417f631ca9 100644 --- a/plugins/outputs/health/README.md +++ b/plugins/outputs/health/README.md @@ -7,7 +7,8 @@ When the plugin is healthy it will return a 200 response; when unhealthy it will return a 503 response. The default state is healthy, one or more checks must fail in order for the resource to enter the failed state. -### Configuration +## Configuration + ```toml [[outputs.health]] ## Address and port to listen on. @@ -48,7 +49,7 @@ must fail in order for the resource to enter the failed state. ## field = "buffer_size" ``` -#### compares +### compares The `compares` check is used to assert basic mathematical relationships. Use it by choosing a field key and one or more comparisons that must hold true. If @@ -56,7 +57,7 @@ the field is not found on a metric no comparison will be made. Comparisons must be hold true on all metrics for the check to pass. -#### contains +### contains The `contains` check can be used to require a field key to exist on at least one metric. diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 9097792628d66..99206a8bb18f4 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -4,7 +4,7 @@ This plugin sends metrics in a HTTP message encoded using one of the output data formats. For data_formats that support batching, metrics are sent in batch format by default. -### Configuration: +## Configuration ```toml # A plugin that can transmit metrics over HTTP @@ -70,6 +70,6 @@ batch format by default. # idle_conn_timeout = 0 ``` -### Optional Cookie Authentication Settings: +### Optional Cookie Authentication Settings The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index 36fde827e176a..6624adfaecec6 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -2,7 +2,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP service. -### Configuration: +## Configuration ```toml # Configuration for sending metrics to InfluxDB @@ -84,7 +84,8 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser # influx_uint_support = false ``` -### Metrics +## Metrics + Reference the [influx serializer][] for details about metric production. [InfluxDB v1.x]: https://github.com/influxdata/influxdb diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md index b176fffcd31e1..3b9ddf6822a78 100644 --- a/plugins/outputs/influxdb_v2/README.md +++ b/plugins/outputs/influxdb_v2/README.md @@ -2,7 +2,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. 
-### Configuration: +## Configuration ```toml # Configuration for sending metrics to InfluxDB 2.0 @@ -58,8 +58,8 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. # insecure_skip_verify = false ``` -### Metrics - +## Metrics + Reference the [influx serializer][] for details about metric production. [InfluxDB v2.x]: https://github.com/influxdata/influxdb diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md index f8b48fd1ea124..65113aecccee4 100644 --- a/plugins/outputs/instrumental/README.md +++ b/plugins/outputs/instrumental/README.md @@ -7,7 +7,7 @@ Instrumental accepts stats in a format very close to Graphite, with the only dif the type of stat (gauge, increment) is the first token, separated from the metric itself by whitespace. The `increment` type is only used if the metric comes in as a counter through `[[input.statsd]]`. -## Configuration: +## Configuration ```toml [[outputs.instrumental]] diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 54108d8be4398..5f3c2f5eac381 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -2,7 +2,8 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.html) acting a Kafka Producer. -### Configuration: +## Configuration + ```toml [[outputs.kafka]] ## URLs of kafka brokers @@ -80,7 +81,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## 3 : LZ4 ## 4 : ZSTD # compression_codec = 0 - + ## Idempotent Writes ## If enabled, exactly one copy of each message is written. # idempotent_writes = false @@ -146,7 +147,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # data_format = "influx" ``` -#### `max_retry` +### `max_retry` This option controls the number of retries before a failure notification is displayed for each message when no acknowledgement is received from the diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 2d909090b69ad..b5f9422f891d9 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -1,4 +1,4 @@ -## Amazon Kinesis Output for Telegraf +# Amazon Kinesis Output for Telegraf This is an experimental plugin that is still in the early stages of development. It will batch up all of the Points in one Put request to Kinesis. This should save the number of API requests by a considerable level. @@ -13,18 +13,18 @@ maybe useful for users to review Amazons official documentation which is availab This plugin uses a credential chain for Authentication with the Kinesis API endpoint. In the following order the plugin will attempt to authenticate. + 1. Web identity provider credentials via STS if `role_arn` and `web_identity_token_file` are specified -2. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) -3. Explicit credentials from `access_key`, `secret_key`, and `token` attributes -4. Shared profile from `profile` attribute -5. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) -6. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) -7. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) +1. 
Explicit credentials from `access_key`, `secret_key`, and `token` attributes +1. Shared profile from `profile` attribute +1. [Environment Variables](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#environment-variables) +1. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) +1. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) If you are using credentials from a web identity provider, you can specify the session name using `role_session_name`. If left empty, the current timestamp will be used. - ## Config For this output plugin to function correctly the following variables must be configured. @@ -35,6 +35,7 @@ For this output plugin to function correctly the following variables must be con ### region The region is the Amazon region that you wish to connect to. Examples include but are not limited to + * us-west-1 * us-west-2 * us-east-1 diff --git a/plugins/outputs/librato/README.md b/plugins/outputs/librato/README.md index 731b9dbd2f53e..685c36432896a 100644 --- a/plugins/outputs/librato/README.md +++ b/plugins/outputs/librato/README.md @@ -9,4 +9,4 @@ Point Tags to the API. If the point value being sent cannot be converted to a float64, the metric is skipped. -Currently, the plugin does not send any associated Point Tags. \ No newline at end of file +Currently, the plugin does not send any associated Point Tags. diff --git a/plugins/outputs/logzio/README.md b/plugins/outputs/logzio/README.md index 5cf61233e3274..fd1912e26588f 100644 --- a/plugins/outputs/logzio/README.md +++ b/plugins/outputs/logzio/README.md @@ -2,7 +2,7 @@ This plugin sends metrics to Logz.io over HTTPs. -### Configuration: +## Configuration ```toml # A plugin that can send metrics over HTTPs to Logz.io @@ -30,14 +30,14 @@ This plugin sends metrics to Logz.io over HTTPs. # url = "https://listener.logz.io:8071" ``` -### Required parameters: +### Required parameters * `token`: Your Logz.io token, which can be found under "settings" in your account. -### Optional parameters: +### Optional parameters * `check_disk_space`: Set to true if Logz.io sender checks the disk space before adding metrics to the disk queue. * `disk_threshold`: If the queue_dir space crosses this threshold (in % of disk usage), the plugin will start dropping logs. * `drain_duration`: Time to sleep between sending attempts. * `queue_dir`: Metrics disk path. All the unsent metrics are saved to the disk in this location. -* `url`: Logz.io listener URL. \ No newline at end of file +* `url`: Logz.io listener URL. diff --git a/plugins/outputs/loki/README.md b/plugins/outputs/loki/README.md index 6c7eb91c8916a..400ab71a9b668 100644 --- a/plugins/outputs/loki/README.md +++ b/plugins/outputs/loki/README.md @@ -1,11 +1,11 @@ # Loki Output Plugin -This plugin sends logs to Loki, using metric name and tags as labels, +This plugin sends logs to Loki, using metric name and tags as labels, log line will content all fields in `key="value"` format which is easily parsable with `logfmt` parser in Loki. Logs within each stream are sorted by timestamp before being sent to Loki. 
-### Configuration: +## Configuration ```toml # A plugin that can transmit logs to Loki diff --git a/plugins/outputs/mongodb/README.md b/plugins/outputs/mongodb/README.md index 0f9ca99730772..05a0cf7f1e194 100644 --- a/plugins/outputs/mongodb/README.md +++ b/plugins/outputs/mongodb/README.md @@ -3,7 +3,7 @@ This plugin sends metrics to MongoDB and automatically creates the collections as time series collections when they don't already exist. **Please note:** Requires MongoDB 5.0+ for Time Series Collections -### Configuration: +## Configuration ```toml # A plugin that can transmit logs to mongodb @@ -33,11 +33,11 @@ This plugin sends metrics to MongoDB and automatically creates the collections a # database to store measurements and time series collections # database = "telegraf" - # granularity can be seconds, minutes, or hours. - # configuring this value will be based on your input collection frequency. + # granularity can be seconds, minutes, or hours. + # configuring this value will be based on your input collection frequency. # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection - # granularity = "seconds" + # granularity = "seconds" # optionally set a TTL to automatically expire documents from the measurement collections. - # ttl = "360h" -``` \ No newline at end of file + # ttl = "360h" +``` diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index f82d7597c5bea..64d8c16b3c443 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -40,8 +40,8 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## When true, messages will have RETAIN flag set. # retain = false - ## Defines the maximum length of time that the broker and client may not communicate. - ## Defaults to 0 which turns the feature off. For version v2.0.12 mosquitto there is a + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 mosquitto there is a ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. # keep_alive = 0 @@ -50,13 +50,14 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt # data_format = "influx" ``` -### Required parameters: +## Required parameters * `servers`: List of strings, this is for speaking to a cluster of `mqtt` brokers. On each flush interval, Telegraf will randomly choose one of the urls to write to. Each URL should just include host and port e.g. -> `["{host}:{port}","{host2}:{port2}"]` -* `topic_prefix`: The `mqtt` topic prefix to publish to. MQTT outputs send metrics to this topic format "///" ( ex: prefix/web01.example.com/mem) -* `qos`: The `mqtt` QoS policy for sending messages. See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm for details. +* `topic_prefix`: The `mqtt` topic prefix to publish to. MQTT outputs send metrics to this topic format `///` ( ex: `prefix/web01.example.com/mem`) +* `qos`: The `mqtt` QoS policy for sending messages. See [these docs](https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm) for details. + +## Optional parameters -### Optional parameters: * `username`: The username to connect MQTT server. * `password`: The password to connect MQTT server. * `client_id`: The unique client id to connect MQTT server. 
If this parameter is not set then a random ID is generated. @@ -68,4 +69,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) -* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md index e15bedb4bdcb4..54e34e8250f12 100644 --- a/plugins/outputs/newrelic/README.md +++ b/plugins/outputs/newrelic/README.md @@ -6,7 +6,8 @@ To use this plugin you must first obtain an [Insights API Key][]. Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration + ```toml [[outputs.newrelic]] ## New Relic Insights API key @@ -17,13 +18,13 @@ Telegraf minimum version: Telegraf 1.15.0 ## Timeout for writes to the New Relic API. # timeout = "15s" - + ## HTTP Proxy override. If unset use values from the standard ## proxy environment variables to determine proxy, if any. # http_proxy = "http://corporate.proxy:3128" ## Metric URL override to enable geographic location endpoints. - # If not set use values from the standard + # If not set use values from the standard # metric_url = "https://metric-api.newrelic.com/metric/v1" ``` diff --git a/plugins/outputs/nsq/README.md b/plugins/outputs/nsq/README.md index 61b4dad98a107..bf5958d32fe75 100644 --- a/plugins/outputs/nsq/README.md +++ b/plugins/outputs/nsq/README.md @@ -1,4 +1,4 @@ # NSQ Output Plugin This plugin writes to a specified NSQD instance, usually local to the producer. It requires -a `server` name and a `topic` name. \ No newline at end of file +a `server` name and a `topic` name. diff --git a/plugins/outputs/opentelemetry/README.md b/plugins/outputs/opentelemetry/README.md index e6b4ebdfc6aad..1355401908fa6 100644 --- a/plugins/outputs/opentelemetry/README.md +++ b/plugins/outputs/opentelemetry/README.md @@ -2,7 +2,7 @@ This plugin sends metrics to [OpenTelemetry](https://opentelemetry.io) servers and agents via gRPC. -### Configuration +## Configuration ```toml [[outputs.opentelemetry]] @@ -39,11 +39,11 @@ This plugin sends metrics to [OpenTelemetry](https://opentelemetry.io) servers a # key1 = "value1" ``` -#### Schema +### Schema The InfluxDB->OpenTelemetry conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/influx2otel) -are hosted at https://github.com/influxdata/influxdb-observability . +are hosted on [GitHub](https://github.com/influxdata/influxdb-observability). For metrics, two input schemata exist. Line protocol with measurement name `prometheus` is assumed to have a schema @@ -51,6 +51,7 @@ matching [Prometheus input plugin](../../inputs/prometheus/README.md) when `metr Line protocol with other measurement names is assumed to have schema matching [Prometheus input plugin](../../inputs/prometheus/README.md) when `metric_version = 1`. 
If both schema assumptions fail, then the line protocol data is interpreted as: + - Metric type = gauge (or counter, if indicated by the input plugin) - Metric name = `[measurement]_[field key]` - Metric value = line protocol field value, cast to float diff --git a/plugins/outputs/opentsdb/README.md b/plugins/outputs/opentsdb/README.md index f737d48ae7e94..b89c6c8a5a8b1 100644 --- a/plugins/outputs/opentsdb/README.md +++ b/plugins/outputs/opentsdb/README.md @@ -6,26 +6,26 @@ Using the Http API is the recommended way of writing metrics since OpenTSDB 2.0 To use Http mode, set useHttp to true in config. You can also control how many metrics is sent in each http request by setting batchSize in config. -See http://opentsdb.net/docs/build/html/api_http/put.html for details. +See [the docs](http://opentsdb.net/docs/build/html/api_http/put.html) for details. ## Transfer "Protocol" in the telnet mode The expected input from OpenTSDB is specified in the following way: -``` +```text put ``` The telegraf output plugin adds an optional prefix to the metric keys so that a subamount can be selected. -``` +```text put <[prefix.]metric> ``` ### Example -``` +```text put nine.telegraf.system_load1 1441910356 0.430000 dc=homeoffice host=irimame scope=green put nine.telegraf.system_load5 1441910356 0.580000 dc=homeoffice host=irimame scope=green put nine.telegraf.system_load15 1441910356 0.730000 dc=homeoffice host=irimame scope=green @@ -44,8 +44,6 @@ put nine.telegraf.ping_average_response_ms 1441910366 24.006000 dc=homeoffice ho ... ``` -## - The OpenTSDB telnet interface can be simulated with this reader: ```go @@ -53,28 +51,28 @@ The OpenTSDB telnet interface can be simulated with this reader: package main import ( - "io" - "log" - "net" - "os" + "io" + "log" + "net" + "os" ) func main() { - l, err := net.Listen("tcp", "localhost:4242") - if err != nil { - log.Fatal(err) - } - defer l.Close() - for { - conn, err := l.Accept() - if err != nil { - log.Fatal(err) - } - go func(c net.Conn) { - defer c.Close() - io.Copy(os.Stdout, c) - }(conn) - } + l, err := net.Listen("tcp", "localhost:4242") + if err != nil { + log.Fatal(err) + } + defer l.Close() + for { + conn, err := l.Accept() + if err != nil { + log.Fatal(err) + } + go func(c net.Conn) { + defer c.Close() + io.Copy(os.Stdout, c) + }(conn) + } } ``` diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 844cf3f2d1790..085fc4649af67 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -3,7 +3,7 @@ This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all metrics on `/metrics` (default) to be polled by a Prometheus server. -### Configuration +## Configuration ```toml [[outputs.prometheus_client]] @@ -52,7 +52,7 @@ all metrics on `/metrics` (default) to be polled by a Prometheus server. # export_timestamp = false ``` -### Metrics +## Metrics Prometheus metrics are produced in the same manner as the [prometheus serializer][]. diff --git a/plugins/outputs/riemann/README.md b/plugins/outputs/riemann/README.md index 82615728cbabe..d50dc9f9bc045 100644 --- a/plugins/outputs/riemann/README.md +++ b/plugins/outputs/riemann/README.md @@ -2,7 +2,7 @@ This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP. -### Configuration: +## Configuration ```toml # Configuration for Riemann to send metrics to @@ -39,11 +39,11 @@ This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP. 
# timeout = "5s" ``` -### Required parameters: +### Required parameters * `url`: The full TCP or UDP URL of the Riemann server to send events to. -### Optional parameters: +### Optional parameters * `ttl`: Riemann event TTL, floating-point time in seconds. Defines how long that an event is considered valid for in Riemann. * `separator`: Separator to use between measurement and field name in Riemann service name. @@ -53,24 +53,27 @@ This plugin writes to [Riemann](http://riemann.io/) via TCP or UDP. * `tags`: Additional Riemann tags that will be sent. * `description_text`: Description text for Riemann event. -### Example Events: +## Example Events Riemann event emitted by Telegraf with default configuration: -``` + +```text #riemann.codec.Event{ :host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state nil, :description nil, :ttl 30.0, :service "disk/used_percent", :metric 73.16736001949994, :path "/boot", :fstype "ext4", :time 1475605021} ``` Telegraf emitting the same Riemann event with `measurement_as_attribute` set to `true`: -``` + +```text #riemann.codec.Event{ ... :measurement "disk", :service "used_percent", :metric 73.16736001949994, ... :time 1475605021} ``` Telegraf emitting the same Riemann event with additional Riemann tags defined: -``` + +```text #riemann.codec.Event{ :host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state nil, :description nil, :ttl 30.0, :service "disk/used_percent", :metric 73.16736001949994, :path "/boot", :fstype "ext4", :time 1475605021, @@ -78,7 +81,8 @@ Telegraf emitting the same Riemann event with additional Riemann tags defined: ``` Telegraf emitting a Riemann event with a status text and `string_as_state` set to `true`, and a `description_text` defined: -``` + +```text #riemann.codec.Event{ :host "postgresql-1e612b44-e92f-4d27-9f30-5e2f53947870", :state "Running", :ttl 30.0, :description "PostgreSQL master node is up and running", diff --git a/plugins/outputs/sensu/README.md b/plugins/outputs/sensu/README.md index f21159c6426e0..3d6c7d53d08fa 100644 --- a/plugins/outputs/sensu/README.md +++ b/plugins/outputs/sensu/README.md @@ -1,14 +1,14 @@ # Sensu Go Output Plugin -This plugin writes metrics events to [Sensu Go](https://sensu.io) via its +This plugin writes metrics events to [Sensu Go](https://sensu.io) via its HTTP events API. -### Configuration: +## Configuration ```toml [[outputs.sensu]] - ## BACKEND API URL is the Sensu Backend API root URL to send metrics to - ## (protocol, host, and port only). The output plugin will automatically + ## BACKEND API URL is the Sensu Backend API root URL to send metrics to + ## (protocol, host, and port only). The output plugin will automatically ## append the corresponding backend API path ## /api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name). ## @@ -16,34 +16,34 @@ HTTP events API. ## https://docs.sensu.io/sensu-go/latest/api/events/ ## ## AGENT API URL is the Sensu Agent API root URL to send metrics to - ## (protocol, host, and port only). The output plugin will automatically + ## (protocol, host, and port only). The output plugin will automatically ## append the correspeonding agent API path (/events). ## ## Agent API Events API reference: ## https://docs.sensu.io/sensu-go/latest/api/events/ - ## - ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output - ## plugin will use backend_api_url. 
If backend_api_url and agent_api_url are - ## not provided, the output plugin will default to use an agent_api_url of + ## + ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output + ## plugin will use backend_api_url. If backend_api_url and agent_api_url are + ## not provided, the output plugin will default to use an agent_api_url of ## http://127.0.0.1:3031 - ## + ## # backend_api_url = "http://127.0.0.1:8080" # agent_api_url = "http://127.0.0.1:3031" - ## API KEY is the Sensu Backend API token - ## Generate a new API token via: - ## + ## API KEY is the Sensu Backend API token + ## Generate a new API token via: + ## ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf - ## $ sensuctl user create telegraf --group telegraf --password REDACTED + ## $ sensuctl user create telegraf --group telegraf --password REDACTED ## $ sensuctl api-key grant telegraf ## - ## For more information on Sensu RBAC profiles & API tokens, please visit: + ## For more information on Sensu RBAC profiles & API tokens, please visit: ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/ - ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ - ## + ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/ + ## # api_key = "${SENSU_API_KEY}" - + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -58,7 +58,7 @@ HTTP events API. ## compress body or "identity" to apply no encoding. # content_encoding = "identity" - ## Sensu Event details + ## Sensu Event details ## ## Below are the event details to be sent to Sensu. The main portions of the ## event are the check, entity, and metrics specifications. For more information @@ -67,7 +67,7 @@ HTTP events API. ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics - ## + ## ## Check specification ## The check name is the name to give the Sensu check associated with the event ## created. This maps to check.metatadata.name in the event. @@ -78,10 +78,10 @@ HTTP events API. ## Configure the entity name and namespace, if necessary. This will be part of ## the entity.metadata in the event. ## - ## NOTE: if the output plugin is configured to send events to a - ## backend_api_url and entity_name is not set, the value returned by + ## NOTE: if the output plugin is configured to send events to a + ## backend_api_url and entity_name is not set, the value returned by ## os.Hostname() will be used; if the output plugin is configured to send - ## events to an agent_api_url, entity_name and entity_namespace are not used. + ## events to an agent_api_url, entity_name and entity_namespace are not used. # [outputs.sensu.entity] # name = "server-01" # namespace = "default" diff --git a/plugins/outputs/signalfx/README.md b/plugins/outputs/signalfx/README.md index 00b39cf30e229..09b7f41db53c4 100644 --- a/plugins/outputs/signalfx/README.md +++ b/plugins/outputs/signalfx/README.md @@ -2,7 +2,8 @@ The SignalFx output plugin sends metrics to [SignalFx](https://docs.signalfx.com/en/latest/). 
-### Configuration +## Configuration + ```toml [[outputs.signalfx]] ## SignalFx Org Access Token diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 77b89762a7a87..7f8f5da721768 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -65,7 +65,7 @@ through the convert settings. ## Configuration -``` +```toml # Save metrics to an SQL Database [[outputs.sql]] ## Database driver diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 27ef3a09f6f6c..a3c4f82952a8a 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -15,7 +15,7 @@ Metrics are grouped by the `namespace` variable and metric key - eg: `custom.goo Additional resource labels can be configured by `resource_labels`. By default the required `project_id` label is always set to the `project` variable. -### Configuration +## Configuration ```toml [[outputs.stackdriver]] @@ -35,7 +35,7 @@ Additional resource labels can be configured by `resource_labels`. By default th # location = "eu-north0" ``` -### Restrictions +## Restrictions Stackdriver does not support string values in custom metrics, any string fields will not be written. diff --git a/plugins/outputs/sumologic/README.md b/plugins/outputs/sumologic/README.md index 20fb757999a80..4dcc1c7e83b5c 100644 --- a/plugins/outputs/sumologic/README.md +++ b/plugins/outputs/sumologic/README.md @@ -8,11 +8,11 @@ Telegraf minimum version: Telegraf 1.16.0 Currently metrics can be sent using one of the following data formats, supported by Sumologic HTTP Source: - * `graphite` - for Content-Type of `application/vnd.sumologic.graphite` - * `carbon2` - for Content-Type of `application/vnd.sumologic.carbon2` - * `prometheus` - for Content-Type of `application/vnd.sumologic.prometheus` +* `graphite` - for Content-Type of `application/vnd.sumologic.graphite` +* `carbon2` - for Content-Type of `application/vnd.sumologic.carbon2` +* `prometheus` - for Content-Type of `application/vnd.sumologic.prometheus` -### Configuration: +## Configuration ```toml # A plugin that can send metrics to Sumo Logic HTTP metric collector. @@ -23,7 +23,7 @@ by Sumologic HTTP Source: ## Data format to be used for sending metrics. ## This will set the "Content-Type" header accordingly. - ## Currently supported formats: + ## Currently supported formats: ## * graphite - for Content-Type of application/vnd.sumologic.graphite ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2 ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus @@ -38,7 +38,7 @@ by Sumologic HTTP Source: ## Timeout used for HTTP request # timeout = "5s" - + ## Max HTTP request body size in bytes before compression (if applied). ## By default 1MB is recommended. ## NOTE: diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index cb9bc8965707f..7b2c480f36e8f 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -8,7 +8,7 @@ The syslog output plugin sends syslog messages transmitted over Syslog messages are formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). -### Configuration +## Configuration ```toml [[outputs.syslog]] @@ -88,7 +88,8 @@ Syslog messages are formatted according to # default_appname = "Telegraf" ``` -### Metric mapping +## Metric mapping + The output plugin expects syslog metrics tags and fields to match up with the ones created in the [syslog input][]. 
diff --git a/plugins/outputs/timestream/README.md b/plugins/outputs/timestream/README.md index dc063a06854d3..6761ad4da82b0 100644 --- a/plugins/outputs/timestream/README.md +++ b/plugins/outputs/timestream/README.md @@ -2,14 +2,14 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. -### Configuration +## Configuration ```toml # Configuration for sending metrics to Amazon Timestream. [[outputs.timestream]] ## Amazon Region region = "us-east-1" - + ## Amazon Credentials ## Credentials are loaded in the following order ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified @@ -27,7 +27,7 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. #role_session_name = "" #profile = "" #shared_credential_file = "" - + ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the ## default. @@ -40,7 +40,7 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. ## Specifies if the plugin should describe the Timestream database upon starting ## to validate if it has access necessary permissions, connection, etc., as a safety check. - ## If the describe operation fails, the plugin will not start + ## If the describe operation fails, the plugin will not start ## and therefore the Telegraf agent will not start. describe_database_on_start = false @@ -49,17 +49,17 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. ## For example, consider the following data in line protocol format: ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 - ## where weather and airquality are the measurement names, location and season are tags, + ## where weather and airquality are the measurement names, location and season are tags, ## and temperature, humidity, no2, pm25 are fields. ## In multi-table mode: ## - first line will be ingested to table named weather ## - second line will be ingested to table named airquality ## - the tags will be represented as dimensions ## - first table (weather) will have two records: - ## one with measurement name equals to temperature, + ## one with measurement name equals to temperature, ## another with measurement name equals to humidity ## - second table (airquality) will have two records: - ## one with measurement name equals to no2, + ## one with measurement name equals to no2, ## another with measurement name equals to pm25 ## - the Timestream tables from the example will look like this: ## TABLE "weather": @@ -93,7 +93,7 @@ The Timestream output plugin writes metrics to the [Amazon Timestream] service. ## Specifies the Timestream table where the metrics will be uploaded. # single_table_name = "yourTableNameHere" - ## Only valid and required for mapping_mode = "single-table" + ## Only valid and required for mapping_mode = "single-table" ## Describes what will be the Timestream dimension name for the Telegraf ## measurement name. # single_table_dimension_name_for_telegraf_measurement_name = "namespace" @@ -135,8 +135,9 @@ In case of an attempt to write an unsupported by Timestream Telegraf Field type, In case of receiving ThrottlingException or InternalServerException from Timestream, the errors are returned to Telegraf, in which case Telegraf will keep the metrics in buffer and retry writing those metrics on the next flush. 
In case of receiving ResourceNotFoundException: - - If `create_table_if_not_exists` configuration is set to `true`, the plugin will try to create appropriate table and write the records again, if the table creation was successful. - - If `create_table_if_not_exists` configuration is set to `false`, the records are dropped, and an error is emitted to the logs. + +- If `create_table_if_not_exists` configuration is set to `true`, the plugin will try to create appropriate table and write the records again, if the table creation was successful. +- If `create_table_if_not_exists` configuration is set to `false`, the records are dropped, and an error is emitted to the logs. In case of receiving any other AWS error from Timestream, the records are dropped, and an error is emitted to the logs, as retrying such requests isn't likely to succeed. @@ -148,8 +149,8 @@ Turn on debug flag in the Telegraf to turn on detailed logging (including record Execute unit tests with: -``` +```shell go test -v ./plugins/outputs/timestream/... ``` -[Amazon Timestream]: https://aws.amazon.com/timestream/ \ No newline at end of file +[Amazon Timestream]: https://aws.amazon.com/timestream/ diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md index 07e6cd25b92be..4ffc2ce73ff5c 100644 --- a/plugins/outputs/warp10/README.md +++ b/plugins/outputs/warp10/README.md @@ -2,7 +2,7 @@ The `warp10` output plugin writes metrics to [Warp 10][]. -### Configuration +## Configuration ```toml [[outputs.warp10]] @@ -32,7 +32,7 @@ The `warp10` output plugin writes metrics to [Warp 10][]. # insecure_skip_verify = false ``` -### Output Format +## Output Format Metrics are converted and sent using the [Geo Time Series][] (GTS) input format. diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index 8439295bbe029..6ccd6e35ef268 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -2,8 +2,7 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefront data format over TCP. - -### Configuration: +## Configuration ```toml ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy @@ -11,8 +10,8 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro url = "https://metrics.wavefront.com" ## Authentication Token for Wavefront. Only required if using Direct Ingestion - #token = "DUMMY_TOKEN" - + #token = "DUMMY_TOKEN" + ## DNS name of the wavefront proxy server. Do not use if url is specified #host = "wavefront.example.com" @@ -35,7 +34,7 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## Use Strict rules to sanitize metric and tag names from invalid characters ## When enabled forward slash (/) and comma (,) will be accepted #use_strict = false - + ## Use Regex to sanitize metric and tag names from invalid characters ## Regex is more thorough, but significantly slower. default is false #use_regex = false @@ -46,46 +45,48 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true #convert_bool = true - ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any + ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. 
#truncate_tags = false ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics - ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending + ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in ## Telegraf. #immediate_flush = true ``` - ### Convert Path & Metric Separator -If the `convert_path` option is true any `_` in metric and field names will be converted to the `metric_separator` value. -By default, to ease metrics browsing in the Wavefront UI, the `convert_path` option is true, and `metric_separator` is `.` (dot). + +If the `convert_path` option is true any `_` in metric and field names will be converted to the `metric_separator` value. +By default, to ease metrics browsing in the Wavefront UI, the `convert_path` option is true, and `metric_separator` is `.` (dot). Default integrations within Wavefront expect these values to be set to their defaults, however if converting from another platform it may be desirable to change these defaults. - ### Use Regex -Most illegal characters in the metric name are automatically converted to `-`. -The `use_regex` setting can be used to ensure all illegal characters are properly handled, but can lead to performance degradation. +Most illegal characters in the metric name are automatically converted to `-`. +The `use_regex` setting can be used to ensure all illegal characters are properly handled, but can lead to performance degradation. ### Source Override -Often when collecting metrics from another system, you want to use the target system as the source, not the one running Telegraf. + +Often when collecting metrics from another system, you want to use the target system as the source, not the one running Telegraf. Many Telegraf plugins will identify the target source with a tag. The tag name can vary for different plugins. The `source_override` -option will use the value specified in any of the listed tags if found. The tag names are checked in the same order as listed, -and if found, the other tags will not be checked. If no tags specified are found, the default host tag will be used to identify the +option will use the value specified in any of the listed tags if found. The tag names are checked in the same order as listed, +and if found, the other tags will not be checked. If no tags specified are found, the default host tag will be used to identify the source of the metric. - ### Wavefront Data format + The expected input for Wavefront is specified in the following way: -``` + +```text [] = [tagk1=tagv1 ...tagkN=tagvN] ``` -More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031) +More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031) ### Allowed values for metrics -Wavefront allows `integers` and `floats` as input values. By default it also maps `bool` values to numeric, false -> 0.0, + +Wavefront allows `integers` and `floats` as input values. By default it also maps `bool` values to numeric, false -> 0.0, true -> 1.0. To map `strings` use the [enum](../../processors/enum) processor plugin. 
diff --git a/plugins/outputs/websocket/README.md b/plugins/outputs/websocket/README.md index 577c10e6b0083..51d329317a0b7 100644 --- a/plugins/outputs/websocket/README.md +++ b/plugins/outputs/websocket/README.md @@ -4,7 +4,7 @@ This plugin can write to a WebSocket endpoint. It can output data in any of the [supported output formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). -### Configuration: +## Configuration ```toml # A plugin that can transmit metrics over WebSocket. diff --git a/plugins/outputs/yandex_cloud_monitoring/README.md b/plugins/outputs/yandex_cloud_monitoring/README.md index 3bace22b4adb2..412a57e4e18f7 100644 --- a/plugins/outputs/yandex_cloud_monitoring/README.md +++ b/plugins/outputs/yandex_cloud_monitoring/README.md @@ -1,9 +1,8 @@ # Yandex Cloud Monitoring -This plugin will send custom metrics to Yandex Cloud Monitoring. -https://cloud.yandex.com/services/monitoring +This plugin will send custom metrics to [Yandex Cloud Monitoring](https://cloud.yandex.com/services/monitoring). -### Configuration: +## Configuration ```toml [[outputs.yandex_cloud_monitoring]] From d4582dca7081f2531f62d2b79375406b250e70eb Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 24 Nov 2021 11:50:01 -0700 Subject: [PATCH 072/133] chore: clean up all markdown lint error on input plugins n through r (#10168) --- plugins/inputs/nats/README.md | 8 +- plugins/inputs/nats_consumer/README.md | 2 +- plugins/inputs/neptune_apex/README.md | 35 +- plugins/inputs/net/NETSTAT_README.md | 12 +- plugins/inputs/net/NET_README.md | 16 +- plugins/inputs/net_response/README.md | 8 +- plugins/inputs/nfsclient/README.md | 170 +++-- plugins/inputs/nginx/README.md | 31 +- plugins/inputs/nginx_plus/README.md | 14 +- plugins/inputs/nginx_plus_api/README.md | 17 +- plugins/inputs/nginx_sts/README.md | 14 +- plugins/inputs/nginx_upstream_check/README.md | 32 +- plugins/inputs/nginx_vts/README.md | 17 +- plugins/inputs/nsd/README.md | 11 +- plugins/inputs/nsq/README.md | 2 +- plugins/inputs/nsq_consumer/README.md | 2 +- plugins/inputs/nstat/README.md | 621 +++++++++--------- plugins/inputs/ntpq/README.md | 30 +- plugins/inputs/nvidia_smi/README.md | 25 +- plugins/inputs/opcua/README.md | 21 +- plugins/inputs/openldap/README.md | 88 +-- plugins/inputs/openntpd/README.md | 13 +- plugins/inputs/opensmtpd/README.md | 13 +- plugins/inputs/openstack/README.md | 452 ++++++------- plugins/inputs/opentelemetry/README.md | 18 +- plugins/inputs/openweathermap/README.md | 11 +- plugins/inputs/passenger/README.md | 13 +- plugins/inputs/pf/README.md | 55 +- plugins/inputs/pgbouncer/README.md | 12 +- plugins/inputs/phpfpm/README.md | 8 +- plugins/inputs/ping/README.md | 45 +- plugins/inputs/postfix/README.md | 27 +- plugins/inputs/postgresql/README.md | 9 +- .../inputs/postgresql_extensible/README.md | 30 +- plugins/inputs/powerdns/README.md | 15 +- plugins/inputs/powerdns_recursor/README.md | 23 +- plugins/inputs/processes/README.md | 12 +- plugins/inputs/procstat/README.md | 11 +- plugins/inputs/prometheus/README.md | 39 +- plugins/inputs/proxmox/README.md | 20 +- plugins/inputs/puppetagent/README.md | 101 +-- plugins/inputs/rabbitmq/README.md | 16 +- plugins/inputs/raindrops/README.md | 28 +- plugins/inputs/ras/README.md | 11 +- plugins/inputs/ravendb/README.md | 18 +- plugins/inputs/redfish/README.md | 14 +- plugins/inputs/redis/README.md | 171 ++--- plugins/inputs/rethinkdb/README.md | 6 +- plugins/inputs/riak/README.md | 10 +- plugins/inputs/riemann_listener/README.md | 4 +- 50 files changed, 
1233 insertions(+), 1148 deletions(-) diff --git a/plugins/inputs/nats/README.md b/plugins/inputs/nats/README.md index 362ee17b2aa65..59262bfd6cb52 100644 --- a/plugins/inputs/nats/README.md +++ b/plugins/inputs/nats/README.md @@ -3,7 +3,7 @@ The [NATS](http://www.nats.io/about/) monitoring plugin gathers metrics from the NATS [monitoring http server](https://www.nats.io/documentation/server/gnatsd-monitoring/). -### Configuration +## Configuration ```toml [[inputs.nats]] @@ -14,7 +14,7 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats # response_timeout = "5s" ``` -### Metrics: +## Metrics - nats - tags @@ -35,8 +35,8 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats - out_msgs (integer, count) - in_bytes (integer, bytes) -### Example Output: +## Example Output -``` +```shell nats,server=http://localhost:8222 uptime=117158348682i,mem=6647808i,subscriptions=0i,out_bytes=0i,connections=0i,in_msgs=0i,total_connections=0i,cores=2i,cpu=0,slow_consumers=0i,routes=0i,remotes=0i,out_msgs=0i,in_bytes=0i 1517015107000000000 ``` diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index ae40d9185100a..9e46bf4ebcc99 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -6,7 +6,7 @@ creates metrics using one of the supported [input data formats][]. A [Queue Group][queue group] is used when subscribing to subjects so multiple instances of telegraf can read from a NATS cluster in parallel. -### Configuration: +## Configuration ```toml [[inputs.nats_consumer]] diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md index 6fd28a16a6d21..df0de4574eca4 100644 --- a/plugins/inputs/neptune_apex/README.md +++ b/plugins/inputs/neptune_apex/README.md @@ -6,8 +6,7 @@ in the telegraf.conf configuration file. The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-time data from the Apex's status.xml page. - -### Configuration +## Configuration ```toml [[inputs.neptune_apex]] @@ -25,7 +24,7 @@ The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-t ``` -### Metrics +## Metrics The Neptune Apex controller family allows an aquarium hobbyist to monitor and control their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified @@ -62,38 +61,42 @@ programming. These tags are clearly marked in the list below and should be consi - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none" - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none" - serial (string, serial number) - - time: - - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with + - time: + - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. - -### Sample Queries - +## Sample Queries Get the max, mean, and min for the temperature in the last hour: + ```sql SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s) ``` -### Troubleshooting +## Troubleshooting + +### sendRequest failure -#### sendRequest failure This indicates a problem communicating with the local Apex controller. 
If on Mac/Linux, try curl: + ```sh -$ curl apex.local/cgi-bin/status.xml +curl apex.local/cgi-bin/status.xml ``` + to isolate the problem. -#### parseXML errors +### parseXML errors + Ensure the XML being returned is valid. If you get valid XML back, open a bug request. -#### Missing fields/data +### Missing fields/data + The neptune_apex plugin is strict on its input to prevent any conversion errors. If you have fields in the status.xml output that are not converted to a metric, open a feature request and paste your whole status.xml -### Example Output +## Example Output -``` +```text neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 @@ -138,7 +141,7 @@ neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex ``` -### Contributing +## Contributing This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for diff --git a/plugins/inputs/net/NETSTAT_README.md b/plugins/inputs/net/NETSTAT_README.md index d0f39f5e400e6..95a9ad6124d2e 100644 --- a/plugins/inputs/net/NETSTAT_README.md +++ b/plugins/inputs/net/NETSTAT_README.md @@ -2,7 +2,7 @@ This plugin collects TCP connections state and UDP socket counts by using `lsof`. -### Configuration: +## Configuration ``` toml # Collect TCP connections state and UDP socket counts @@ -10,7 +10,7 @@ This plugin collects TCP connections state and UDP socket counts by using `lsof` # no configuration ``` -# Measurements: +## Measurements Supported TCP Connection states are follows. @@ -27,12 +27,14 @@ Supported TCP Connection states are follows. - closing - none -### TCP Connection State measurements: +## TCP Connection State measurements Meta: + - units: counts Measurement names: + - tcp_established - tcp_syn_sent - tcp_syn_recv @@ -48,10 +50,12 @@ Measurement names: If there are no connection on the state, the metric is not counted. -### UDP socket counts measurements: +## UDP socket counts measurements Meta: + - units: counts Measurement names: + - udp_socket diff --git a/plugins/inputs/net/NET_README.md b/plugins/inputs/net/NET_README.md index d2571d29e9ede..243293b93532d 100644 --- a/plugins/inputs/net/NET_README.md +++ b/plugins/inputs/net/NET_README.md @@ -2,7 +2,7 @@ This plugin gathers metrics about network interface and protocol usage (Linux only). -### Configuration: +## Configuration ```toml # Gather metrics about network interfaces @@ -21,7 +21,7 @@ This plugin gathers metrics about network interface and protocol usage (Linux on ## ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the _net_ measurement. @@ -42,14 +42,14 @@ Under freebsd/openbsd and darwin the plugin uses netstat. Additionally, for the time being _only under Linux_, the plugin gathers system wide stats for different network protocols using /proc/net/snmp (tcp, udp, icmp, etc.). 
Explanation of the different metrics exposed by snmp is out of the scope of this document. The best way to find information would be tracing the constants in the Linux kernel source [here](https://elixir.bootlin.com/linux/latest/source/net/ipv4/proc.c) and their usage. If /proc/net/snmp cannot be read for some reason, telegraf ignores the error silently. -### Tags: +## Tags * Net measurements have the following tags: - - interface (the interface from which metrics are gathered) + * interface (the interface from which metrics are gathered) Under Linux the system wide protocol metrics have the interface=all tag. -### Sample Queries: +## Sample Queries You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values. @@ -57,15 +57,15 @@ You can use the following query to get the upload/download traffic rate per seco SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0); ``` -### Example Output: +## Example Output -``` +```shell # All platforms $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 ``` -``` +```shell # Linux $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 2c492408beef2..e64a7ebf605cc 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -3,7 +3,7 @@ The input plugin test UDP/TCP connections response time and can optional verify text in the response. -### Configuration: +## Configuration ```toml # Collect response time of a TCP or UDP connection @@ -33,7 +33,7 @@ verify text in the response. # fielddrop = ["result_type", "string_found"] ``` -### Metrics: +## Metrics - net_response - tags: @@ -47,9 +47,9 @@ verify text in the response. - result_type (string) **DEPRECATED in 1.7; use result tag** - string_found (boolean) **DEPRECATED in 1.4; use result tag** -### Example Output: +## Example Output -``` +```shell net_response,port=8086,protocol=tcp,result=success,server=localhost response_time=0.000092948,result_code=0i,result_type="success" 1525820185000000000 net_response,port=8080,protocol=tcp,result=connection_failed,server=localhost result_code=2i,result_type="connection_failed" 1525820088000000000 net_response,port=8080,protocol=udp,result=read_failed,server=localhost result_code=3i,result_type="read_failed",string_found=false 1525820088000000000 diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md index 1ed1a08424bbb..b28ceb32cc754 100644 --- a/plugins/inputs/nfsclient/README.md +++ b/plugins/inputs/nfsclient/README.md @@ -5,7 +5,7 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed **NOTE** Many of the metrics, even if tagged with a mount point, are really _per-server_. 
Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and `nfs01:/vol/foo/baz`, there will be two near identical entries in /proc/self/mountstats. This is a limitation of the metrics exposed by the kernel, not the telegraf plugin. -### Configuration +## Configuration ```toml [[inputs.nfsclient]] @@ -35,7 +35,9 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed # include_operations = [] # exclude_operations = [] ``` -#### Configuration Options + +### Configuration Options + - **fullstat** bool: Collect per-operation type metrics. Defaults to false. - **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. - **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. @@ -44,121 +46,119 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed *N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. -#### References +### References + 1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) 2. [net/sunrpc/stats.c - Linux source code](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/stats.c) 3. [What is in /proc/self/mountstats for NFS mounts: an introduction](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex) 4. [The xprt: data for NFS mounts in /proc/self/mountstats](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsXprt) +## Metrics - -### Metrics - -#### Fields +### Fields - nfsstat - - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) - - ops (integer, count) - The number of operations of this type executed. - - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) - - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. - - rtt (integer, miliseconds) - The round-trip time for operations. + - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) + - ops (integer, count) - The number of operations of this type executed. + - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) + - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. + - rtt (integer, miliseconds) - The round-trip time for operations. In addition enabling `fullstat` will make many more metrics available. -#### Tags +### Tags - All measurements have the following tags: - - mountpoint - The local mountpoint, for instance: "/var/www" - - serverexport - The full server export, for instance: "nfsserver.example.org:/export" + - mountpoint - The local mountpoint, for instance: "/var/www" + - serverexport - The full server export, for instance: "nfsserver.example.org:/export" - Measurements nfsstat and nfs_ops will also include: - - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. 
A complete list of operations supported is visible in `/proc/self/mountstats`. - - + - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. A complete list of operations supported is visible in `/proc/self/mountstats`. -### Additional metrics +## Additional metrics When `fullstat` is true, additional measurements are collected. Tags are the same as above. -#### NFS Operations +### NFS Operations Most descriptions come from Reference [[3](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex)] and `nfs_iostat.h`. Field order and names are the same as in `/proc/self/mountstats` and the Kernel source. Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes occasionally. - nfs_bytes - - fields: - - normalreadbytes (int, bytes): Bytes read from the server via `read()` - - normalwritebytes (int, bytes): Bytes written to the server via `write()` - - directreadbytes (int, bytes): Bytes read with O_DIRECT set - - directwritebytes (int, bytes): Bytes written with O_DIRECT set - - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) - - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) - - readpages (int, count): Number of pages read - - writepages (int, count): Number of pages written + - fields: + - normalreadbytes (int, bytes): Bytes read from the server via `read()` + - normalwritebytes (int, bytes): Bytes written to the server via `write()` + - directreadbytes (int, bytes): Bytes read with O_DIRECT set + - directwritebytes (int, bytes): Bytes written with O_DIRECT set + - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) + - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) + - readpages (int, count): Number of pages read + - writepages (int, count): Number of pages written - nfs_events (Per-event metrics) - - fields: - - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. - - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. - - datainvalidates (int, count): How many times an inode had its cached data thrown out. - - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. - - vfsopen (int, count): How many times files or directories have been `open()`'d. - - vfslookup (int, count): How many name lookups in directories there have been. - - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") - - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. - - vfsreadpage (int, count): Number of pages read. - - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). - - vfswritepage (int, count): Number of pages written. - - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) - - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") - - vfssetattr (int, count): How many times we've set attributes on inodes. - - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. - - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. 
- - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). - - vfsrelease (int, count): Number of calls to `close()`. - - congestionwait (int, count): Believe unused by the Linux kernel, but it is part of the NFS spec. - - setattrtrunc (int, count): How many times files have had their size truncated. - - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. - - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) - - shortreads (int, count): Number of times the NFS server returned less data than requested. - - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. - - delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused) - - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. - - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. + - fields: + - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. + - datainvalidates (int, count): How many times an inode had its cached data thrown out. + - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. + - vfsopen (int, count): How many times files or directories have been `open()`'d. + - vfslookup (int, count): How many name lookups in directories there have been. + - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") + - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. + - vfsreadpage (int, count): Number of pages read. + - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage (int, count): Number of pages written. + - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr (int, count): How many times we've set attributes on inodes. + - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. + - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. + - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease (int, count): Number of calls to `close()`. + - congestionwait (int, count): Believe unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc (int, count): How many times files have had their size truncated. + - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads (int, count): Number of times the NFS server returned less data than requested. + - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. + - delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. + - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. 
- nfs_xprt_tcp - - fields: - - bind_count (int, count): Number of _completely new_ mounts to this server (sometimes 0?) - - connect_count (int, count): How many times the client has connected to the server in question - - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. - - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. - - rpcsends (int, count): How many RPC requests this mount has sent to the server. - - rpcreceives (int, count): How many RPC replies this mount has received from the server. - - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. - - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) - - backlogutil (int, count): Cumulative backlog count + - fields: + - bind_count (int, count): Number of_completely new_ mounts to this server (sometimes 0?) + - connect_count (int, count): How many times the client has connected to the server in question + - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. + - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends (int, count): How many RPC requests this mount has sent to the server. + - rpcreceives (int, count): How many RPC replies this mount has received from the server. + - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. + - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil (int, count): Cumulative backlog count - nfs_xprt_udp - - fields: - - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] + - fields: + - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] - nfs_ops - - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): - - ops (int, count): Total operations of this type. - - trans (int, count): Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). - - timeouts (int, count): Number of major timeouts. - - bytes_sent (int, count): Bytes received, including headers (should also be close to on-wire size). - - bytes_recv (int, count): Bytes sent, including headers (should be close to on-wire size). - - queue_time (int, milliseconds): Cumulative time a request waited in the queue before sending this OP type. - - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. - - total_time (int, milliseconds): Cumulative time a request waited in the queue before sending. - - errors (int, count): Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 - - -### Example Output + - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): + - ops (int, count): Total operations of this type. + - trans (int, count): Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). + - timeouts (int, count): Number of major timeouts. 
+ - bytes_sent (int, count): Bytes received, including headers (should also be close to on-wire size). + - bytes_recv (int, count): Bytes sent, including headers (should be close to on-wire size). + - queue_time (int, milliseconds): Cumulative time a request waited in the queue before sending this OP type. + - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. + - total_time (int, milliseconds): Cumulative time a request waited in the queue before sending. + - errors (int, count): Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + +## Example Output + For basic metrics showing server-wise read and write data. -``` + +```shell nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 @@ -168,7 +168,7 @@ For `fullstat=true` metrics, which includes additional measurements for `nfs_byt Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes as it changes periodically. -``` +```shell nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 @@ -177,5 +177,3 @@ nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0 nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 ``` - - diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index bc4916507ef25..4859aa74c96f9 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -1,6 +1,6 @@ # Nginx Input Plugin -### Configuration: +## Configuration ```toml # Read Nginx's basic status information (ngx_http_stub_status_module) @@ -19,26 +19,27 @@ response_timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields - Measurement - - accepts - - active - - handled - - reading - - requests - - waiting - - writing + 
- accepts + - active + - handled + - reading + - requests + - waiting + - writing -### Tags: +## Tags - All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx]] ## An array of Nginx stub_status URI to gather stats. @@ -46,12 +47,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx --test ``` It produces: -``` + +```shell * Plugin: nginx, Collection 1 > nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331 ``` diff --git a/plugins/inputs/nginx_plus/README.md b/plugins/inputs/nginx_plus/README.md index cb0713ed848ff..5afb82d2f7c7e 100644 --- a/plugins/inputs/nginx_plus/README.md +++ b/plugins/inputs/nginx_plus/README.md @@ -5,7 +5,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use Structures for Nginx Plus have been built based on history of [status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html) -### Configuration: +## Configuration ```toml # Read Nginx Plus' advanced status information @@ -14,7 +14,7 @@ Structures for Nginx Plus have been built based on history of urls = ["http://localhost/status"] ``` -### Measurements & Fields: +## Measurements & Fields - nginx_plus_processes - respawned @@ -59,8 +59,7 @@ Structures for Nginx Plus have been built based on history of - fails - downtime - -### Tags: +## Tags - nginx_plus_processes, nginx_plus_connections, nginx_plus_ssl, nginx_plus_requests - server @@ -78,9 +77,10 @@ Structures for Nginx Plus have been built based on history of - port - upstream_address -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus]] ## An array of Nginx Plus status URIs to gather stats. @@ -88,12 +88,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus -test ``` It produces: -``` + +```text * Plugin: inputs.nginx_plus, Collection 1 > nginx_plus_processes,server=localhost,port=12021,host=word.local respawned=0i 1505782513000000000 > nginx_plus_connections,server=localhost,port=12021,host=word.local accepted=5535735212i,dropped=10140186i,active=9541i,idle=67540i 1505782513000000000 diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md index 57cb127b5dd12..3d8d9bf07a6fc 100644 --- a/plugins/inputs/nginx_plus_api/README.md +++ b/plugins/inputs/nginx_plus_api/README.md @@ -2,7 +2,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). -### Configuration: +## Configuration ```toml # Read Nginx Plus API advanced status information @@ -13,7 +13,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use # api_version = 3 ``` -### Migration from Nginx Plus (Status) input plugin +## Migration from Nginx Plus (Status) input plugin | Nginx Plus | Nginx Plus API | |---------------------------------|--------------------------------------| @@ -29,7 +29,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. 
The use | nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | | nginx.stream.zone | nginx_plus_api_stream_server_zones | -### Measurements by API version +## Measurements by API version | Measurement | API version (api_version) | |--------------------------------------|---------------------------| @@ -47,7 +47,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use | nginx_plus_api_http_location_zones | >= 5 | | nginx_plus_api_resolver_zones | >= 5 | -### Measurements & Fields: +## Measurements & Fields - nginx_plus_api_processes - respawned @@ -171,7 +171,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - timedout - unknown -### Tags: +## Tags - nginx_plus_api_processes, nginx_plus_api_connections, nginx_plus_api_ssl, nginx_plus_api_http_requests - source @@ -198,9 +198,10 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - source - port -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus_api]] ## An array of Nginx Plus API URIs to gather stats. @@ -208,12 +209,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus_api -test ``` It produces: -``` + +```text > nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000 > nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000 > nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000 diff --git a/plugins/inputs/nginx_sts/README.md b/plugins/inputs/nginx_sts/README.md index 935bc9af83c62..7d23fd029dfb5 100644 --- a/plugins/inputs/nginx_sts/README.md +++ b/plugins/inputs/nginx_sts/README.md @@ -1,7 +1,7 @@ # Nginx Stream STS Input Plugin This plugin gathers Nginx status using external virtual host traffic status -module - https://github.com/vozlt/nginx-module-sts. This is an Nginx module +module - . This is an Nginx module that provides access to stream host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. For module configuration details please see its @@ -9,7 +9,7 @@ monitoring of Nginx plus. 
For module configuration details please see its Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[inputs.nginx_sts]] @@ -27,7 +27,7 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - nginx_sts_connections - tags: @@ -42,7 +42,7 @@ Telegraf minimum version: Telegraf 1.15.0 - handled - requests -+ nginx_sts_server +- nginx_sts_server - tags: - source - port @@ -77,7 +77,7 @@ Telegraf minimum version: Telegraf 1.15.0 - session_msec_counter - session_msec -+ nginx_sts_upstream +- nginx_sts_upstream - tags: - source - port @@ -106,9 +106,9 @@ Telegraf minimum version: Telegraf 1.15.0 - backup - down -### Example Output: +## Example Output -``` +```shell nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=1.2.3.4:8080 upstream_connect_msec_counter=0i,out_bytes=0i,down=false,connects=0i,session_msec=0i,upstream_session_msec=0i,upstream_session_msec_counter=0i,upstream_connect_msec=0i,upstream_firstbyte_msec_counter=0i,response_3xx_count=0i,session_msec_counter=0i,weight=1i,max_fails=1i,backup=false,upstream_firstbyte_msec=0i,in_bytes=0i,response_1xx_count=0i,response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,fail_timeout=10i 1584699180000000000 nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=9.8.7.6:8080 upstream_firstbyte_msec_counter=0i,response_2xx_count=0i,down=false,upstream_session_msec_counter=0i,out_bytes=0i,response_5xx_count=0i,weight=1i,max_fails=1i,fail_timeout=10i,connects=0i,session_msec_counter=0i,upstream_session_msec=0i,in_bytes=0i,response_1xx_count=0i,response_3xx_count=0i,response_4xx_count=0i,session_msec=0i,upstream_connect_msec=0i,upstream_connect_msec_counter=0i,upstream_firstbyte_msec=0i,backup=false 1584699180000000000 nginx_sts_server,host=localhost,port=80,source=127.0.0.1,zone=* response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,session_msec_counter=0i,in_bytes=0i,out_bytes=0i,session_msec=0i,response_1xx_count=0i,response_3xx_count=0i,connects=0i 1584699180000000000 diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md index 58bee07be931d..6e5974ebf801e 100644 --- a/plugins/inputs/nginx_upstream_check/README.md +++ b/plugins/inputs/nginx_upstream_check/README.md @@ -1,6 +1,6 @@ # Nginx Upstream Check Input Plugin -Read the status output of the nginx_upstream_check (https://github.com/yaoweibin/nginx_upstream_check_module). +Read the status output of the nginx_upstream_check (). This module can periodically check the servers in the Nginx's upstream with configured request and interval to determine if the server is still available. If checks are failed the server is marked as "down" and will not receive any requests until the check will pass and a server will be marked as "up" again. @@ -8,7 +8,7 @@ until the check will pass and a server will be marked as "up" again. The status page displays the current status of all upstreams and servers as well as number of the failed and successful checks. This information can be exported in JSON format and parsed by this input. -### Configuration: +## Configuration ```toml ## An URL where Nginx Upstream check module is enabled @@ -39,36 +39,38 @@ checks. 
This information can be exported in JSON format and parsed by this input # insecure_skip_verify = false ``` -### Measurements & Fields: +## Measurements & Fields - Measurement - - fall (The number of failed server check attempts, counter) - - rise (The number of successful server check attempts, counter) - - status (The reporter server status as a string) - - status_code (The server status code. 1 - up, 2 - down, 0 - other) + - fall (The number of failed server check attempts, counter) + - rise (The number of successful server check attempts, counter) + - status (The reporter server status as a string) + - status_code (The server status code. 1 - up, 2 - down, 0 - other) The "status_code" field most likely will be the most useful one because it allows you to determine the current state of every server and, possible, add some monitoring to watch over it. InfluxDB can use string values and the "status" field can be used instead, but for most other monitoring solutions the integer code will be appropriate. -### Tags: +## Tags - All measurements have the following tags: - - name (The hostname or IP of the upstream server) - - port (The alternative check port, 0 if the default one is used) - - type (The check type, http/tcp) - - upstream (The name of the upstream block in the Nginx configuration) - - url (The status url used by telegraf) + - name (The hostname or IP of the upstream server) + - port (The alternative check port, 0 if the default one is used) + - type (The check type, http/tcp) + - upstream (The name of the upstream block in the Nginx configuration) + - url (The status url used by telegraf) -### Example Output: +## Example Output When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test ``` It produces: -``` + +```text * Plugin: nginx_upstream_check, Collection 1 > nginx_upstream_check,host=node1,name=192.168.0.1:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=0i,rise=100i,status="up",status_code=1i 1529088524000000000 > nginx_upstream_check,host=node2,name=192.168.0.2:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=100i,rise=0i,status="down",status_code=2i 1529088524000000000 diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md index fe9e7fd6ea62f..117b0ca24cb17 100644 --- a/plugins/inputs/nginx_vts/README.md +++ b/plugins/inputs/nginx_vts/README.md @@ -1,9 +1,9 @@ # Nginx Virtual Host Traffic (VTS) Input Plugin -This plugin gathers Nginx status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. +This plugin gathers Nginx status using external virtual host traffic status module - . This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. For module configuration details please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis). 
-### Configuration: +## Configuration ```toml # Read nginx status information using nginx-module-vts module @@ -12,7 +12,7 @@ For module configuration details please see its [documentation](https://github.c urls = ["http://localhost/status"] ``` -### Measurements & Fields: +## Measurements & Fields - nginx_vts_connections - active @@ -70,8 +70,7 @@ For module configuration details please see its [documentation](https://github.c - hit - scarce - -### Tags: +## Tags - nginx_vts_connections - source @@ -95,10 +94,10 @@ For module configuration details please see its [documentation](https://github.c - port - zone - -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_vts]] ## An array of Nginx status URIs to gather stats. @@ -106,12 +105,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_vts -test ``` It produces: -``` + +```shell nginx_vts_connections,source=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 nginx_vts_server,zone=example.com,port=80,host=localhost,source=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 nginx_vts_server,host=localhost,source=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 diff --git a/plugins/inputs/nsd/README.md b/plugins/inputs/nsd/README.md index 2d7f8833c2db8..51c45b1f4fb66 100644 --- a/plugins/inputs/nsd/README.md +++ b/plugins/inputs/nsd/README.md @@ -4,7 +4,7 @@ This plugin gathers stats from [NSD](https://www.nlnetlabs.nl/projects/nsd/about) - an authoritative DNS name server. -### Configuration: +## Configuration ```toml # A plugin to collect stats from the NSD DNS resolver @@ -26,7 +26,7 @@ server. # timeout = "1s" ``` -#### Permissions: +### Permissions It's important to note that this plugin references nsd-control, which may require additional permissions to execute successfully. Depending on the @@ -34,6 +34,7 @@ user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -46,12 +47,14 @@ telegraf : telegraf nsd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.nsd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -62,11 +65,11 @@ Defaults!NSDCONTROLCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Metrics: +## Metrics This is the full list of stats provided by nsd-control. In the output, the dots in the nsd-control stat name are replaced by underscores (see -https://www.nlnetlabs.nl/documentation/nsd/nsd-control/ for details). + for details). 
- nsd - fields: diff --git a/plugins/inputs/nsq/README.md b/plugins/inputs/nsq/README.md index 00c1089afe309..78ba28d49b11f 100644 --- a/plugins/inputs/nsq/README.md +++ b/plugins/inputs/nsq/README.md @@ -1,6 +1,6 @@ # NSQ Input Plugin -### Configuration: +## Configuration ```toml # Description diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index d1e7194bbd7e0..b10bfbf6f7b68 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -3,7 +3,7 @@ The [NSQ][nsq] consumer plugin reads from NSQD and creates metrics using one of the supported [input data formats][]. -### Configuration: +## Configuration ```toml # Read metrics from NSQD topic(s) diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md index c0ebc2654f5b8..0e2fa217300c7 100644 --- a/plugins/inputs/nstat/README.md +++ b/plugins/inputs/nstat/README.md @@ -2,10 +2,11 @@ Plugin collects network metrics from `/proc/net/netstat`, `/proc/net/snmp` and `/proc/net/snmp6` files -### Configuration +## Configuration The plugin firstly tries to read file paths from config values if it is empty, then it reads from env variables. + * `PROC_NET_NETSTAT` * `PROC_NET_SNMP` * `PROC_NET_SNMP6` @@ -15,331 +16,335 @@ then it tries to read the proc root from env - `PROC_ROOT`, and sets `/proc` as a root path if `PROC_ROOT` is also empty. Then appends default file paths: + * `/net/netstat` * `/net/snmp` * `/net/snmp6` So if nothing is given, no paths in config and in env vars, the plugin takes the default paths. + * `/proc/net/netstat` * `/proc/net/snmp` * `/proc/net/snmp6` The sample config file + ```toml [[inputs.nstat]] ## file paths ## e.g: /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 - # proc_net_netstat = "" - # proc_net_snmp = "" - # proc_net_snmp6 = "" + # proc_net_netstat = "" + # proc_net_snmp = "" + # proc_net_snmp6 = "" ## dump metrics with 0 values too - # dump_zeros = true + # dump_zeros = true ``` In case that `proc_net_snmp6` path doesn't exist (e.g. IPv6 is not enabled) no error would be raised. 
-### Measurements & Fields +## Measurements & Fields + +* nstat + * Icmp6InCsumErrors + * Icmp6InDestUnreachs + * Icmp6InEchoReplies + * Icmp6InEchos + * Icmp6InErrors + * Icmp6InGroupMembQueries + * Icmp6InGroupMembReductions + * Icmp6InGroupMembResponses + * Icmp6InMLDv2Reports + * Icmp6InMsgs + * Icmp6InNeighborAdvertisements + * Icmp6InNeighborSolicits + * Icmp6InParmProblems + * Icmp6InPktTooBigs + * Icmp6InRedirects + * Icmp6InRouterAdvertisements + * Icmp6InRouterSolicits + * Icmp6InTimeExcds + * Icmp6OutDestUnreachs + * Icmp6OutEchoReplies + * Icmp6OutEchos + * Icmp6OutErrors + * Icmp6OutGroupMembQueries + * Icmp6OutGroupMembReductions + * Icmp6OutGroupMembResponses + * Icmp6OutMLDv2Reports + * Icmp6OutMsgs + * Icmp6OutNeighborAdvertisements + * Icmp6OutNeighborSolicits + * Icmp6OutParmProblems + * Icmp6OutPktTooBigs + * Icmp6OutRedirects + * Icmp6OutRouterAdvertisements + * Icmp6OutRouterSolicits + * Icmp6OutTimeExcds + * Icmp6OutType133 + * Icmp6OutType135 + * Icmp6OutType143 + * IcmpInAddrMaskReps + * IcmpInAddrMasks + * IcmpInCsumErrors + * IcmpInDestUnreachs + * IcmpInEchoReps + * IcmpInEchos + * IcmpInErrors + * IcmpInMsgs + * IcmpInParmProbs + * IcmpInRedirects + * IcmpInSrcQuenchs + * IcmpInTimeExcds + * IcmpInTimestampReps + * IcmpInTimestamps + * IcmpMsgInType3 + * IcmpMsgOutType3 + * IcmpOutAddrMaskReps + * IcmpOutAddrMasks + * IcmpOutDestUnreachs + * IcmpOutEchoReps + * IcmpOutEchos + * IcmpOutErrors + * IcmpOutMsgs + * IcmpOutParmProbs + * IcmpOutRedirects + * IcmpOutSrcQuenchs + * IcmpOutTimeExcds + * IcmpOutTimestampReps + * IcmpOutTimestamps + * Ip6FragCreates + * Ip6FragFails + * Ip6FragOKs + * Ip6InAddrErrors + * Ip6InBcastOctets + * Ip6InCEPkts + * Ip6InDelivers + * Ip6InDiscards + * Ip6InECT0Pkts + * Ip6InECT1Pkts + * Ip6InHdrErrors + * Ip6InMcastOctets + * Ip6InMcastPkts + * Ip6InNoECTPkts + * Ip6InNoRoutes + * Ip6InOctets + * Ip6InReceives + * Ip6InTooBigErrors + * Ip6InTruncatedPkts + * Ip6InUnknownProtos + * Ip6OutBcastOctets + * Ip6OutDiscards + * Ip6OutForwDatagrams + * Ip6OutMcastOctets + * Ip6OutMcastPkts + * Ip6OutNoRoutes + * Ip6OutOctets + * Ip6OutRequests + * Ip6ReasmFails + * Ip6ReasmOKs + * Ip6ReasmReqds + * Ip6ReasmTimeout + * IpDefaultTTL + * IpExtInBcastOctets + * IpExtInBcastPkts + * IpExtInCEPkts + * IpExtInCsumErrors + * IpExtInECT0Pkts + * IpExtInECT1Pkts + * IpExtInMcastOctets + * IpExtInMcastPkts + * IpExtInNoECTPkts + * IpExtInNoRoutes + * IpExtInOctets + * IpExtInTruncatedPkts + * IpExtOutBcastOctets + * IpExtOutBcastPkts + * IpExtOutMcastOctets + * IpExtOutMcastPkts + * IpExtOutOctets + * IpForwDatagrams + * IpForwarding + * IpFragCreates + * IpFragFails + * IpFragOKs + * IpInAddrErrors + * IpInDelivers + * IpInDiscards + * IpInHdrErrors + * IpInReceives + * IpInUnknownProtos + * IpOutDiscards + * IpOutNoRoutes + * IpOutRequests + * IpReasmFails + * IpReasmOKs + * IpReasmReqds + * IpReasmTimeout + * TcpActiveOpens + * TcpAttemptFails + * TcpCurrEstab + * TcpEstabResets + * TcpExtArpFilter + * TcpExtBusyPollRxPackets + * TcpExtDelayedACKLocked + * TcpExtDelayedACKLost + * TcpExtDelayedACKs + * TcpExtEmbryonicRsts + * TcpExtIPReversePathFilter + * TcpExtListenDrops + * TcpExtListenOverflows + * TcpExtLockDroppedIcmps + * TcpExtOfoPruned + * TcpExtOutOfWindowIcmps + * TcpExtPAWSActive + * TcpExtPAWSEstab + * TcpExtPAWSPassive + * TcpExtPruneCalled + * TcpExtRcvPruned + * TcpExtSyncookiesFailed + * TcpExtSyncookiesRecv + * TcpExtSyncookiesSent + * TcpExtTCPACKSkippedChallenge + * TcpExtTCPACKSkippedFinWait2 + * TcpExtTCPACKSkippedPAWS + * 
TcpExtTCPACKSkippedSeq + * TcpExtTCPACKSkippedSynRecv + * TcpExtTCPACKSkippedTimeWait + * TcpExtTCPAbortFailed + * TcpExtTCPAbortOnClose + * TcpExtTCPAbortOnData + * TcpExtTCPAbortOnLinger + * TcpExtTCPAbortOnMemory + * TcpExtTCPAbortOnTimeout + * TcpExtTCPAutoCorking + * TcpExtTCPBacklogDrop + * TcpExtTCPChallengeACK + * TcpExtTCPDSACKIgnoredNoUndo + * TcpExtTCPDSACKIgnoredOld + * TcpExtTCPDSACKOfoRecv + * TcpExtTCPDSACKOfoSent + * TcpExtTCPDSACKOldSent + * TcpExtTCPDSACKRecv + * TcpExtTCPDSACKUndo + * TcpExtTCPDeferAcceptDrop + * TcpExtTCPDirectCopyFromBacklog + * TcpExtTCPDirectCopyFromPrequeue + * TcpExtTCPFACKReorder + * TcpExtTCPFastOpenActive + * TcpExtTCPFastOpenActiveFail + * TcpExtTCPFastOpenCookieReqd + * TcpExtTCPFastOpenListenOverflow + * TcpExtTCPFastOpenPassive + * TcpExtTCPFastOpenPassiveFail + * TcpExtTCPFastRetrans + * TcpExtTCPForwardRetrans + * TcpExtTCPFromZeroWindowAdv + * TcpExtTCPFullUndo + * TcpExtTCPHPAcks + * TcpExtTCPHPHits + * TcpExtTCPHPHitsToUser + * TcpExtTCPHystartDelayCwnd + * TcpExtTCPHystartDelayDetect + * TcpExtTCPHystartTrainCwnd + * TcpExtTCPHystartTrainDetect + * TcpExtTCPKeepAlive + * TcpExtTCPLossFailures + * TcpExtTCPLossProbeRecovery + * TcpExtTCPLossProbes + * TcpExtTCPLossUndo + * TcpExtTCPLostRetransmit + * TcpExtTCPMD5NotFound + * TcpExtTCPMD5Unexpected + * TcpExtTCPMTUPFail + * TcpExtTCPMTUPSuccess + * TcpExtTCPMemoryPressures + * TcpExtTCPMinTTLDrop + * TcpExtTCPOFODrop + * TcpExtTCPOFOMerge + * TcpExtTCPOFOQueue + * TcpExtTCPOrigDataSent + * TcpExtTCPPartialUndo + * TcpExtTCPPrequeueDropped + * TcpExtTCPPrequeued + * TcpExtTCPPureAcks + * TcpExtTCPRcvCoalesce + * TcpExtTCPRcvCollapsed + * TcpExtTCPRenoFailures + * TcpExtTCPRenoRecovery + * TcpExtTCPRenoRecoveryFail + * TcpExtTCPRenoReorder + * TcpExtTCPReqQFullDoCookies + * TcpExtTCPReqQFullDrop + * TcpExtTCPRetransFail + * TcpExtTCPSACKDiscard + * TcpExtTCPSACKReneging + * TcpExtTCPSACKReorder + * TcpExtTCPSYNChallenge + * TcpExtTCPSackFailures + * TcpExtTCPSackMerged + * TcpExtTCPSackRecovery + * TcpExtTCPSackRecoveryFail + * TcpExtTCPSackShiftFallback + * TcpExtTCPSackShifted + * TcpExtTCPSchedulerFailed + * TcpExtTCPSlowStartRetrans + * TcpExtTCPSpuriousRTOs + * TcpExtTCPSpuriousRtxHostQueues + * TcpExtTCPSynRetrans + * TcpExtTCPTSReorder + * TcpExtTCPTimeWaitOverflow + * TcpExtTCPTimeouts + * TcpExtTCPToZeroWindowAdv + * TcpExtTCPWantZeroWindowAdv + * TcpExtTCPWinProbe + * TcpExtTW + * TcpExtTWKilled + * TcpExtTWRecycled + * TcpInCsumErrors + * TcpInErrs + * TcpInSegs + * TcpMaxConn + * TcpOutRsts + * TcpOutSegs + * TcpPassiveOpens + * TcpRetransSegs + * TcpRtoAlgorithm + * TcpRtoMax + * TcpRtoMin + * Udp6IgnoredMulti + * Udp6InCsumErrors + * Udp6InDatagrams + * Udp6InErrors + * Udp6NoPorts + * Udp6OutDatagrams + * Udp6RcvbufErrors + * Udp6SndbufErrors + * UdpIgnoredMulti + * UdpInCsumErrors + * UdpInDatagrams + * UdpInErrors + * UdpLite6InCsumErrors + * UdpLite6InDatagrams + * UdpLite6InErrors + * UdpLite6NoPorts + * UdpLite6OutDatagrams + * UdpLite6RcvbufErrors + * UdpLite6SndbufErrors + * UdpLiteIgnoredMulti + * UdpLiteInCsumErrors + * UdpLiteInDatagrams + * UdpLiteInErrors + * UdpLiteNoPorts + * UdpLiteOutDatagrams + * UdpLiteRcvbufErrors + * UdpLiteSndbufErrors + * UdpNoPorts + * UdpOutDatagrams + * UdpRcvbufErrors + * UdpSndbufErrors -- nstat - - Icmp6InCsumErrors - - Icmp6InDestUnreachs - - Icmp6InEchoReplies - - Icmp6InEchos - - Icmp6InErrors - - Icmp6InGroupMembQueries - - Icmp6InGroupMembReductions - - Icmp6InGroupMembResponses - - Icmp6InMLDv2Reports - - Icmp6InMsgs - - 
Icmp6InNeighborAdvertisements - - Icmp6InNeighborSolicits - - Icmp6InParmProblems - - Icmp6InPktTooBigs - - Icmp6InRedirects - - Icmp6InRouterAdvertisements - - Icmp6InRouterSolicits - - Icmp6InTimeExcds - - Icmp6OutDestUnreachs - - Icmp6OutEchoReplies - - Icmp6OutEchos - - Icmp6OutErrors - - Icmp6OutGroupMembQueries - - Icmp6OutGroupMembReductions - - Icmp6OutGroupMembResponses - - Icmp6OutMLDv2Reports - - Icmp6OutMsgs - - Icmp6OutNeighborAdvertisements - - Icmp6OutNeighborSolicits - - Icmp6OutParmProblems - - Icmp6OutPktTooBigs - - Icmp6OutRedirects - - Icmp6OutRouterAdvertisements - - Icmp6OutRouterSolicits - - Icmp6OutTimeExcds - - Icmp6OutType133 - - Icmp6OutType135 - - Icmp6OutType143 - - IcmpInAddrMaskReps - - IcmpInAddrMasks - - IcmpInCsumErrors - - IcmpInDestUnreachs - - IcmpInEchoReps - - IcmpInEchos - - IcmpInErrors - - IcmpInMsgs - - IcmpInParmProbs - - IcmpInRedirects - - IcmpInSrcQuenchs - - IcmpInTimeExcds - - IcmpInTimestampReps - - IcmpInTimestamps - - IcmpMsgInType3 - - IcmpMsgOutType3 - - IcmpOutAddrMaskReps - - IcmpOutAddrMasks - - IcmpOutDestUnreachs - - IcmpOutEchoReps - - IcmpOutEchos - - IcmpOutErrors - - IcmpOutMsgs - - IcmpOutParmProbs - - IcmpOutRedirects - - IcmpOutSrcQuenchs - - IcmpOutTimeExcds - - IcmpOutTimestampReps - - IcmpOutTimestamps - - Ip6FragCreates - - Ip6FragFails - - Ip6FragOKs - - Ip6InAddrErrors - - Ip6InBcastOctets - - Ip6InCEPkts - - Ip6InDelivers - - Ip6InDiscards - - Ip6InECT0Pkts - - Ip6InECT1Pkts - - Ip6InHdrErrors - - Ip6InMcastOctets - - Ip6InMcastPkts - - Ip6InNoECTPkts - - Ip6InNoRoutes - - Ip6InOctets - - Ip6InReceives - - Ip6InTooBigErrors - - Ip6InTruncatedPkts - - Ip6InUnknownProtos - - Ip6OutBcastOctets - - Ip6OutDiscards - - Ip6OutForwDatagrams - - Ip6OutMcastOctets - - Ip6OutMcastPkts - - Ip6OutNoRoutes - - Ip6OutOctets - - Ip6OutRequests - - Ip6ReasmFails - - Ip6ReasmOKs - - Ip6ReasmReqds - - Ip6ReasmTimeout - - IpDefaultTTL - - IpExtInBcastOctets - - IpExtInBcastPkts - - IpExtInCEPkts - - IpExtInCsumErrors - - IpExtInECT0Pkts - - IpExtInECT1Pkts - - IpExtInMcastOctets - - IpExtInMcastPkts - - IpExtInNoECTPkts - - IpExtInNoRoutes - - IpExtInOctets - - IpExtInTruncatedPkts - - IpExtOutBcastOctets - - IpExtOutBcastPkts - - IpExtOutMcastOctets - - IpExtOutMcastPkts - - IpExtOutOctets - - IpForwDatagrams - - IpForwarding - - IpFragCreates - - IpFragFails - - IpFragOKs - - IpInAddrErrors - - IpInDelivers - - IpInDiscards - - IpInHdrErrors - - IpInReceives - - IpInUnknownProtos - - IpOutDiscards - - IpOutNoRoutes - - IpOutRequests - - IpReasmFails - - IpReasmOKs - - IpReasmReqds - - IpReasmTimeout - - TcpActiveOpens - - TcpAttemptFails - - TcpCurrEstab - - TcpEstabResets - - TcpExtArpFilter - - TcpExtBusyPollRxPackets - - TcpExtDelayedACKLocked - - TcpExtDelayedACKLost - - TcpExtDelayedACKs - - TcpExtEmbryonicRsts - - TcpExtIPReversePathFilter - - TcpExtListenDrops - - TcpExtListenOverflows - - TcpExtLockDroppedIcmps - - TcpExtOfoPruned - - TcpExtOutOfWindowIcmps - - TcpExtPAWSActive - - TcpExtPAWSEstab - - TcpExtPAWSPassive - - TcpExtPruneCalled - - TcpExtRcvPruned - - TcpExtSyncookiesFailed - - TcpExtSyncookiesRecv - - TcpExtSyncookiesSent - - TcpExtTCPACKSkippedChallenge - - TcpExtTCPACKSkippedFinWait2 - - TcpExtTCPACKSkippedPAWS - - TcpExtTCPACKSkippedSeq - - TcpExtTCPACKSkippedSynRecv - - TcpExtTCPACKSkippedTimeWait - - TcpExtTCPAbortFailed - - TcpExtTCPAbortOnClose - - TcpExtTCPAbortOnData - - TcpExtTCPAbortOnLinger - - TcpExtTCPAbortOnMemory - - TcpExtTCPAbortOnTimeout - - TcpExtTCPAutoCorking - - TcpExtTCPBacklogDrop - - 
TcpExtTCPChallengeACK - - TcpExtTCPDSACKIgnoredNoUndo - - TcpExtTCPDSACKIgnoredOld - - TcpExtTCPDSACKOfoRecv - - TcpExtTCPDSACKOfoSent - - TcpExtTCPDSACKOldSent - - TcpExtTCPDSACKRecv - - TcpExtTCPDSACKUndo - - TcpExtTCPDeferAcceptDrop - - TcpExtTCPDirectCopyFromBacklog - - TcpExtTCPDirectCopyFromPrequeue - - TcpExtTCPFACKReorder - - TcpExtTCPFastOpenActive - - TcpExtTCPFastOpenActiveFail - - TcpExtTCPFastOpenCookieReqd - - TcpExtTCPFastOpenListenOverflow - - TcpExtTCPFastOpenPassive - - TcpExtTCPFastOpenPassiveFail - - TcpExtTCPFastRetrans - - TcpExtTCPForwardRetrans - - TcpExtTCPFromZeroWindowAdv - - TcpExtTCPFullUndo - - TcpExtTCPHPAcks - - TcpExtTCPHPHits - - TcpExtTCPHPHitsToUser - - TcpExtTCPHystartDelayCwnd - - TcpExtTCPHystartDelayDetect - - TcpExtTCPHystartTrainCwnd - - TcpExtTCPHystartTrainDetect - - TcpExtTCPKeepAlive - - TcpExtTCPLossFailures - - TcpExtTCPLossProbeRecovery - - TcpExtTCPLossProbes - - TcpExtTCPLossUndo - - TcpExtTCPLostRetransmit - - TcpExtTCPMD5NotFound - - TcpExtTCPMD5Unexpected - - TcpExtTCPMTUPFail - - TcpExtTCPMTUPSuccess - - TcpExtTCPMemoryPressures - - TcpExtTCPMinTTLDrop - - TcpExtTCPOFODrop - - TcpExtTCPOFOMerge - - TcpExtTCPOFOQueue - - TcpExtTCPOrigDataSent - - TcpExtTCPPartialUndo - - TcpExtTCPPrequeueDropped - - TcpExtTCPPrequeued - - TcpExtTCPPureAcks - - TcpExtTCPRcvCoalesce - - TcpExtTCPRcvCollapsed - - TcpExtTCPRenoFailures - - TcpExtTCPRenoRecovery - - TcpExtTCPRenoRecoveryFail - - TcpExtTCPRenoReorder - - TcpExtTCPReqQFullDoCookies - - TcpExtTCPReqQFullDrop - - TcpExtTCPRetransFail - - TcpExtTCPSACKDiscard - - TcpExtTCPSACKReneging - - TcpExtTCPSACKReorder - - TcpExtTCPSYNChallenge - - TcpExtTCPSackFailures - - TcpExtTCPSackMerged - - TcpExtTCPSackRecovery - - TcpExtTCPSackRecoveryFail - - TcpExtTCPSackShiftFallback - - TcpExtTCPSackShifted - - TcpExtTCPSchedulerFailed - - TcpExtTCPSlowStartRetrans - - TcpExtTCPSpuriousRTOs - - TcpExtTCPSpuriousRtxHostQueues - - TcpExtTCPSynRetrans - - TcpExtTCPTSReorder - - TcpExtTCPTimeWaitOverflow - - TcpExtTCPTimeouts - - TcpExtTCPToZeroWindowAdv - - TcpExtTCPWantZeroWindowAdv - - TcpExtTCPWinProbe - - TcpExtTW - - TcpExtTWKilled - - TcpExtTWRecycled - - TcpInCsumErrors - - TcpInErrs - - TcpInSegs - - TcpMaxConn - - TcpOutRsts - - TcpOutSegs - - TcpPassiveOpens - - TcpRetransSegs - - TcpRtoAlgorithm - - TcpRtoMax - - TcpRtoMin - - Udp6IgnoredMulti - - Udp6InCsumErrors - - Udp6InDatagrams - - Udp6InErrors - - Udp6NoPorts - - Udp6OutDatagrams - - Udp6RcvbufErrors - - Udp6SndbufErrors - - UdpIgnoredMulti - - UdpInCsumErrors - - UdpInDatagrams - - UdpInErrors - - UdpLite6InCsumErrors - - UdpLite6InDatagrams - - UdpLite6InErrors - - UdpLite6NoPorts - - UdpLite6OutDatagrams - - UdpLite6RcvbufErrors - - UdpLite6SndbufErrors - - UdpLiteIgnoredMulti - - UdpLiteInCsumErrors - - UdpLiteInDatagrams - - UdpLiteInErrors - - UdpLiteNoPorts - - UdpLiteOutDatagrams - - UdpLiteRcvbufErrors - - UdpLiteSndbufErrors - - UdpNoPorts - - UdpOutDatagrams - - UdpRcvbufErrors - - UdpSndbufErrors +## Tags -### Tags -- All measurements have the following tags - - host (host of the system) - - name (the type of the metric: snmp, snmp6 or netstat) +* All measurements have the following tags + * host (host of the system) + * name (the type of the metric: snmp, snmp6 or netstat) diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md index e691200ddd682..41684cc40550c 100644 --- a/plugins/inputs/ntpq/README.md +++ b/plugins/inputs/ntpq/README.md @@ -24,7 +24,7 @@ the remote peer or server (RMS, milliseconds); - jitter – 
Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration: +## Configuration ```toml # Get standard NTP query metrics, requires ntpq executable @@ -33,27 +33,27 @@ server (RMS of difference of multiple time samples, milliseconds); dns_lookup = true ``` -### Measurements & Fields: +## Measurements & Fields - ntpq - - delay (float, milliseconds) - - jitter (float, milliseconds) - - offset (float, milliseconds) - - poll (int, seconds) - - reach (int) - - when (int, seconds) + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - reach (int) + - when (int, seconds) -### Tags: +## Tags - All measurements have the following tags: - - refid - - remote - - type - - stratum + - refid + - remote + - type + - stratum -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter ntpq --test * Plugin: ntpq, Collection 1 > ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134 diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 479634d7befb0..2ca257c2790ba 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -2,7 +2,7 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other. -### Configuration +## Configuration ```toml # Pulls statistics from nvidia GPUs attached to the host @@ -16,18 +16,19 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid # timeout = "5s" ``` -#### Linux +### Linux On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` -#### Windows +### Windows On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe` You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` -### Metrics +## Metrics + - measurement: `nvidia_smi` - tags - `name` (type of GPU e.g. `GeForce GTX 1070 Ti`) @@ -61,7 +62,7 @@ You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program - `driver_version` (string) - `cuda_version` (string) -### Sample Query +## Sample Query The below query could be used to alert on the average temperature of the your GPUs over the last minute @@ -69,30 +70,34 @@ The below query could be used to alert on the average temperature of the your GP SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host" ``` -### Troubleshooting +## Troubleshooting Check the full output by running `nvidia-smi` binary manually. Linux: + ```sh sudo -u telegraf -- /usr/bin/nvidia-smi -q -x ``` Windows: -``` + +```sh "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x ``` Please include the output of this command if opening an GitHub issue. 
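+
+For reference, the Windows path escaping described above would look like this
+in `telegraf.conf` (a sketch assuming the plugin's `bin_path` option; the
+install location varies between driver versions):
+
+```toml
+[[inputs.nvidia_smi]]
+  ## escaped backslashes are required on Windows
+  bin_path = "C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe"
+```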
-### Example Output -``` +## Example Output + +```text nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 ``` -### Limitations +## Limitations + Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index f28981f7482ae..edd9b77c99921 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -5,7 +5,7 @@ The `opcua` plugin retrieves data from OPC UA client devices. Telegraf minimum version: Telegraf 1.16 Plugin minimum tested version: 1.16 -### Configuration: +## Configuration ```toml [[inputs.opcua]] @@ -91,23 +91,28 @@ Plugin minimum tested version: 1.16 #] ``` -### Node Configuration +## Node Configuration + An OPC UA node ID may resemble: "n=3;s=Temperature". In this example: + - n=3 is indicating the `namespace` is 3 - s=Temperature is indicting that the `identifier_type` is a string and `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 To gather data from this node enter the following line into the 'nodes' property above: -``` + +```shell {field_name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` This node configuration produces a metric like this: -``` + +```text opcua,id=n\=3;s\=Temperature temp=79.0,quality="OK (0x0)" 1597820490000000000 ``` -### Group Configuration +## Group Configuration + Groups can set default values for the namespace, identifier type, and tags settings. The default values apply to all the nodes in the group. If a default is set, a node may omit the setting altogether. @@ -119,7 +124,8 @@ a tag with the same name is set in both places, the tag value from the node is used. 
This example group configuration has two groups with two nodes each: -``` + +```toml [[inputs.opcua.group]] name="group1_metric_name" namespace="3" @@ -141,7 +147,8 @@ This example group configuration has two groups with two nodes each: ``` It produces metrics like these: -``` + +```text group1_metric_name,group1_tag=val1,id=ns\=3;i\=1001,node1_tag=val2 name=0,Quality="OK (0x0)" 1606893246000000000 group1_metric_name,group1_tag=val1,id=ns\=3;i\=1002,node1_tag=val3 name=-1.389117,Quality="OK (0x0)" 1606893246000000000 group2_metric_name,group2_tag=val3,id=ns\=3;i\=1003,node2_tag=val4 Quality="OK (0x0)",saw=-1.6 1606893246000000000 diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md index fcb175bd430f8..9b2dd44214d14 100644 --- a/plugins/inputs/openldap/README.md +++ b/plugins/inputs/openldap/README.md @@ -2,7 +2,7 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend. -### Configuration: +## Configuration To use this plugin you must enable the [slapd monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. @@ -31,11 +31,11 @@ To use this plugin you must enable the [slapd monitoring](https://www.openldap.o reverse_metric_names = true ``` -### Measurements & Fields: +## Measurements & Fields All **monitorCounter**, **monitoredInfo**, **monitorOpInitiated**, and **monitorOpCompleted** attributes are gathered based on this LDAP query: -``` +```sh (|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject)) ``` @@ -46,52 +46,52 @@ Metrics for the **monitorOp*** attributes have **_initiated** and **_completed** An OpenLDAP 2.4 server will provide these metrics: - openldap - - connections_current - - connections_max_file_descriptors - - connections_total - - operations_abandon_completed - - operations_abandon_initiated - - operations_add_completed - - operations_add_initiated - - operations_bind_completed - - operations_bind_initiated - - operations_compare_completed - - operations_compare_initiated - - operations_delete_completed - - operations_delete_initiated - - operations_extended_completed - - operations_extended_initiated - - operations_modify_completed - - operations_modify_initiated - - operations_modrdn_completed - - operations_modrdn_initiated - - operations_search_completed - - operations_search_initiated - - operations_unbind_completed - - operations_unbind_initiated - - statistics_bytes - - statistics_entries - - statistics_pdu - - statistics_referrals - - threads_active - - threads_backload - - threads_max - - threads_max_pending - - threads_open - - threads_pending - - threads_starting - - time_uptime - - waiters_read - - waiters_write - -### Tags: + - connections_current + - connections_max_file_descriptors + - connections_total + - operations_abandon_completed + - operations_abandon_initiated + - operations_add_completed + - operations_add_initiated + - operations_bind_completed + - operations_bind_initiated + - operations_compare_completed + - operations_compare_initiated + - operations_delete_completed + - operations_delete_initiated + - operations_extended_completed + - operations_extended_initiated + - operations_modify_completed + - operations_modify_initiated + - operations_modrdn_completed + - operations_modrdn_initiated + - operations_search_completed + - operations_search_initiated + - operations_unbind_completed + - operations_unbind_initiated + - statistics_bytes + - statistics_entries + - statistics_pdu + - statistics_referrals + - threads_active + - threads_backload 
+ - threads_max + - threads_max_pending + - threads_open + - threads_pending + - threads_starting + - time_uptime + - waiters_read + - waiters_write + +## Tags - server= # value from config - port= # value from config -### Example Output: +## Example Output -``` +```shell $ telegraf -config telegraf.conf -input-filter openldap -test --debug * Plugin: inputs.openldap, Collection 1 > openldap,server=localhost,port=389,host=niska.ait.psu.edu operations_bind_initiated=10i,operations_unbind_initiated=6i,operations_modrdn_completed=0i,operations_delete_initiated=0i,operations_add_completed=2i,operations_delete_completed=0i,operations_abandon_completed=0i,statistics_entries=1516i,threads_open=2i,threads_active=1i,waiters_read=1i,operations_modify_completed=0i,operations_extended_initiated=4i,threads_pending=0i,operations_search_initiated=36i,operations_compare_initiated=0i,connections_max_file_descriptors=4096i,operations_modify_initiated=0i,operations_modrdn_initiated=0i,threads_max=16i,time_uptime=6017i,connections_total=1037i,connections_current=1i,operations_add_initiated=2i,statistics_bytes=162071i,operations_unbind_completed=6i,operations_abandon_initiated=0i,statistics_pdu=1566i,threads_max_pending=0i,threads_backload=1i,waiters_write=0i,operations_bind_completed=10i,operations_search_completed=35i,operations_compare_completed=0i,operations_extended_completed=4i,statistics_referrals=0i,threads_starting=0i 1516912070000000000 diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md index 877c3a46092b1..f1b418e7849e2 100644 --- a/plugins/inputs/openntpd/README.md +++ b/plugins/inputs/openntpd/README.md @@ -20,7 +20,7 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration +## Configuration ```toml [[inputs.openntpd]] @@ -34,7 +34,7 @@ server (RMS of difference of multiple time samples, milliseconds); # timeout = "5ms" ``` -### Metrics +## Metrics - ntpctl - tags: @@ -49,7 +49,7 @@ server (RMS of difference of multiple time samples, milliseconds); - wt (int) - tl (int) -### Permissions +## Permissions It's important to note that this plugin references ntpctl, which may require additional permissions to execute successfully. @@ -57,6 +57,7 @@ Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -69,12 +70,14 @@ telegraf : telegraf ntpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.openntpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following lines: @@ -85,9 +88,9 @@ Defaults!NTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. 
-### Example Output +## Example Output -``` +```shell openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i, offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000 ``` diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index 5bbd4be89658a..ba360e45bf6fa 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -2,7 +2,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server-side SMTP protocol](https://www.opensmtpd.org/) -### Configuration: +## Configuration ```toml [[inputs.opensmtpd]] @@ -16,7 +16,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- #timeout = "1s" ``` -### Measurements & Fields: +## Measurements & Fields This is the full list of stats provided by smtpctl and potentially collected by telegram depending of your smtpctl configuration. @@ -59,12 +59,13 @@ depending of your smtpctl configuration. smtp_session_local uptime -### Permissions: +## Permissions It's important to note that this plugin references smtpctl, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -77,12 +78,14 @@ telegraf : telegraf opensmtpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.opensmtpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -93,9 +96,9 @@ Defaults!SMTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Example Output: +## Example Output -``` +```shell telegraf --config etc/telegraf.conf --input-filter opensmtpd --test * Plugin: inputs.opensmtpd, Collection 1 > opensmtpd,host=localhost scheduler_delivery_tempfail=822,mta_host=10,mta_task_running=4,queue_bounce=13017,scheduler_delivery_permfail=51022,mta_relay=7,queue_evpcache_size=2,scheduler_envelope_expired=26,bounce_message=0,mta_domain=7,queue_evpcache_update_hit=848,smtp_session_local=12294,bounce_envelope=0,queue_evpcache_load_hit=4389703,scheduler_ramqueue_update=0,mta_route=3,scheduler_delivery_ok=2149489,smtp_session_inet4=2131997,control_session=1,scheduler_envelope_incoming=0,uptime=10346728,scheduler_ramqueue_envelope=2,smtp_session=0,bounce_session=0,mta_envelope=2,mta_session=6,mta_task=2,scheduler_ramqueue_message=2,mta_connector=7,mta_source=1,scheduler_envelope=2,scheduler_envelope_inflight=2 1510220300000000000 diff --git a/plugins/inputs/openstack/README.md b/plugins/inputs/openstack/README.md index aa2d6eea09302..c67d36333363a 100644 --- a/plugins/inputs/openstack/README.md +++ b/plugins/inputs/openstack/README.md @@ -19,9 +19,10 @@ At present this plugin requires the following APIs: * orchestration v1 ## Configuration and Recommendations + ### Recommendations -Due to the large number of unique tags that this plugin generates, in order to keep the cardinality down it is **highly recommended** to use [modifiers](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#modifiers) like `tagexclude` to discard unwanted tags. 
+Due to the large number of unique tags that this plugin generates, in order to keep the cardinality down it is **highly recommended** to use [modifiers](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#modifiers) like `tagexclude` to discard unwanted tags. For deployments with only a small number of VMs and hosts, a small polling interval (e.g. seconds-minutes) is acceptable. For larger deployments, polling a large number of systems will impact performance. Use the `interval` option to change how often the plugin is run: @@ -29,7 +30,7 @@ For deployments with only a small number of VMs and hosts, a small polling inter Also, consider polling OpenStack services at different intervals depending on your requirements. This will help with load and cardinality as well. -``` +```toml [[inputs.openstack]] interval = 5m .... @@ -47,10 +48,9 @@ Also, consider polling OpenStack services at different intervals depending on yo .... ``` - ### Configuration -``` +```toml ## The recommended interval to poll is '30m' ## The identity endpoint to authenticate against and get the service catalog from. @@ -105,245 +105,245 @@ Also, consider polling OpenStack services at different intervals depending on yo ### Measurements, Tags & Fields * openstack_aggregate - * name - * aggregate_host [string] - * aggregate_hosts [integer] - * created_at [string] - * deleted [boolean] - * deleted_at [string] - * id [integer] - * updated_at [string] + * name + * aggregate_host [string] + * aggregate_hosts [integer] + * created_at [string] + * deleted [boolean] + * deleted_at [string] + * id [integer] + * updated_at [string] * openstack_flavor - * is_public - * name - * disk [integer] - * ephemeral [integer] - * id [string] - * ram [integer] - * rxtx_factor [float] - * swap [integer] - * vcpus [integer] + * is_public + * name + * disk [integer] + * ephemeral [integer] + * id [string] + * ram [integer] + * rxtx_factor [float] + * swap [integer] + * vcpus [integer] * openstack_hypervisor - * cpu_arch - * cpu_feature_tsc - * cpu_feature_tsc-deadline - * cpu_feature_tsc_adjust - * cpu_feature_tsx-ctrl - * cpu_feature_vme - * cpu_feature_vmx - * cpu_feature_x2apic - * cpu_feature_xgetbv1 - * cpu_feature_xsave - * cpu_model - * cpu_vendor - * hypervisor_hostname - * hypervisor_type - * hypervisor_version - * service_host - * service_id - * state - * status - * cpu_topology_cores [integer] - * cpu_topology_sockets [integer] - * cpu_topology_threads [integer] - * current_workload [integer] - * disk_available_least [integer] - * free_disk_gb [integer] - * free_ram_mb [integer] - * host_ip [string] - * id [string] - * local_gb [integer] - * local_gb_used [integer] - * memory_mb [integer] - * memory_mb_used [integer] - * running_vms [integer] - * vcpus [integer] - * vcpus_used [integer] + * cpu_arch + * cpu_feature_tsc + * cpu_feature_tsc-deadline + * cpu_feature_tsc_adjust + * cpu_feature_tsx-ctrl + * cpu_feature_vme + * cpu_feature_vmx + * cpu_feature_x2apic + * cpu_feature_xgetbv1 + * cpu_feature_xsave + * cpu_model + * cpu_vendor + * hypervisor_hostname + * hypervisor_type + * hypervisor_version + * service_host + * service_id + * state + * status + * cpu_topology_cores [integer] + * cpu_topology_sockets [integer] + * cpu_topology_threads [integer] + * current_workload [integer] + * disk_available_least [integer] + * free_disk_gb [integer] + * free_ram_mb [integer] + * host_ip [string] + * id [string] + * local_gb [integer] + * local_gb_used [integer] + * memory_mb [integer] + * memory_mb_used [integer] + * 
running_vms [integer] + * vcpus [integer] + * vcpus_used [integer] * openstack_identity - * description - * domain_id - * name - * parent_id - * enabled boolean - * id string - * is_domain boolean - * projects integer + * description + * domain_id + * name + * parent_id + * enabled boolean + * id string + * is_domain boolean + * projects integer * openstack_network - * name - * openstack_tags_xyz - * project_id - * status - * tenant_id - * admin_state_up [boolean] - * availability_zone_hints [string] - * created_at [string] - * id [string] - * shared [boolean] - * subnet_id [string] - * subnets [integer] - * updated_at [string] + * name + * openstack_tags_xyz + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * availability_zone_hints [string] + * created_at [string] + * id [string] + * shared [boolean] + * subnet_id [string] + * subnets [integer] + * updated_at [string] * openstack_newtron_agent - * agent_host - * agent_type - * availability_zone - * binary - * topic - * admin_state_up [boolean] - * alive [boolean] - * created_at [string] - * heartbeat_timestamp [string] - * id [string] - * resources_synced [boolean] - * started_at [string] + * agent_host + * agent_type + * availability_zone + * binary + * topic + * admin_state_up [boolean] + * alive [boolean] + * created_at [string] + * heartbeat_timestamp [string] + * id [string] + * resources_synced [boolean] + * started_at [string] * openstack_nova_service - * host_machine - * name - * state - * status - * zone - * disabled_reason [string] - * forced_down [boolean] - * id [string] - * updated_at [string] + * host_machine + * name + * state + * status + * zone + * disabled_reason [string] + * forced_down [boolean] + * id [string] + * updated_at [string] * openstack_port - * device_id - * device_owner - * name - * network_id - * project_id - * status - * tenant_id - * admin_state_up [boolean] - * allowed_address_pairs [integer] - * fixed_ips [integer] - * id [string] - * ip_address [string] - * mac_address [string] - * security_groups [string] - * subnet_id [string] + * device_id + * device_owner + * name + * network_id + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * allowed_address_pairs [integer] + * fixed_ips [integer] + * id [string] + * ip_address [string] + * mac_address [string] + * security_groups [string] + * subnet_id [string] * openstack_request_duration - * agents [integer] - * aggregates [integer] - * flavors [integer] - * hypervisors [integer] - * networks [integer] - * nova_services [integer] - * ports [integer] - * projects [integer] - * servers [integer] - * stacks [integer] - * storage_pools [integer] - * subnets [integer] - * volumes [integer] + * agents [integer] + * aggregates [integer] + * flavors [integer] + * hypervisors [integer] + * networks [integer] + * nova_services [integer] + * ports [integer] + * projects [integer] + * servers [integer] + * stacks [integer] + * storage_pools [integer] + * subnets [integer] + * volumes [integer] * openstack_server - * flavor - * host_id - * host_name - * image - * key_name - * name - * project - * status - * tenant_id - * user_id - * accessIPv4 [string] - * accessIPv6 [string] - * addresses [integer] - * adminPass [string] - * created [string] - * disk_gb [integer] - * fault_code [integer] - * fault_created [string] - * fault_details [string] - * fault_message [string] - * id [string] - * progress [integer] - * ram_mb [integer] - * security_groups [integer] - * updated [string] - * vcpus [integer] - * volume_id [string] - * 
volumes_attached [integer] + * flavor + * host_id + * host_name + * image + * key_name + * name + * project + * status + * tenant_id + * user_id + * accessIPv4 [string] + * accessIPv6 [string] + * addresses [integer] + * adminPass [string] + * created [string] + * disk_gb [integer] + * fault_code [integer] + * fault_created [string] + * fault_details [string] + * fault_message [string] + * id [string] + * progress [integer] + * ram_mb [integer] + * security_groups [integer] + * updated [string] + * vcpus [integer] + * volume_id [string] + * volumes_attached [integer] * openstack_server_diagnostics - * disk_name - * no_of_disks - * no_of_ports - * port_name - * server_id - * cpu0_time [float] - * cpu1_time [float] - * cpu2_time [float] - * cpu3_time [float] - * cpu4_time [float] - * cpu5_time [float] - * cpu6_time [float] - * cpu7_time [float] - * disk_errors [float] - * disk_read [float] - * disk_read_req [float] - * disk_write [float] - * disk_write_req [float] - * memory [float] - * memory-actual [float] - * memory-rss [float] - * memory-swap_in [float] - * port_rx [float] - * port_rx_drop [float] - * port_rx_errors [float] - * port_rx_packets [float] - * port_tx [float] - * port_tx_drop [float] - * port_tx_errors [float] - * port_tx_packets [float] + * disk_name + * no_of_disks + * no_of_ports + * port_name + * server_id + * cpu0_time [float] + * cpu1_time [float] + * cpu2_time [float] + * cpu3_time [float] + * cpu4_time [float] + * cpu5_time [float] + * cpu6_time [float] + * cpu7_time [float] + * disk_errors [float] + * disk_read [float] + * disk_read_req [float] + * disk_write [float] + * disk_write_req [float] + * memory [float] + * memory-actual [float] + * memory-rss [float] + * memory-swap_in [float] + * port_rx [float] + * port_rx_drop [float] + * port_rx_errors [float] + * port_rx_packets [float] + * port_tx [float] + * port_tx_drop [float] + * port_tx_errors [float] + * port_tx_packets [float] * openstack_service - * name - * service_enabled [boolean] - * service_id [string] + * name + * service_enabled [boolean] + * service_id [string] * openstack_storage_pool - * driver_version - * name - * storage_protocol - * vendor_name - * volume_backend_name - * free_capacity_gb [float] - * total_capacity_gb [float] + * driver_version + * name + * storage_protocol + * vendor_name + * volume_backend_name + * free_capacity_gb [float] + * total_capacity_gb [float] * openstack_subnet - * cidr - * gateway_ip - * ip_version - * name - * network_id - * openstack_tags_subnet_type_PRV - * project_id - * tenant_id - * allocation_pools [string] - * dhcp_enabled [boolean] - * dns_nameservers [string] - * id [string] + * cidr + * gateway_ip + * ip_version + * name + * network_id + * openstack_tags_subnet_type_PRV + * project_id + * tenant_id + * allocation_pools [string] + * dhcp_enabled [boolean] + * dns_nameservers [string] + * id [string] * openstack_volume - * attachment_attachment_id - * attachment_device - * attachment_host_name - * availability_zone - * bootable - * description - * name - * status - * user_id - * volume_type - * attachment_attached_at [string] - * attachment_server_id [string] - * created_at [string] - * encrypted [boolean] - * id [string] - * multiattach [boolean] - * size [integer] - * total_attachments [integer] - * updated_at [string] + * attachment_attachment_id + * attachment_device + * attachment_host_name + * availability_zone + * bootable + * description + * name + * status + * user_id + * volume_type + * attachment_attached_at [string] + * attachment_server_id 
[string] + * created_at [string] + * encrypted [boolean] + * id [string] + * multiattach [boolean] + * size [integer] + * total_attachments [integer] + * updated_at [string] ### Example Output -``` +```text > openstack_newtron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 > openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000 > openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000 diff --git a/plugins/inputs/opentelemetry/README.md b/plugins/inputs/opentelemetry/README.md index 20cc36d5d0403..0f83a469cd59c 100644 --- a/plugins/inputs/opentelemetry/README.md +++ b/plugins/inputs/opentelemetry/README.md @@ -2,7 +2,7 @@ This plugin receives traces, metrics and logs from [OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC. -### Configuration +## Configuration ```toml [[inputs.opentelemetry]] @@ -30,11 +30,11 @@ This plugin receives traces, metrics and logs from [OpenTelemetry](https://opent # tls_key = "/etc/telegraf/key.pem" ``` -#### Schema +### Schema The OpenTelemetry->InfluxDB conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/otel2influx) -are hosted at https://github.com/influxdata/influxdb-observability . +are hosted at . Spans are stored in measurement `spans`. Logs are stored in measurement `logs`. @@ -48,7 +48,8 @@ Also see the OpenTelemetry output plugin for Telegraf. 
### Example Output #### Tracing Spans -``` + +```text spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000 spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000 spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000 @@ -57,7 +58,8 @@ spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation ``` ### Metrics - `prometheus-v1` -``` + +```shell cpu_temp,foo=bar gauge=87.332 http_requests_total,method=post,code=200 counter=1027 http_requests_total,method=post,code=400 counter=3 @@ -66,7 +68,8 @@ rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560 ``` ### Metrics - `prometheus-v2` -``` + +```shell prometheus,foo=bar cpu_temp=87.332 prometheus,method=post,code=200 http_requests_total=1027 prometheus,method=post,code=400 http_requests_total=3 @@ -85,7 +88,8 @@ prometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_s ``` ### Logs -``` + +```text logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700 logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200 logs fluent.tag="fluent.info",worker=0i 1613769568896515100 diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 85803f76ab046..8ee7dce2d5d8b 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -6,11 +6,11 @@ To use this plugin you will need an [api key][] (app_id). City identifiers can be found in the [city list][]. Alternately you can [search][] by name; the `city_id` can be found as the last digits -of the URL: https://openweathermap.org/city/2643743. Language +of the URL: . Language identifiers can be found in the [lang list][]. Documentation for condition ID, icon, and main is at [weather conditions][]. -### Configuration +## Configuration ```toml [[inputs.openweathermap]] @@ -44,7 +44,7 @@ condition ID, icon, and main is at [weather conditions][]. interval = "10m" ``` -### Metrics +## Metrics - weather - tags: @@ -66,10 +66,9 @@ condition ID, icon, and main is at [weather conditions][]. 
- condition_description (string, localized long description) - condition_icon +## Example Output -### Example Output - -``` +```shell > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000 > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000 > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000 diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md index 6821635103d78..4300fd362dc24 100644 --- a/plugins/inputs/passenger/README.md +++ b/plugins/inputs/passenger/README.md @@ -2,7 +2,7 @@ Gather [Phusion Passenger](https://www.phusionpassenger.com/) metrics using the `passenger-status` command line utility. -**Series Cardinality Warning** +## Series Cardinality Warning Depending on your environment, this `passenger_process` measurement of this plugin can quickly create a high number of series which, when unchecked, can @@ -20,7 +20,7 @@ manage your series cardinality: - Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration +## Configuration ```toml # Read metrics of passenger using passenger-status @@ -36,11 +36,11 @@ manage your series cardinality: command = "passenger-status -v --show=xml" ``` -#### Permissions: +### Permissions Telegraf must have permission to execute the `passenger-status` command. On most systems, Telegraf runs as the `telegraf` user. -### Metrics: +## Metrics - passenger - tags: @@ -95,8 +95,9 @@ Telegraf must have permission to execute the `passenger-status` command. On mos - real_memory - vmsize -### Example Output: -``` +## Example Output + +```shell passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257 passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977 passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021 diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 9d4e2ad47c1b8..cef92498791e9 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -7,9 +7,9 @@ The pf plugin retrieves this information by invoking the `pfstat` command. The ` * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. * Configure sudo to grant telegraf to run `pfctl` as root. This is the most restrictive option, but require sudo setup. -* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. 
+* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. -### Using sudo +## Using sudo You may edit your sudo configuration with the following: @@ -17,40 +17,39 @@ You may edit your sudo configuration with the following: telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info ``` -### Configuration: +## Configuration ```toml # use sudo to run pfctl use_sudo = false ``` -### Measurements & Fields: +## Measurements & Fields +* pf + * entries (integer, count) + * searches (integer, count) + * inserts (integer, count) + * removals (integer, count) + * match (integer, count) + * bad-offset (integer, count) + * fragment (integer, count) + * short (integer, count) + * normalize (integer, count) + * memory (integer, count) + * bad-timestamp (integer, count) + * congestion (integer, count) + * ip-option (integer, count) + * proto-cksum (integer, count) + * state-mismatch (integer, count) + * state-insert (integer, count) + * state-limit (integer, count) + * src-limit (integer, count) + * synproxy (integer, count) -- pf - - entries (integer, count) - - searches (integer, count) - - inserts (integer, count) - - removals (integer, count) - - match (integer, count) - - bad-offset (integer, count) - - fragment (integer, count) - - short (integer, count) - - normalize (integer, count) - - memory (integer, count) - - bad-timestamp (integer, count) - - congestion (integer, count) - - ip-option (integer, count) - - proto-cksum (integer, count) - - state-mismatch (integer, count) - - state-insert (integer, count) - - state-limit (integer, count) - - src-limit (integer, count) - - synproxy (integer, count) +## Example Output -### Example Output: - -``` +```text > pfctl -s info Status: Enabled for 0 days 00:26:05 Debug: Urgent @@ -77,7 +76,7 @@ Counters synproxy 0 0.0/s ``` -``` +```shell > ./telegraf --config telegraf.conf --input-filter pf --test * Plugin: inputs.pf, Collection 1 > pf,host=columbia entries=3i,searches=2668i,inserts=12i,removals=9i 1510941775000000000 diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 53737a81ad098..abb7fcd35fde2 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -7,7 +7,7 @@ More information about the meaning of these metrics can be found in the - PgBouncer minimum tested version: 1.5 -### Configuration example +## Configuration example ```toml [[inputs.pgbouncer]] @@ -22,7 +22,7 @@ More information about the meaning of these metrics can be found in the address = "host=localhost user=pgbouncer sslmode=disable" ``` -#### `address` +### `address` Specify address via a postgresql connection string: @@ -37,7 +37,7 @@ All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. 
-### Metrics +## Metrics - pgbouncer - tags: @@ -57,7 +57,7 @@ This dbname is just for instantiating a connection with the server and doesn't r - total_xact_count - total_xact_time -+ pgbouncer_pools +- pgbouncer_pools - tags: - db - pool_mode @@ -74,9 +74,9 @@ This dbname is just for instantiating a connection with the server and doesn't r - sv_tested - sv_used -### Example Output +## Example Output -``` +```shell pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000 pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000 ``` diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index b31f4b7e427bd..8e7a6960ccf40 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -2,7 +2,7 @@ Get phpfpm stats using either HTTP status page or fpm socket. -### Configuration: +## Configuration ```toml # Read metrics of phpfpm, via HTTP status page or socket @@ -44,7 +44,7 @@ Get phpfpm stats using either HTTP status page or fpm socket. When using `unixsocket`, you have to ensure that telegraf runs on same host, and socket path is accessible to telegraf user. -### Metrics: +## Metrics - phpfpm - tags: @@ -62,9 +62,9 @@ host, and socket path is accessible to telegraf user. - max_children_reached - slow_requests -# Example Output +## Example Output -``` +```shell phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187 phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422 phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658 diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 10744a9b15e99..03ab366933678 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -13,7 +13,8 @@ ping packets. Most ping command implementations are supported, one notable exception being that there is currently no support for GNU Inetutils ping. You may instead use the iputils-ping implementation: -``` + +```sh apt-get install iputils-ping ``` @@ -21,7 +22,7 @@ When using `method = "native"` a ping is sent and the results are reported in native Go by the Telegraf process, eliminating the need to execute the system `ping` command. -### Configuration: +## Configuration ```toml [[inputs.ping]] @@ -76,7 +77,7 @@ native Go by the Telegraf process, eliminating the need to execute the system # size = 56 ``` -#### File Limit +### File Limit Since this plugin runs the ping command, it may need to open multiple files per host. 
The number of files used is lessened with the `native` option but still @@ -88,42 +89,49 @@ use the "drop-in directory", usually located at `/etc/systemd/system/telegraf.service.d`. You can create or edit a drop-in file in the correct location using: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` Increase the number of open files: + ```ini [Service] LimitNOFILE=8192 ``` Restart Telegraf: + ```sh -$ systemctl restart telegraf +systemctl restart telegraf ``` -#### Linux Permissions +### Linux Permissions When using `method = "native"`, Telegraf will attempt to use privileged raw ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities or for Telegraf to be run as root. With systemd: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` + ```ini [Service] CapabilityBoundingSet=CAP_NET_RAW AmbientCapabilities=CAP_NET_RAW ``` + ```sh -$ systemctl restart telegraf +systemctl restart telegraf ``` Without systemd: + ```sh -$ setcap cap_net_raw=eip /usr/bin/telegraf +setcap cap_net_raw=eip /usr/bin/telegraf ``` Reference [`man 7 capabilities`][man 7 capabilities] for more information about @@ -131,11 +139,11 @@ setting capabilities. [man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html -#### Other OS Permissions +### Other OS Permissions -When using `method = "native"`, you will need permissions similar to the executable ping program for your OS. +When using `method = "native"`, you will need permissions similar to the executable ping program for your OS. -### Metrics +## Metrics - ping - tags: @@ -155,19 +163,18 @@ When using `method = "native"`, you will need permissions similar to the executa - percent_reply_loss (float, Windows with method = "exec" only) - result_code (int, success = 0, no such host = 1, ping error = 2) -##### reply_received vs packets_received +### reply_received vs packets_received On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`*. -##### ttl +### ttl There is currently no support for TTL on windows with `"native"`; track -progress at https://github.com/golang/go/issues/7175 and -https://github.com/golang/go/issues/7174 - +progress at and + -### Example Output +## Example Output -``` +```shell ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 2fdfacd9d193c..5d42c881db487 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -3,11 +3,11 @@ The postfix plugin reports metrics on the postfix queues. For each of the active, hold, incoming, maildrop, and deferred queues -(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue +(), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds). -### Configuration +## Configuration ```toml [[inputs.postfix]] @@ -16,7 +16,7 @@ item in seconds). # queue_directory = "/var/spool/postfix" ``` -#### Permissions +### Permissions Telegraf will need read access to the files in the queue directory. 
You may need to alter the permissions of these directories to provide access to the @@ -26,20 +26,22 @@ This can be setup either using standard unix permissions or with Posix ACLs, you will only need to use one method: Unix permissions: + ```sh -$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo usermod -a -G postdrop telegraf -$ sudo chmod g+r /var/spool/postfix/maildrop +sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} +sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} +sudo usermod -a -G postdrop telegraf +sudo chmod g+r /var/spool/postfix/maildrop ``` Posix ACL: + ```sh -$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ -$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ ``` -### Metrics +## Metrics - postfix_queue - tags: @@ -49,10 +51,9 @@ $ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ - size (integer, bytes) - age (integer, seconds) +## Example Output -### Example Output - -``` +```shell postfix_queue,queue=active length=3,size=12345,age=9 postfix_queue,queue=hold length=0,size=0,age=0 postfix_queue,queue=maildrop length=1,size=2000,age=2 diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index 627fd2dbbfa88..d6771ade60b44 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -1,7 +1,8 @@ # PostgreSQL Input Plugin This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on your version of postgres. See table: -``` + +```sh pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) --- --- --- ------- ------- ------- datid x x x x @@ -27,10 +28,10 @@ stats_reset* x x _* value ignored and therefore not recorded._ - More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) ## Configuration + Specify address via a postgresql connection string: `host=localhost port=5432 user=telegraf database=telegraf` @@ -52,11 +53,13 @@ A list of databases to pull metrics about. 
If not specified, metrics for all dat ### TLS Configuration Add the `sslkey`, `sslcert` and `sslrootcert` options to your DSN: -``` + +```shell host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem ``` ### Configuration example + ```toml [[inputs.postgresql]] address = "postgres://telegraf@localhost/someDB" diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 70464140aedf4..7afddbfdee7f9 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -78,9 +78,11 @@ The example below has two queries are specified, with the following parameters: The system can be easily extended using homemade metrics collection tools or using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) -# Sample Queries : -- telegraf.conf postgresql_extensible queries (assuming that you have configured +## Sample Queries + +* telegraf.conf postgresql_extensible queries (assuming that you have configured correctly your connection) + ```toml [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" @@ -132,27 +134,33 @@ using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs tagvalue="type,enabled" ``` -# Postgresql Side +## Postgresql Side + postgresql.conf : -``` + +```sql shared_preload_libraries = 'pg_stat_statements,pg_stat_kcache' ``` Please follow the requirements to setup those extensions. In the database (can be a specific monitoring db) -``` + +```sql create extension pg_stat_statements; create extension pg_stat_kcache; create extension pg_proctab; ``` + (assuming that the extension is installed on the OS Layer) - - pg_stat_kcache is available on the postgresql.org yum repo - - pg_proctab is available at : https://github.com/markwkm/pg_proctab +* pg_stat_kcache is available on the postgresql.org yum repo +* pg_proctab is available at : + +## Views + +* Blocking sessions - ## Views - - Blocking sessions ```sql CREATE OR REPLACE VIEW public.blocking_procs AS SELECT a.datname AS db, @@ -176,7 +184,9 @@ CREATE OR REPLACE VIEW public.blocking_procs AS WHERE kl.granted AND NOT bl.granted ORDER BY a.query_start; ``` - - Sessions Statistics + +* Sessions Statistics + ```sql CREATE OR REPLACE VIEW public.sessions AS WITH proctab AS ( diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md index a6bad660fc37b..160c3d6d26849 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -2,7 +2,7 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. -### Configuration: +## Configuration ```toml # Description @@ -14,17 +14,18 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ``` -#### Permissions +### Permissions Telegraf will need read access to the powerdns control socket. 
On many systems this can be accomplished by adding the `telegraf` user to the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` -### Measurements & Fields: +## Measurements & Fields - powerdns - corrupt-packets @@ -66,13 +67,13 @@ usermod telegraf -a -G pdns - uptime - user-msec -### Tags: +## Tags - tags: `server=socket` -### Example Output: +## Example Output -``` +```sh $ ./telegraf --config telegraf.conf --input-filter powerdns --test > powerdns,server=/var/run/pdns.controlsocket corrupt-packets=0i,deferred-cache-inserts=0i,deferred-cache-lookup=0i,dnsupdate-answers=0i,dnsupdate-changes=0i,dnsupdate-queries=0i,dnsupdate-refused=0i,key-cache-size=0i,latency=26i,meta-cache-size=0i,packetcache-hit=0i,packetcache-miss=1i,packetcache-size=0i,qsize-q=0i,query-cache-hit=0i,query-cache-miss=6i,rd-queries=1i,recursing-answers=0i,recursing-questions=0i,recursion-unanswered=0i,security-status=3i,servfail-packets=0i,signature-cache-size=0i,signatures=0i,sys-msec=4349i,tcp-answers=0i,tcp-queries=0i,timedout-packets=0i,udp-answers=1i,udp-answers-bytes=50i,udp-do-queries=0i,udp-queries=0i,udp4-answers=1i,udp4-queries=1i,udp6-answers=0i,udp6-queries=0i,uptime=166738i,user-msec=3036i 1454078624932715706 ``` diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md index 09192db35ad2b..5cb8347f87571 100644 --- a/plugins/inputs/powerdns_recursor/README.md +++ b/plugins/inputs/powerdns_recursor/README.md @@ -3,7 +3,7 @@ The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using the unix controlsocket. -### Configuration +## Configuration ```toml [[inputs.powerdns_recursor]] @@ -17,7 +17,7 @@ the unix controlsocket. # socket_mode = "0666" ``` -#### Permissions +### Permissions Telegraf will need read/write access to the control socket and to the `socket_dir`. PowerDNS will need to be able to write to the `socket_dir`. @@ -27,25 +27,28 @@ adapted for other systems. First change permissions on the controlsocket in the PowerDNS recursor configuration, usually in `/etc/powerdns/recursor.conf`: -``` + +```sh socket-mode = 660 ``` Then place the `telegraf` user into the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` Since `telegraf` cannot write to to the default `/var/run` socket directory, create a subdirectory and adjust permissions for this directory so that both users can access it. 
+ ```sh -$ mkdir /var/run/pdns -$ chown root:pdns /var/run/pdns -$ chmod 770 /var/run/pdns +mkdir /var/run/pdns +chown root:pdns /var/run/pdns +chmod 770 /var/run/pdns ``` -### Metrics +## Metrics - powerdns_recursor - tags: @@ -156,8 +159,8 @@ $ chmod 770 /var/run/pdns - x-ourtime4-8 - x-ourtime8-16 -### Example Output +## Example Output -``` +```shell powerdns_recursor,server=/var/run/pdns_recursor.controlsocket all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 ``` diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md index 756326d75246d..ac561f9361660 100644 --- a/plugins/inputs/processes/README.md +++ b/plugins/inputs/processes/README.md @@ -8,7 +8,7 @@ it requires access to execute `ps`. 
**Supported Platforms**: Linux, FreeBSD, Darwin -### Configuration +## Configuration ```toml # Get the number of processes and group them by status @@ -21,7 +21,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info `docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc` -### Metrics +## Metrics - processes - fields: @@ -38,13 +38,13 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info - parked (linux only) - total_threads (linux only) -### Process State Mappings +## Process State Mappings Different OSes use slightly different State codes for their processes, these state codes are documented in `man ps`, and I will give a mapping of what major OS state codes correspond to in telegraf metrics: -``` +```sh Linux FreeBSD Darwin meaning R R R running S S S sleeping @@ -56,8 +56,8 @@ Linux FreeBSD Darwin meaning W W none paging (linux kernel < 2.6 only), wait (freebsd) ``` -### Example Output +## Example Output -``` +```shell processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index f0b9858601ade..60d213cd0c50d 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -5,6 +5,7 @@ The procstat_lookup metric displays the query information, specifically the number of PIDs returned on a search Processes can be selected for monitoring using one of several methods: + - pidfile - exe - pattern @@ -13,7 +14,7 @@ Processes can be selected for monitoring using one of several methods: - cgroup - win_service -### Configuration: +## Configuration ```toml # Monitor process cpu and memory usage @@ -63,12 +64,12 @@ Processes can be selected for monitoring using one of several methods: # pid_finder = "pgrep" ``` -#### Windows support +### Windows support Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. -### Metrics: +## Metrics - procstat - tags: @@ -161,9 +162,9 @@ the `win_perf_counters` input plugin as a more mature alternative. *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* -### Example Output: +## Example Output -``` +```shell procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000 procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000 ``` diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index fe6d3a8e816da..6b94e4be8bd92 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -3,7 +3,7 @@ The prometheus input plugin gathers metrics from HTTP servers exposing metrics in Prometheus format. 
-### Configuration: +## Configuration ```toml # Read metrics from one or many prometheus clients @@ -49,7 +49,7 @@ in Prometheus format. ## Only for node scrape scope: node IP of the node that telegraf is running on. ## Either this config or the environment variable NODE_IP must be set. # node_ip = "10.180.1.1" - + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. ## Default is 60 seconds. # pod_scrape_interval = 60 @@ -100,7 +100,7 @@ in Prometheus format. `urls` can contain a unix socket as well. If a different path is required (default is `/metrics` for both http[s] and unix) for a unix socket, add `path` as a query parameter as follows: `unix:///var/run/prometheus.sock?path=/custom/metrics` -#### Kubernetes Service Discovery +### Kubernetes Service Discovery URLs listed in the `kubernetes_services` parameter will be expanded by looking up all A records assigned to the hostname as described in @@ -109,7 +109,7 @@ by looking up all A records assigned to the hostname as described in This method can be used to locate all [Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services). -#### Kubernetes scraping +### Kubernetes scraping Enabling this option will allow the plugin to scrape for prometheus annotation on Kubernetes pods. Currently, you can run this plugin in your kubernetes cluster, or we use the kubeconfig @@ -124,7 +124,8 @@ Currently the following annotation are supported: Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. Using `pod_scrape_scope = "node"` allows more scalable scraping for pods which will scrape pods only in the node that telegraf is running. It will fetch the pod list locally from the node's kubelet. This will require running Telegraf in every node of the cluster. Note that either `node_ip` must be specified in the config or the environment variable `NODE_IP` must be set to the host IP. ThisThe latter can be done in the yaml of the pod running telegraf: -``` + +```sh env: - name: NODE_IP valueFrom: @@ -134,7 +135,7 @@ env: If using node level scrape scope, `pod_scrape_interval` specifies how often (in seconds) the pod list for scraping should updated. If not specified, the default is 60 seconds. -#### Consul Service Discovery +### Consul Service Discovery Enabling this option and configuring consul `agent` url will allow the plugin to query consul catalog for available services. Using `query_interval` the plugin will periodically @@ -143,6 +144,7 @@ It can use the information from the catalog to build the scraped url and additio Multiple consul queries can be configured, each for different service. The following example fields can be used in url or tag templates: + * Node * Address * NodeMeta @@ -152,15 +154,15 @@ The following example fields can be used in url or tag templates: * ServiceMeta For full list of available fields and their type see struct CatalogService in -https://github.com/hashicorp/consul/blob/master/api/catalog.go + -#### Bearer Token +### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on each interval and its contents will be appended to the Bearer string in the Authorization header. -### Usage for Caddy HTTP server +## Usage for Caddy HTTP server Steps to monitor Caddy with Telegraf's Prometheus input plugin: @@ -178,7 +180,7 @@ Steps to monitor Caddy with Telegraf's Prometheus input plugin: > This is the default URL where Caddy will send data. 
> For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). -### Metrics: +## Metrics Measurement names are based on the Metric Family and tags are created for each label. The value is added to a field named based on the metric type. @@ -187,10 +189,11 @@ All metrics receive the `url` tag indicating the related URL specified in the Telegraf configuration. If using Kubernetes service discovery the `address` tag is also added indicating the discovered ip address. -### Example Output: +## Example Output -**Source** -``` +### Source + +```shell # HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 7.4545e-05 @@ -211,8 +214,9 @@ cpu_usage_user{cpu="cpu2"} 2.0161290322588776 cpu_usage_user{cpu="cpu3"} 1.5045135406226022 ``` -**Output** -``` +### Output + +```shell go_gc_duration_seconds,url=http://example.org:9273/metrics 1=0.001336611,count=14,sum=0.004527551,0=0.000057965,0.25=0.000083812,0.5=0.000286537,0.75=0.000365303 1505776733000000000 go_goroutines,url=http://example.org:9273/metrics gauge=21 1505776695000000000 cpu_usage_user,cpu=cpu0,url=http://example.org:9273/metrics gauge=1.513622603430151 1505776751000000000 @@ -221,8 +225,9 @@ cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805 cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000 ``` -**Output (when metric_version = 2)** -``` +### Output (when metric_version = 2) + +```shell prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000 prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000 prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000 diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index db9f57e974d2d..4b76ce5c326a3 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -4,7 +4,7 @@ The proxmox plugin gathers metrics about containers and VMs using the Proxmox AP Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration ```toml [[inputs.proxmox]] @@ -25,13 +25,13 @@ Telegraf minimum version: Telegraf 1.16.0 response_timeout = "5s" ``` -#### Permissions +### Permissions The plugin will need to have access to the Proxmox API. An API token must be provided with the corresponding user being assigned at least the PVEAuditor role on /. -### Measurements & Fields: +## Measurements & Fields - proxmox - status @@ -50,16 +50,16 @@ role on /. 
- disk_free - disk_used_percentage -### Tags: +## Tags - - node_fqdn - FQDN of the node telegraf is running on - - vm_name - Name of the VM/container - - vm_fqdn - FQDN of the VM/container - - vm_type - Type of the VM/container (lxc, qemu) +- node_fqdn - FQDN of the node telegraf is running on +- vm_name - Name of the VM/container +- vm_fqdn - FQDN of the VM/container +- vm_type - Type of the VM/container (lxc, qemu) -### Example Output: +## Example Output -``` +```text $ ./telegraf --config telegraf.conf --input-filter proxmox --test > proxmox,host=pxnode,node_fqdn=pxnode.example.com,vm_fqdn=vm1.example.com,vm_name=vm1,vm_type=lxc cpuload=0.147998116735236,disk_free=4461129728i,disk_total=5217320960i,disk_used=756191232i,disk_used_percentage=14,mem_free=1046827008i,mem_total=1073741824i,mem_used=26914816i,mem_used_percentage=2,status="running",swap_free=536698880i,swap_total=536870912i,swap_used=172032i,swap_used_percentage=0,uptime=1643793i 1595457277000000000 > ... diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md index 1406064d5c617..db85dfe94a589 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -1,12 +1,12 @@ # PuppetAgent Input Plugin -#### Description +## Description The puppetagent plugin collects variables outputted from the 'last_run_summary.yaml' file usually located in `/var/lib/puppet/state/` [PuppetAgent Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs/). -``` +```sh cat /var/lib/puppet/state/last_run_summary.yaml --- @@ -45,7 +45,7 @@ cat /var/lib/puppet/state/last_run_summary.yaml puppet: "3.7.5" ``` -``` +```sh jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent --config tele.conf --test * Plugin: puppetagent, Collection 1 > [] puppetagent_events_failure value=0 @@ -77,65 +77,72 @@ jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent - > [] puppetagent_version_puppet value=3.7.5 ``` -## Measurements: -#### PuppetAgent int64 measurements: +## Measurements + +### PuppetAgent int64 measurements Meta: + - units: int64 - tags: `` Measurement names: - - puppetagent_changes_total - - puppetagent_events_failure - - puppetagent_events_total - - puppetagent_events_success - - puppetagent_resources_changed - - puppetagent_resources_corrective_change - - puppetagent_resources_failed - - puppetagent_resources_failedtorestart - - puppetagent_resources_outofsync - - puppetagent_resources_restarted - - puppetagent_resources_scheduled - - puppetagent_resources_skipped - - puppetagent_resources_total - - puppetagent_time_service - - puppetagent_time_lastrun - - puppetagent_version_config - -#### PuppetAgent float64 measurements: + +- puppetagent_changes_total +- puppetagent_events_failure +- puppetagent_events_total +- puppetagent_events_success +- puppetagent_resources_changed +- puppetagent_resources_corrective_change +- puppetagent_resources_failed +- puppetagent_resources_failedtorestart +- puppetagent_resources_outofsync +- puppetagent_resources_restarted +- puppetagent_resources_scheduled +- puppetagent_resources_skipped +- puppetagent_resources_total +- puppetagent_time_service +- puppetagent_time_lastrun +- puppetagent_version_config + +### PuppetAgent float64 measurements Meta: + - units: float64 - tags: `` Measurement names: - - puppetagent_time_anchor - - puppetagent_time_catalogapplication - - puppetagent_time_configretrieval - - puppetagent_time_convertcatalog - - puppetagent_time_cron - - 
puppetagent_time_exec - - puppetagent_time_factgeneration - - puppetagent_time_file - - puppetagent_time_filebucket - - puppetagent_time_group - - puppetagent_time_lastrun - - puppetagent_time_noderetrieval - - puppetagent_time_notify - - puppetagent_time_package - - puppetagent_time_pluginsync - - puppetagent_time_schedule - - puppetagent_time_sshauthorizedkey - - puppetagent_time_total - - puppetagent_time_transactionevaluation - - puppetagent_time_user - - puppetagent_version_config - -#### PuppetAgent string measurements: + +- puppetagent_time_anchor +- puppetagent_time_catalogapplication +- puppetagent_time_configretrieval +- puppetagent_time_convertcatalog +- puppetagent_time_cron +- puppetagent_time_exec +- puppetagent_time_factgeneration +- puppetagent_time_file +- puppetagent_time_filebucket +- puppetagent_time_group +- puppetagent_time_lastrun +- puppetagent_time_noderetrieval +- puppetagent_time_notify +- puppetagent_time_package +- puppetagent_time_pluginsync +- puppetagent_time_schedule +- puppetagent_time_sshauthorizedkey +- puppetagent_time_total +- puppetagent_time_transactionevaluation +- puppetagent_time_user +- puppetagent_version_config + +### PuppetAgent string measurements Meta: + - units: string - tags: `` Measurement names: - - puppetagent_version_puppet + +- puppetagent_version_puppet diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 5f106642adeb6..b89cd4da6e1ed 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -7,7 +7,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management [management]: https://www.rabbitmq.com/management.html [management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html -### Configuration +## Configuration ```toml [[inputs.rabbitmq]] @@ -66,7 +66,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management # federation_upstream_exclude = [] ``` -### Metrics +## Metrics - rabbitmq_overview - tags: @@ -90,7 +90,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - return_unroutable (int, number of unroutable messages) - return_unroutable_rate (float, number of unroutable messages per second) -+ rabbitmq_node +- rabbitmq_node - tags: - url - node @@ -182,7 +182,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - slave_nodes (int, count) - synchronised_slave_nodes (int, count) -+ rabbitmq_exchange +- rabbitmq_exchange - tags: - url - exchange @@ -217,17 +217,17 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - messages_publish (int, count) - messages_return_unroutable (int, count) -### Sample Queries +## Sample Queries Message rates for the entire node can be calculated from total message counts. 
For instance, to get the rate of messages published per minute, use this query: -``` +```sql SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) ``` -### Example Output +## Example Output -``` +```text rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000 rabbitmq_overview,url=http://amqp.example.org:15672,host=amqp.example.org channels=2i,consumers=1i,exchanges=17i,messages_acked=329i,messages=0i,messages_ready=0i,messages_unacked=0i,connections=2i,queues=1i,messages_delivered=329i,messages_published=329i,clustering_listeners=2i,amqp_listeners=1i 1493684035000000000 rabbitmq_node,url=http://amqp.example.org:15672,node=rabbit@amqp.example.org,host=amqp.example.org fd_total=1024i,fd_used=32i,mem_limit=8363329126i,sockets_total=829i,disk_free=8175935488i,disk_free_limit=50000000i,mem_used=58771080i,proc_total=1048576i,proc_used=267i,run_queue=0i,sockets_used=2i,running=1i 149368403500000000 diff --git a/plugins/inputs/raindrops/README.md b/plugins/inputs/raindrops/README.md index cdc13eec2d9a6..c380310513e0f 100644 --- a/plugins/inputs/raindrops/README.md +++ b/plugins/inputs/raindrops/README.md @@ -3,7 +3,7 @@ The [raindrops](http://raindrops.bogomips.org/) plugin reads from specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) URI and adds stats to InfluxDB. -### Configuration: +## Configuration ```toml # Read raindrops stats @@ -11,31 +11,31 @@ specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middlewa urls = ["http://localhost:8080/_raindrops"] ``` -### Measurements & Fields: +## Measurements & Fields - raindrops - - calling (integer, count) - - writing (integer, count) + - calling (integer, count) + - writing (integer, count) - raindrops_listen - - active (integer, bytes) - - queued (integer, bytes) + - active (integer, bytes) + - queued (integer, bytes) -### Tags: +## Tags - Raindops calling/writing of all the workers: - - server - - port + - server + - port - raindrops_listen (ip:port): - - ip - - port + - ip + - port - raindrops_listen (Unix Socket): - - socket + - socket -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter raindrops --test * Plugin: raindrops, Collection 1 > raindrops,port=8080,server=localhost calling=0i,writing=0i 1455479896806238204 diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md index 9c1cda75bff10..65dee749ee7bd 100644 --- a/plugins/inputs/ras/README.md +++ b/plugins/inputs/ras/README.md @@ -4,7 +4,7 @@ This plugin is only available on Linux (only for `386`, `amd64`, `arm` and `arm6 The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). 
-### Configuration +## Configuration ```toml [[inputs.ras]] @@ -15,7 +15,7 @@ The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://githu In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. -### Metrics +## Metrics - ras - tags: @@ -40,6 +40,7 @@ In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case - unclassified_mce_errors Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: + - internal_timer_errors - smm_handler_code_access_violation_errors - internal_parity_errors @@ -48,13 +49,13 @@ Please note that `processor_base_errors` is aggregate counter measuring the foll - microcode_rom_parity_errors - unclassified_mce_errors -### Permissions +## Permissions This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. -### Example Output +## Example Output -``` +```shell ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 ``` diff --git a/plugins/inputs/ravendb/README.md b/plugins/inputs/ravendb/README.md index b40850ab5c82d..e527d167f1281 100644 --- a/plugins/inputs/ravendb/README.md +++ b/plugins/inputs/ravendb/README.md @@ -4,7 +4,7 @@ Reads metrics from RavenDB servers via monitoring endpoints APIs. Requires RavenDB Server 5.2+. -### Configuration +## Configuration The following is an example config for RavenDB. **Note:** The client certificate used should have `Operator` permissions on the cluster. @@ -43,7 +43,7 @@ The following is an example config for RavenDB. **Note:** The client certificate # collection_stats_dbs = [] ``` -### Metrics +## Metrics - ravendb_server - tags: @@ -57,7 +57,7 @@ The following is an example config for RavenDB. **Note:** The client certificate - certificate_server_certificate_expiration_left_in_sec (optional) - certificate_well_known_admin_certificates (optional, separated by ';') - cluster_current_term - - cluster_index + - cluster_index - cluster_node_state - 0 -> Passive - 1 -> Candidate @@ -147,7 +147,7 @@ The following is an example config for RavenDB. **Note:** The client certificate - uptime_in_sec - ravendb_indexes - - tags: + - tags: - database_name - index_name - node_tag @@ -201,16 +201,16 @@ The following is an example config for RavenDB. 
**Note:** The client certificate - tombstones_size_in_bytes - total_size_in_bytes -### Example output +## Example output -``` +```text > ravendb_server,cluster_id=07aecc42-9194-4181-999c-1c42450692c9,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 backup_current_number_of_running_backups=0i,backup_max_number_of_concurrent_backups=4i,certificate_server_certificate_expiration_left_in_sec=-1,cluster_current_term=2i,cluster_index=10i,cluster_node_state=4i,config_server_urls="http://127.0.0.1:8080",cpu_assigned_processor_count=8i,cpu_machine_usage=19.09944089456869,cpu_process_usage=0.16977205323024872,cpu_processor_count=8i,cpu_thread_pool_available_completion_port_threads=1000i,cpu_thread_pool_available_worker_threads=32763i,databases_loaded_count=1i,databases_total_count=1i,disk_remaining_storage_space_percentage=18i,disk_system_store_total_data_file_size_in_mb=35184372088832i,disk_system_store_used_data_file_size_in_mb=31379031064576i,disk_total_free_space_in_mb=42931i,license_expiration_left_in_sec=24079222.8772186,license_max_cores=256i,license_type="Enterprise",license_utilized_cpu_cores=8i,memory_allocated_in_mb=205i,memory_installed_in_mb=16384i,memory_low_memory_severity=0i,memory_physical_in_mb=16250i,memory_total_dirty_in_mb=0i,memory_total_swap_size_in_mb=0i,memory_total_swap_usage_in_mb=0i,memory_working_set_swap_usage_in_mb=0i,network_concurrent_requests_count=1i,network_last_request_time_in_sec=0.0058717,network_requests_per_sec=0.09916543455308825,network_tcp_active_connections=128i,network_total_requests=10i,server_full_version="5.2.0-custom-52",server_process_id=31044i,server_version="5.2",uptime_in_sec=56i 1613027977000000000 > ravendb_databases,database_id=ced0edba-8f80-48b8-8e81-c3d2c6748ec3,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 counts_alerts=0i,counts_attachments=17i,counts_documents=1059i,counts_performance_hints=0i,counts_rehabs=0i,counts_replication_factor=1i,counts_revisions=5475i,counts_unique_attachments=17i,indexes_auto_count=0i,indexes_count=7i,indexes_disabled_count=0i,indexes_errored_count=0i,indexes_errors_count=0i,indexes_idle_count=0i,indexes_stale_count=0i,indexes_static_count=7i,statistics_doc_puts_per_sec=0,statistics_map_index_indexes_per_sec=0,statistics_map_reduce_index_mapped_per_sec=0,statistics_map_reduce_index_reduced_per_sec=0,statistics_request_average_duration_in_ms=0,statistics_requests_count=0i,statistics_requests_per_sec=0,storage_documents_allocated_data_file_in_mb=140737488355328i,storage_documents_used_data_file_in_mb=74741020884992i,storage_indexes_allocated_data_file_in_mb=175921860444160i,storage_indexes_used_data_file_in_mb=120722940755968i,storage_total_allocated_storage_file_in_mb=325455441821696i,storage_total_free_space_in_mb=42931i,uptime_in_sec=54 1613027977000000000 > ravendb_indexes,database_name=db1,host=DESKTOP-2OISR6D,index_name=Orders/Totals,node_tag=A,url=http://localhost:8080 errors=0i,is_invalid=false,lock_mode="Unlock",mapped_per_sec=0,priority="Normal",reduced_per_sec=0,state="Normal",status="Running",time_since_last_indexing_in_sec=45.4256655,time_since_last_query_in_sec=45.4304202,type="Map" 1613027977000000000 > ravendb_collections,collection_name=@hilo,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 documents_count=8i,documents_size_in_bytes=122880i,revisions_size_in_bytes=0i,tombstones_size_in_bytes=122880i,total_size_in_bytes=245760i 1613027977000000000 ``` -### Contributors +## Contributors -- Marcin Lewandowski 
(https://github.com/ml054/) -- Casey Barton (https://github.com/bartoncasey) \ No newline at end of file +- Marcin Lewandowski () +- Casey Barton () diff --git a/plugins/inputs/redfish/README.md b/plugins/inputs/redfish/README.md index cabf7e088047b..a033493e605a1 100644 --- a/plugins/inputs/redfish/README.md +++ b/plugins/inputs/redfish/README.md @@ -4,7 +4,7 @@ The `redfish` plugin gathers metrics and status information about CPU temperatur Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[inputs.redfish]] @@ -29,7 +29,7 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - redfish_thermal_temperatures - tags: @@ -50,8 +50,7 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - -+ redfish_thermal_fans +- redfish_thermal_fans - tags: - source - member_id @@ -70,7 +69,6 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - - redfish_power_powersupplies - tags: - source @@ -90,7 +88,6 @@ Telegraf minimum version: Telegraf 1.15.0 - power_input_watts - power_output_watts - - redfish_power_voltages (available only if voltage data is found) - tags: - source @@ -110,10 +107,9 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal +## Example Output -### Example Output - -``` +```text redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,member_id="1"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,member_id="2"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index bd89ea75346b2..eff031bab7cfd 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -1,6 +1,6 @@ # Redis Input Plugin -### Configuration: +## Configuration ```toml # Read Redis's basic status information @@ -37,7 +37,7 @@ # insecure_skip_verify = true ``` -### Measurements & Fields: +## Measurements & Fields The plugin gathers the results of the [INFO](https://redis.io/commands/info) redis command. There are two separate measurements: _redis_ and _redis\_keyspace_, the latter is used for gathering database related statistics. @@ -45,97 +45,97 @@ There are two separate measurements: _redis_ and _redis\_keyspace_, the latter i Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) and the elapsed time since the last rdb save (rdb\_last\_save\_time\_elapsed). 
- redis - - keyspace_hitrate(float, number) - - rdb_last_save_time_elapsed(int, seconds) + - keyspace_hitrate(float, number) + - rdb_last_save_time_elapsed(int, seconds) **Server** - - uptime(int, seconds) - - lru_clock(int, number) - - redis_version(string) + - uptime(int, seconds) + - lru_clock(int, number) + - redis_version(string) **Clients** - - clients(int, number) - - client_longest_output_list(int, number) - - client_biggest_input_buf(int, number) - - blocked_clients(int, number) + - clients(int, number) + - client_longest_output_list(int, number) + - client_biggest_input_buf(int, number) + - blocked_clients(int, number) **Memory** - - used_memory(int, bytes) - - used_memory_rss(int, bytes) - - used_memory_peak(int, bytes) - - total_system_memory(int, bytes) - - used_memory_lua(int, bytes) - - maxmemory(int, bytes) - - maxmemory_policy(string) - - mem_fragmentation_ratio(float, number) + - used_memory(int, bytes) + - used_memory_rss(int, bytes) + - used_memory_peak(int, bytes) + - total_system_memory(int, bytes) + - used_memory_lua(int, bytes) + - maxmemory(int, bytes) + - maxmemory_policy(string) + - mem_fragmentation_ratio(float, number) **Persistence** - - loading(int,flag) - - rdb_changes_since_last_save(int, number) - - rdb_bgsave_in_progress(int, flag) - - rdb_last_save_time(int, seconds) - - rdb_last_bgsave_status(string) - - rdb_last_bgsave_time_sec(int, seconds) - - rdb_current_bgsave_time_sec(int, seconds) - - aof_enabled(int, flag) - - aof_rewrite_in_progress(int, flag) - - aof_rewrite_scheduled(int, flag) - - aof_last_rewrite_time_sec(int, seconds) - - aof_current_rewrite_time_sec(int, seconds) - - aof_last_bgrewrite_status(string) - - aof_last_write_status(string) + - loading(int,flag) + - rdb_changes_since_last_save(int, number) + - rdb_bgsave_in_progress(int, flag) + - rdb_last_save_time(int, seconds) + - rdb_last_bgsave_status(string) + - rdb_last_bgsave_time_sec(int, seconds) + - rdb_current_bgsave_time_sec(int, seconds) + - aof_enabled(int, flag) + - aof_rewrite_in_progress(int, flag) + - aof_rewrite_scheduled(int, flag) + - aof_last_rewrite_time_sec(int, seconds) + - aof_current_rewrite_time_sec(int, seconds) + - aof_last_bgrewrite_status(string) + - aof_last_write_status(string) **Stats** - - total_connections_received(int, number) - - total_commands_processed(int, number) - - instantaneous_ops_per_sec(int, number) - - total_net_input_bytes(int, bytes) - - total_net_output_bytes(int, bytes) - - instantaneous_input_kbps(float, KB/sec) - - instantaneous_output_kbps(float, KB/sec) - - rejected_connections(int, number) - - sync_full(int, number) - - sync_partial_ok(int, number) - - sync_partial_err(int, number) - - expired_keys(int, number) - - evicted_keys(int, number) - - keyspace_hits(int, number) - - keyspace_misses(int, number) - - pubsub_channels(int, number) - - pubsub_patterns(int, number) - - latest_fork_usec(int, microseconds) - - migrate_cached_sockets(int, number) + - total_connections_received(int, number) + - total_commands_processed(int, number) + - instantaneous_ops_per_sec(int, number) + - total_net_input_bytes(int, bytes) + - total_net_output_bytes(int, bytes) + - instantaneous_input_kbps(float, KB/sec) + - instantaneous_output_kbps(float, KB/sec) + - rejected_connections(int, number) + - sync_full(int, number) + - sync_partial_ok(int, number) + - sync_partial_err(int, number) + - expired_keys(int, number) + - evicted_keys(int, number) + - keyspace_hits(int, number) + - keyspace_misses(int, number) + - pubsub_channels(int, number) + - 
pubsub_patterns(int, number) + - latest_fork_usec(int, microseconds) + - migrate_cached_sockets(int, number) **Replication** - - connected_slaves(int, number) - - master_link_down_since_seconds(int, number) - - master_link_status(string) - - master_repl_offset(int, number) - - second_repl_offset(int, number) - - repl_backlog_active(int, number) - - repl_backlog_size(int, bytes) - - repl_backlog_first_byte_offset(int, number) - - repl_backlog_histlen(int, bytes) + - connected_slaves(int, number) + - master_link_down_since_seconds(int, number) + - master_link_status(string) + - master_repl_offset(int, number) + - second_repl_offset(int, number) + - repl_backlog_active(int, number) + - repl_backlog_size(int, bytes) + - repl_backlog_first_byte_offset(int, number) + - repl_backlog_histlen(int, bytes) **CPU** - - used_cpu_sys(float, number) - - used_cpu_user(float, number) - - used_cpu_sys_children(float, number) - - used_cpu_user_children(float, number) + - used_cpu_sys(float, number) + - used_cpu_user(float, number) + - used_cpu_sys_children(float, number) + - used_cpu_user_children(float, number) **Cluster** - - cluster_enabled(int, flag) + - cluster_enabled(int, flag) - redis_keyspace - - keys(int, number) - - expires(int, number) - - avg_ttl(int, number) + - keys(int, number) + - expires(int, number) + - avg_ttl(int, number) - redis_cmdstat Every Redis used command will have 3 new fields: - - calls(int, number) - - usec(int, mircoseconds) - - usec_per_call(float, microseconds) + - calls(int, number) + - usec(int, mircoseconds) + - usec_per_call(float, microseconds) - redis_replication - tags: @@ -148,22 +148,23 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - lag(int, number) - offset(int, number) -### Tags: +## Tags - All measurements have the following tags: - - port - - server - - replication_role + - port + - server + - replication_role - The redis_keyspace measurement has an additional database tag: - - database + - database - The redis_cmdstat measurement has an additional tag: - - command + - command -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.redis]] ## specify servers via a url matching: @@ -178,22 +179,26 @@ Using this configuration: ``` When run with: -``` + +```sh ./telegraf --config telegraf.conf --input-filter redis --test ``` It produces: -``` + +```shell * Plugin: redis, Collection 1 > redis,server=localhost,port=6379,replication_role=master,host=host 
keyspace_hitrate=1,clients=2i,blocked_clients=0i,instantaneous_input_kbps=0,sync_full=0i,pubsub_channels=0i,pubsub_patterns=0i,total_net_output_bytes=6659253i,used_memory=842448i,total_system_memory=8351916032i,aof_current_rewrite_time_sec=-1i,rdb_changes_since_last_save=0i,sync_partial_err=0i,latest_fork_usec=508i,instantaneous_output_kbps=0,expired_keys=0i,used_memory_peak=843416i,aof_rewrite_in_progress=0i,aof_last_bgrewrite_status="ok",migrate_cached_sockets=0i,connected_slaves=0i,maxmemory_policy="noeviction",aof_rewrite_scheduled=0i,total_net_input_bytes=3125i,used_memory_rss=9564160i,repl_backlog_histlen=0i,rdb_last_bgsave_status="ok",aof_last_rewrite_time_sec=-1i,keyspace_misses=0i,client_biggest_input_buf=5i,used_cpu_user=1.33,maxmemory=0i,rdb_current_bgsave_time_sec=-1i,total_commands_processed=271i,repl_backlog_size=1048576i,used_cpu_sys=3,uptime=2822i,lru_clock=16706281i,used_memory_lua=37888i,rejected_connections=0i,sync_partial_ok=0i,evicted_keys=0i,rdb_last_save_time_elapsed=1922i,rdb_last_save_time=1493099368i,instantaneous_ops_per_sec=0i,used_cpu_user_children=0,client_longest_output_list=0i,master_repl_offset=0i,repl_backlog_active=0i,keyspace_hits=2i,used_cpu_sys_children=0,cluster_enabled=0i,rdb_last_bgsave_time_sec=0i,aof_last_write_status="ok",total_connections_received=263i,aof_enabled=0i,repl_backlog_first_byte_offset=0i,mem_fragmentation_ratio=11.35,loading=0i,rdb_bgsave_in_progress=0i 1493101290000000000 ``` redis_keyspace: -``` + +```shell > redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000 ``` redis_command: -``` + +```shell > redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000 ``` diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md index b1946644ea13a..852da8318e704 100644 --- a/plugins/inputs/rethinkdb/README.md +++ b/plugins/inputs/rethinkdb/README.md @@ -2,7 +2,7 @@ Collect metrics from [RethinkDB](https://www.rethinkdb.com/). -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage rethinkdb`. @@ -25,7 +25,7 @@ generate it using `telegraf --usage rethinkdb`. # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] ``` -### Metrics +## Metrics - rethinkdb - tags: @@ -44,7 +44,7 @@ generate it using `telegraf --usage rethinkdb`. - disk_usage_metadata_bytes (integer, bytes) - disk_usage_preallocated_bytes (integer, bytes) -+ rethinkdb_engine +- rethinkdb_engine - tags: - type - ns diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md index a435eea4d7f63..f1a46af336ff9 100644 --- a/plugins/inputs/riak/README.md +++ b/plugins/inputs/riak/README.md @@ -2,7 +2,7 @@ The Riak plugin gathers metrics from one or more riak instances. -### Configuration: +## Configuration ```toml # Description @@ -11,7 +11,7 @@ The Riak plugin gathers metrics from one or more riak instances. servers = ["http://localhost:8098"] ``` -### Measurements & Fields: +## Measurements & Fields Riak provides one measurement named "riak", with the following fields: @@ -63,16 +63,16 @@ Riak provides one measurement named "riak", with the following fields: Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds. -### Tags: +## Tags All measurements have the following tags: - server (the host:port of the given server address, ex. 
`127.0.0.1:8087`) - nodename (the internal node name received, ex. `riak@127.0.0.1`) -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter riak --test > riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i,read_repair=0i,read_repairs_total=0i 1455913392622482332 ``` diff --git a/plugins/inputs/riemann_listener/README.md b/plugins/inputs/riemann_listener/README.md index 54e70be6ecb71..9110a5f1eb147 100644 --- a/plugins/inputs/riemann_listener/README.md +++ b/plugins/inputs/riemann_listener/README.md @@ -3,8 +3,7 @@ The Riemann Listener is a simple input plugin that listens for messages from client that use riemann clients using riemann-protobuff format. - -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -36,6 +35,7 @@ This is a sample configuration for the plugin. ## Defaults to the OS configuration. # keep_alive_period = "5m" ``` + Just like Riemann the default port is 5555. This can be configured, refer configuration above. Riemann `Service` is mapped as `measurement`. `metric` and `TTL` are converted into field values. 
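
For example, an event whose `Service` is `cpu/load` with a `metric` of 0.35 and a `TTL` of 60 seconds would, roughly speaking, come out as a measurement named `cpu/load` carrying the metric and TTL as field values. The sample below is only an illustrative sketch of that mapping; the exact field and tag names in the emitted metric depend on the event attributes the Riemann client sends.

```text
cpu/load metric=0.35,ttl=60 1614889298000000000
```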
From 837465fcd546408677e78845608b665909399dfe Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 24 Nov 2021 11:50:13 -0700 Subject: [PATCH 073/133] chore: clean up all errors for markdown lint input plugins s through v (#10167) --- plugins/inputs/salesforce/README.md | 21 +- plugins/inputs/sensors/README.md | 24 +- plugins/inputs/sflow/README.md | 26 +- plugins/inputs/smart/README.md | 75 ++- plugins/inputs/snmp/README.md | 69 +-- plugins/inputs/snmp_legacy/README.md | 140 +++--- plugins/inputs/socket_listener/README.md | 14 +- plugins/inputs/solr/README.md | 6 +- plugins/inputs/sql/README.md | 46 +- plugins/inputs/sqlserver/README.md | 28 +- plugins/inputs/stackdriver/README.md | 30 +- plugins/inputs/statsd/README.md | 98 ++-- plugins/inputs/suricata/README.md | 21 +- plugins/inputs/swap/README.md | 8 +- plugins/inputs/synproxy/README.md | 17 +- plugins/inputs/syslog/README.md | 34 +- plugins/inputs/sysstat/README.md | 174 ++++--- plugins/inputs/system/README.md | 25 +- plugins/inputs/systemd_units/README.md | 16 +- plugins/inputs/tail/README.md | 8 +- plugins/inputs/teamspeak/README.md | 38 +- plugins/inputs/temp/README.md | 14 +- plugins/inputs/tengine/README.md | 8 +- plugins/inputs/tomcat/README.md | 10 +- plugins/inputs/trig/README.md | 9 +- plugins/inputs/twemproxy/README.md | 4 +- plugins/inputs/unbound/README.md | 16 +- plugins/inputs/uwsgi/README.md | 41 +- plugins/inputs/varnish/README.md | 610 ++++++++++++----------- plugins/inputs/vsphere/METRICS.md | 22 +- plugins/inputs/vsphere/README.md | 135 ++--- 31 files changed, 931 insertions(+), 856 deletions(-) diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 6883f3a90b85f..26668212acce7 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -3,7 +3,7 @@ The Salesforce plugin gathers metrics about the limits in your Salesforce organization and the remaining usage. It fetches its data from the [limits endpoint](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_limits.htm) of Salesforce's REST API. -### Configuration: +## Configuration ```toml # Gather Metrics about Salesforce limits and remaining usage @@ -19,7 +19,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ # version = "39.0" ``` -### Measurements & Fields: +## Measurements & Fields Salesforce provide one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created. @@ -28,20 +28,19 @@ Each entry is converted to snake\_case and 2 fields are created. - \_remaining represents the usage remaining before hitting the limit threshold - salesforce - - \_max (int) - - \_remaining (int) - - (...) + - \_max (int) + - \_remaining (int) + - (...) 
-### Tags: +## Tags - All measurements have the following tags: - - host - - organization_id (t18 char organisation ID) + - host + - organization_id (t18 char organisation ID) +## Example Output -### Example Output: - -``` +```sh $./telegraf --config telegraf.conf --input-filter salesforce --test salesforce,organization_id=XXXXXXXXXXXXXXXXXX,host=xxxxx.salesforce.com daily_workflow_emails_max=546000i,hourly_time_based_workflow_max=50i,daily_async_apex_executions_remaining=250000i,daily_durable_streaming_api_events_remaining=1000000i,streaming_api_concurrent_clients_remaining=2000i,daily_bulk_api_requests_remaining=10000i,hourly_sync_report_runs_remaining=500i,daily_api_requests_max=5000000i,data_storage_mb_remaining=1073i,file_storage_mb_remaining=1069i,daily_generic_streaming_api_events_remaining=10000i,hourly_async_report_runs_remaining=1200i,hourly_time_based_workflow_remaining=50i,daily_streaming_api_events_remaining=1000000i,single_email_max=5000i,hourly_dashboard_refreshes_remaining=200i,streaming_api_concurrent_clients_max=2000i,daily_durable_generic_streaming_api_events_remaining=1000000i,daily_api_requests_remaining=4999998i,hourly_dashboard_results_max=5000i,hourly_async_report_runs_max=1200i,daily_durable_generic_streaming_api_events_max=1000000i,hourly_dashboard_results_remaining=5000i,concurrent_sync_report_runs_max=20i,durable_streaming_api_concurrent_clients_remaining=2000i,daily_workflow_emails_remaining=546000i,hourly_dashboard_refreshes_max=200i,daily_streaming_api_events_max=1000000i,hourly_sync_report_runs_max=500i,hourly_o_data_callout_max=10000i,mass_email_max=5000i,mass_email_remaining=5000i,single_email_remaining=5000i,hourly_dashboard_statuses_max=999999999i,concurrent_async_get_report_instances_max=200i,daily_durable_streaming_api_events_max=1000000i,daily_generic_streaming_api_events_max=10000i,hourly_o_data_callout_remaining=10000i,concurrent_sync_report_runs_remaining=20i,daily_bulk_api_requests_max=10000i,data_storage_mb_max=1073i,hourly_dashboard_statuses_remaining=999999999i,concurrent_async_get_report_instances_remaining=200i,daily_async_apex_executions_max=250000i,durable_streaming_api_concurrent_clients_max=2000i,file_storage_mb_max=1073i 1501565661000000000 diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index d9bcfe2e4544d..9de12f588b556 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -5,7 +5,8 @@ package installed. This plugin collects sensor metrics with the `sensors` executable from the lm-sensor package. -### Configuration: +## Configuration + ```toml # Monitor sensors, requires lm-sensors package [[inputs.sensors]] @@ -17,19 +18,21 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se # timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields + Fields are created dynamically depending on the sensors. All fields are float. 
-### Tags: +## Tags - All measurements have the following tags: - - chip - - feature + - chip + - feature -### Example Output: +## Example Output -#### Default -``` +### Default + +```shell $ telegraf --config telegraf.conf --input-filter sensors --test * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000 @@ -39,8 +42,9 @@ $ telegraf --config telegraf.conf --input-filter sensors --test > sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000 ``` -#### With remove_numbers=false -``` +### With remove_numbers=false + +```shell * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000 > sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000 diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 9e5366706e5df..80413048ba658 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -6,7 +6,7 @@ accordance with the specification from [sflow.org](https://sflow.org/). Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are turned into metrics. Counters and other header samples are ignored. -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -18,7 +18,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration +## Configuration ```toml [[inputs.sflow]] @@ -33,7 +33,7 @@ avoid cardinality issues: # read_buffer_size = "" ``` -### Metrics +## Metrics - sflow - tags: @@ -81,34 +81,36 @@ avoid cardinality issues: - ip_flags (integer, ip_ver field of IPv4 structures) - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6)) -### Troubleshooting +## Troubleshooting The [sflowtool][] utility can be used to print sFlow packets, and compared against the metrics produced by Telegraf. -``` + +```sh sflowtool -p 6343 ``` If opening an issue, in addition to the output of sflowtool it will also be helpful to collect a packet capture. 
Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 ``` [sflowtool]: https://github.com/sflow/sflowtool -### Example Output -``` +## Example Output + +```shell sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 ``` -### Reference Documentation +## Reference Documentation -This sflow implementation was built from the reference document +This sflow implementation was built from the reference document [sflow.org/sflow_version_5.txt](sflow_version_5) - [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index dec58e3f9afab..31ab92d3ad9d6 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -1,19 +1,19 @@ # S.M.A.R.T. Input Plugin Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs) that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. -See smartmontools (https://www.smartmontools.org/). +See smartmontools (). SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration. If no devices are specified, the plugin will scan for SMART devices via the following command: -``` +```sh smartctl --scan ``` Metrics will be reported from the following `smartctl` command: -``` +```sh smartctl --info --attributes --health -n --format=brief ``` @@ -23,41 +23,48 @@ Also, NVMe capabilities were introduced in version 6.5. To enable SMART on a storage device run: -``` +```sh smartctl -s on ``` + ## NVMe vendor specific attributes -For NVMe disk type, plugin can use command line utility `nvme-cli`. It has a feature +For NVMe disk type, plugin can use command line utility `nvme-cli`. It has a feature to easy access a vendor specific attributes. -This plugin supports nmve-cli version 1.5 and above (https://github.com/linux-nvme/nvme-cli). +This plugin supports nmve-cli version 1.5 and above (). In case of `nvme-cli` absence NVMe vendor specific metrics will not be obtained. Vendor specific SMART metrics for NVMe disks may be reported from the following `nvme` command: -``` +```sh nvme smart-log-add ``` Note that vendor plugins for `nvme-cli` could require different naming convention and report format. 
To see installed plugin extensions, depended on the nvme-cli version, look at the bottom of: -``` + +```sh nvme help ``` To gather disk vendor id (vid) `id-ctrl` could be used: -``` + +```sh nvme id-ctrl ``` -Association between a vid and company can be found there: https://pcisig.com/membership/member-companies. + +Association between a vid and company can be found there: . Devices affiliation to being NVMe or non NVMe will be determined thanks to: -``` + +```sh smartctl --scan ``` + and: -``` + +```sh smartctl --scan -d nvme ``` @@ -113,12 +120,14 @@ It's important to note that this plugin references smartctl and nvme-cli, which Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. You will need the following in your telegraf config: + ```toml [[inputs.smart]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # For smartctl add the following lines: @@ -131,6 +140,7 @@ Cmnd_Alias NVME = /path/to/nvme telegraf ALL=(ALL) NOPASSWD: NVME Defaults!NVME !logfile, !syslog, !pam_session ``` + To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartctl` or `path_nvme` in the configuration should be set to execute this script. @@ -171,57 +181,70 @@ To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartct - value - worst -#### Flags +### Flags The interpretation of the tag `flags` is: - - `K` auto-keep - - `C` event count - - `R` error rate - - `S` speed/performance - - `O` updated online - - `P` prefailure warning -#### Exit Status +- `K` auto-keep +- `C` event count +- `R` error rate +- `S` speed/performance +- `O` updated online +- `P` prefailure warning + +### Exit Status The `exit_status` field captures the exit status of the used cli utilities command which is defined by a bitmask. For the interpretation of the bitmask see the man page for smartctl or nvme-cli. ## Device Names + Device names, e.g., `/dev/sda`, are *not persistent*, and may be subject to change across reboots or system changes. Instead, you can use the *World Wide Name* (WWN) or serial number to identify devices. On Linux block devices can be referenced by the WWN in the following location: `/dev/disk/by-id/`. + ## Troubleshooting + If you expect to see more SMART metrics than this plugin shows, be sure to use a proper version of smartctl or nvme-cli utility which has the functionality to gather desired data. Also, check -your device capability because not every SMART metrics are mandatory. +your device capability because not every SMART metrics are mandatory. For example the number of temperature sensors depends on the device specification. 
If this plugin is not working as expected for your SMART enabled device, please run these commands and include the output in a bug report: For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe devices by default): -``` + +```sh smartctl --scan ``` + For NVMe devices: -``` + +```sh smartctl --scan -d nvme ``` + Run the following command replacing your configuration setting for NOCHECK and the DEVICE (name of the device could be taken from the previous command): -``` + +```sh smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE ``` + If you try to gather vendor specific metrics, please provide this commad and replace vendor and device to match your case: -``` + +```sh nvme VENDOR smart-log-add DEVICE ``` + ## Example SMART Plugin Outputs -``` + +```shell smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000 smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000 diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 3728cddb34349..b9cb69a5fedf5 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -4,7 +4,7 @@ The `snmp` input plugin uses polling to gather metrics from SNMP agents. Support for gathering individual OIDs as well as complete SNMP tables is included. -### Prerequisites +## Prerequisites This plugin uses the `snmptable` and `snmptranslate` programs from the [net-snmp][] project. These tools will need to be installed into the `PATH` in @@ -18,7 +18,8 @@ location of these files can be configured in the `snmp.conf` or via the `MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more information. -### Configuration +## Configuration + ```toml [[inputs.snmp]] ## Agent addresses to retrieve values from. @@ -91,13 +92,13 @@ information. is_tag = true ``` -#### Configure SNMP Requests +### Configure SNMP Requests This plugin provides two methods for configuring the SNMP requests: `fields` and `tables`. Use the `field` option to gather single ad-hoc variables. To collect SNMP tables, use the `table` option. -##### Field +#### Field Use a `field` to collect a variable by OID. Requests specified with this option operate similar to the `snmpget` utility. @@ -138,7 +139,7 @@ option operate similar to the `snmpget` utility. # conversion = "" ``` -##### Table +#### Table Use a `table` to configure the collection of a SNMP table. SNMP requests formed with this option operate similarly way to the `snmptable` command. @@ -201,7 +202,7 @@ One [metric][] is created for each row of the SNMP table. 
## Specifies if the value of given field should be snmptranslated ## by default no field values are translated # translate = true - + ## Secondary index table allows to merge data from two tables with ## different index that this filed will be used to join them. There can ## be only one secondary index table. @@ -220,27 +221,30 @@ One [metric][] is created for each row of the SNMP table. # secondary_outer_join = false ``` -##### Two Table Join +#### Two Table Join + Snmp plugin can join two snmp tables that have different indexes. For this to work one table should have translation field that return index of second table as value. Examples of such fields are: - * Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, + +* Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, which value is IfIndex from ifTable - * Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, +* Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, which value is IfIndex from ifTable - * Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, +* Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, which value is index from entPhysicalTable - + Such field can be used to translate index to secondary table with `secondary_index_table = true` and all fields from secondary table (with index pointed from translation field), should have added option `secondary_index_use = true`. Telegraf cannot duplicate entries during join so translation must be 1-to-1 (not 1-to-many). To add fields from secondary table with index that is not present in translation table (outer join), there is a second option for translation index `secondary_outer_join = true`. -###### Example configuration for table joins +##### Example configuration for table joins CISCO-POWER-ETHERNET-EXT-MIB table before join: -``` + +```toml [[inputs.snmp.table]] name = "ciscoPower" index_as_tag = true @@ -255,14 +259,16 @@ oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" ``` Partial result (removed agent_host and host columns from all following outputs in this section): -``` + +```shell > ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000 > ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000 > ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000 ``` Note here that EntPhyIndex column carries index from ENTITY-MIB table, config for it: -``` + +```toml [[inputs.snmp.table]] name = "entityTable" index_as_tag = true @@ -271,8 +277,10 @@ index_as_tag = true name = "EntPhysicalName" oid = "ENTITY-MIB::entPhysicalName" ``` + Partial result: -``` + +```text > entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000 > entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000 > entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000 @@ -282,7 +290,7 @@ Now, lets attempt to join these results into one table. EntPhyIndex matches inde from second table, and lets convert EntPhysicalName into tag, so second table will only provide tags into result. 
Configuration: -``` +```toml [[inputs.snmp.table]] name = "ciscoPowerEntity" index_as_tag = true @@ -304,40 +312,45 @@ is_tag = true ``` Result: -``` + +```shell > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 ``` -### Troubleshooting +## Troubleshooting Check that a numeric field can be translated to a textual field: -``` + +```sh $ snmptranslate .1.3.6.1.2.1.1.3.0 DISMAN-EVENT-MIB::sysUpTimeInstance ``` Request a top-level field: -``` -$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 + +```sh +snmpget -v2c -c public 127.0.0.1 sysUpTime.0 ``` Request a table: -``` -$ snmptable -v2c -c public 127.0.0.1 ifTable + +```sh +snmptable -v2c -c public 127.0.0.1 ifTable ``` To collect a packet capture, run this command in the background while running Telegraf or one of the above commands. Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 ``` -### Example Output +## Example Output -``` +```shell snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 8e639900ffe0f..e4a91080704e6 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,17 +1,16 @@ # SNMP Legacy Input Plugin -### Deprecated in version 1.0. Use [SNMP input plugin][]. +## Deprecated in version 1.0. 
Use [SNMP input plugin][] The SNMP input plugin gathers metrics from SNMP agents -### Configuration: +## Configuration - -#### Very simple example +### Very simple example In this example, the plugin will gather value of OIDS: - - `.1.3.6.1.2.1.2.2.1.4.1` +- `.1.3.6.1.2.1.2.2.1.4.1` ```toml # Very Simple Example @@ -28,36 +27,34 @@ In this example, the plugin will gather value of OIDS: get_oids = [".1.3.6.1.2.1.2.2.1.4.1"] ``` - -#### Simple example +### Simple example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** +- named **ifnumber** +- named **interface_speed** With **inputs.snmp.get** section the plugin gets the oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* As you can see *ifSpeed* is not a valid OID. In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` Also as the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.1.0` - `.1.3.6.1.2.1.2.2.1.5.1` - ```toml # Simple example [[inputs.snmp]] @@ -88,36 +85,35 @@ In this example, the plugin will gather value of OIDS: ``` - -#### Simple bulk example +### Simple bulk example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** - - named **if_out_octets** +- named **ifnumber** +- named **interface_speed** +- named **if_out_octets** With **inputs.snmp.get** section the plugin gets oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* With **inputs.snmp.bulk** section the plugin gets the oid number: - - **if_out_octets** => *ifOutOctets* +- **if_out_octets** => *ifOutOctets* As you can see *ifSpeed* and *ifOutOctets* are not a valid OID. In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` - - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` Also, the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` And **if_out_octets** is a bulk request, the plugin will gathers all OIDS in the table. 
@@ -140,7 +136,6 @@ In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.2.1.16.5` - `...` - ```toml # Simple bulk example [[inputs.snmp]] @@ -174,8 +169,7 @@ In this example, the plugin will gather value of OIDS: oid = "ifOutOctets" ``` - -#### Table example +### Table example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. @@ -185,11 +179,11 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable1** +- named **iftable1** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable1** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable1** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable1** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -239,8 +233,7 @@ OIDS in the table and in the subtables oid = ".1.3.6.1.2.1.31.1.1.1" ``` - -#### Table with subtable example +### Table with subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. @@ -250,12 +243,12 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable2** +- named **iftable2** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable2** => `.1.3.6.1.2.1.2.2.1.13` +- **iftable2** => `.1.3.6.1.2.1.2.2.1.13` Also **iftable2** is a table, the plugin will gathers all OIDS in subtables: @@ -266,7 +259,6 @@ OIDS in subtables: - `.1.3.6.1.2.1.2.2.1.13.4` - `.1.3.6.1.2.1.2.2.1.13....` - ```toml # Table with subtable example [[inputs.snmp]] @@ -295,19 +287,18 @@ OIDS in subtables: # oid attribute is useless ``` - -#### Table with mapping example +### Table with mapping example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. Telegraf gathers value of OIDS of the table: - - named **iftable3** +- named **iftable3** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable3** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable3** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -334,11 +325,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -399,20 +391,19 @@ Note: the plugin will add instance name as tag *instance* # if empty, get all subtables ``` - -#### Table with both mapping and subtable example +### Table with both mapping and subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. 
Telegraf gathers value of OIDS of the table: - - named **iftable4** +- named **iftable4** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable4** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable4** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -433,11 +424,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -459,8 +451,6 @@ the following OIDS: Note: the plugin will add instance name as tag *instance* - - ```toml # Table with both mapping and subtable example [[inputs.snmp]] @@ -505,7 +495,7 @@ Note: the plugin will add instance name as tag *instance* unit = "octets" ``` -#### Configuration notes +### Configuration notes - In **inputs.snmp.table** section, the `oid` attribute is useless if the `sub_tables` attributes is defined @@ -513,38 +503,38 @@ Note: the plugin will add instance name as tag *instance* - In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file` as `oid` attribute instead of a valid OID -### Measurements & Fields: +## Measurements & Fields With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - ifHCOutOctets + - ifHCOutOctets - ifInDiscards - - ifInDiscards + - ifInDiscards - ifHCInOctets - - ifHCInOctets + - ifHCInOctets -### Tags: +## Tags With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - host - - instance - - unit + - host + - instance + - unit - ifInDiscards - - host - - instance + - host + - instance - ifHCInOctets - - host - - instance - - unit + - host + - instance + - unit -### Example Output: +## Example Output With the last example (Table with both mapping and subtable example): -``` +```shell ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901 ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index f5189a195af9d..c445e0f5a5c78 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -6,7 +6,7 @@ streaming (tcp, unix) or datagram (udp, unixgram) protocols. The plugin expects messages in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -92,7 +92,7 @@ at least 8MB before trying to run large amounts of UDP traffic to your instance. 
Check the current UDP/IP receive buffer limit & default by typing the following commands: -``` +```sh sysctl net.core.rmem_max sysctl net.core.rmem_default ``` @@ -100,7 +100,7 @@ sysctl net.core.rmem_default If the values are less than 8388608 bytes you should add the following lines to the /etc/sysctl.conf file: -``` +```text net.core.rmem_max=8388608 net.core.rmem_default=8388608 ``` @@ -108,7 +108,7 @@ net.core.rmem_default=8388608 Changes to /etc/sysctl.conf do not take effect until reboot. To update the values immediately, type the following commands as root: -``` +```sh sysctl -w net.core.rmem_max=8388608 sysctl -w net.core.rmem_default=8388608 ``` @@ -123,20 +123,20 @@ happens Check the current UDP/IP buffer limit by typing the following command: -``` +```sh sysctl kern.ipc.maxsockbuf ``` If the value is less than 9646900 bytes you should add the following lines to the /etc/sysctl.conf file (create it if necessary): -``` +```text kern.ipc.maxsockbuf=9646900 ``` Changes to /etc/sysctl.conf do not take effect until reboot. To update the values immediately, type the following command as root: -``` +```sh sysctl -w kern.ipc.maxsockbuf=9646900 ``` diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md index c20fa92836c70..c9d1a6f36ba11 100644 --- a/plugins/inputs/solr/README.md +++ b/plugins/inputs/solr/README.md @@ -7,7 +7,7 @@ More about [performance statistics](https://cwiki.apache.org/confluence/display/ Tested from 3.5 to 7.* -### Configuration: +## Configuration ```toml [[inputs.solr]] @@ -22,9 +22,9 @@ Tested from 3.5 to 7.* # password = "pa$$word" ``` -### Example output of gathered metrics: +## Example output of gathered metrics -``` +```shell ➜ ~ telegraf -config telegraf.conf -input-filter solr -test * Plugin: solr, Collection 1 > solr_core,core=main,handler=searcher,host=testhost deleted_docs=17616645i,max_docs=261848363i,num_docs=244231718i 1478214949000000000 diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md index cc8a464016d28..a932a71c84128 100644 --- a/plugins/inputs/sql/README.md +++ b/plugins/inputs/sql/README.md @@ -5,7 +5,7 @@ types are supported and their settings might differ (especially the connection p Please check the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the `driver` name and options for the data-source-name (`dsn`) options. -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage `. @@ -73,13 +73,13 @@ generate it using `telegraf --usage `. ## Column names containing fields (explicit types) ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over - ## the automatic (driver-based) conversion below. - ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. # field_columns_float = [] # field_columns_int = [] - # field_columns_uint = [] - # field_columns_bool = [] - # field_columns_string = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] ## Column names containing fields (automatic types) ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty @@ -89,16 +89,20 @@ generate it using `telegraf --usage `. 
# field_columns_exclude = [] ``` -### Options -#### Driver +## Options + +### Driver + The `driver` and `dsn` options specify how to connect to the database. As especially the `dsn` format and values vary with the `driver` refer to the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible values and more details. -#### Connection limits +### Connection limits + With these options you can limit the number of connections kept open by this plugin. Details about the exact workings can be found in the [golang sql documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime). -#### Query sections +### Query sections + Multiple `query` sections can be specified for this plugin. Each specified query will first be prepared on the server and then executed in every interval using the column mappings specified. Please note that `tag` and `field` columns are not exclusive, i.e. a column can be added to both. When using both `include` and `exclude` lists, the `exclude` @@ -107,31 +111,38 @@ the filter. In case any the columns specified in `measurement_col` or `time_col` the plugin falls-back to the documented defaults. Fields or tags specified in the includes of the options but missing in the returned query are silently ignored. -### Types +## Types + This plugin relies on the driver to do the type conversion. For the different properties of the metric the following types are accepted. -#### Measurement +### Measurement + Only columns of type `string` are accepted. -#### Time +### Time + For the metric time columns of type `time` are accepted directly. For numeric columns, `time_format` should be set to any of `unix`, `unix_ms`, `unix_ns` or `unix_us` accordingly. By default the a timestamp in `unix` format is expected. For string columns, please specify the `time_format` accordingly. See the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for details. -#### Tags +### Tags + For tags columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Those values will be converted to string. -#### Fields +### Fields + For fields columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Here `bytes` will be converted to `string`, signed and unsigned integer values will be converted to `int64` or `uint64` respectively. Floating-point values are converted to `float64` and `time` is converted to a nanosecond timestamp of type `int64`. -### Example Output +## Example Output + Using the [MariaDB sample database](https://www.mariadbtutorial.com/getting-started/mariadb-sample-database) and the configuration + ```toml [[inputs.sql]] driver = "mysql" @@ -145,7 +156,8 @@ configuration ``` Telegraf will output the following metrics -``` + +```shell nation,host=Hugin,name=John guest_id=1i 1611332164000000000 nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000 nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000 diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index c818b4b6d01b3..1ee48ccbae5da 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -3,7 +3,7 @@ The `sqlserver` plugin provides metrics for your SQL Server instance. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. 
-### The SQL Server plugin supports the following editions/versions of SQL Server +## The SQL Server plugin supports the following editions/versions of SQL Server - SQL Server - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) @@ -12,7 +12,7 @@ lightweight and use Dynamic Management Views supplied by SQL Server. - Azure SQL Managed Instance - Azure SQL Elastic Pool -### Additional Setup +## Additional Setup You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: @@ -57,7 +57,7 @@ GO CREATE USER [telegraf] FOR LOGIN telegraf; ``` -### Configuration +## Configuration ```toml [agent] @@ -203,7 +203,7 @@ CREATE USER [telegraf] FOR LOGIN telegraf; ## - PerformanceMetrics ``` -### Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) +## Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) Azure SQL Database supports 2 main methods of authentication: [SQL authentication and AAD authentication](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). The recommended practice is to [use AAD authentication when possible](https://docs.microsoft.com/en-us/azure/azure-sql/database/authentication-aad-overview). @@ -211,7 +211,7 @@ AAD is a more modern authentication protocol, allows for easier credential/role To enable support for AAD authentication, we leverage the existing AAD authentication support in the [SQL Server driver for Go](https://github.com/denisenkom/go-mssqldb#azure-active-directory-authentication---preview) -#### How to use AAD Auth with MSI +### How to use AAD Auth with MSI - Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). - On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the below script. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource. @@ -239,13 +239,13 @@ EXECUTE ('GRANT VIEW DATABASE STATE TO []') - Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). -### Metrics +## Metrics To provide backwards compatibility, this plugin support two versions of metrics queries. **Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. 
-#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type +### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type The original metrics queries provide: @@ -265,7 +265,7 @@ If you are using the original queries all stats have the following tags: - `servername`: hostname:instance - `type`: type of stats to easily filter measurements -#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type +### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type The new (version 2) metrics provide: @@ -299,7 +299,7 @@ The new (version 2) metrics provide: - Resource governance stats from `sys.dm_user_db_resource_governance` - Stats from `sys.dm_db_resource_stats` -#### database_type = "AzureSQLDB" +### database_type = "AzureSQLDB" These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test,differences in DMVs: @@ -313,7 +313,7 @@ These are metrics for Azure SQL Database (single database) and are very similar - *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` - *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots. -#### database_type = "AzureSQLManagedInstance" +### database_type = "AzureSQLManagedInstance" These are metrics for Azure SQL Managed instance, are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs: @@ -326,7 +326,7 @@ These are metrics for Azure SQL Managed instance, are very similar to version 2 - *AzureSQLMIRequests*: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` - *AzureSQLMISchedulers*: This captures `sys.dm_os_schedulers` snapshots. -#### database_type = "AzureSQLPool" +### database_type = "AzureSQLPool" These are metrics for Azure SQL to monitor resources usage at Elastic Pool level. These metrics require additional permissions to be collected, please ensure to check additional setup section in this documentation. @@ -338,7 +338,7 @@ These are metrics for Azure SQL to monitor resources usage at Elastic Pool level - *AzureSQLPoolPerformanceCounters*: A selected list of performance counters from `sys.dm_os_performance_counters`. Note: Performance counters where the cntr_type column value is 537003264 are already returned with a percentage format between 0 and 100. For other counters, please check [sys.dm_os_performance_counters](https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current) documentation. - *AzureSQLPoolSchedulers*: This captures `sys.dm_os_schedulers` snapshots. -#### database_type = "SQLServer" +### database_type = "SQLServer" - *SQLServerDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` - *SQLServerMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. 
@@ -359,7 +359,7 @@ These are metrics for Azure SQL to monitor resources usage at Elastic Pool level - SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup - SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup -#### Output Measures +### Output Measures The guiding principal is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type. @@ -412,7 +412,7 @@ Version 2 queries have the following tags: - `sql_instance`: Physical host and instance name (hostname:instance) - `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. -#### Health Metric +### Health Metric All collection versions (version 1, version 2, and database_type) support an optional plugin health metric called `sqlserver_telegraf_health`. This metric tracks if connections to SQL Server are succeeding or failing. Users can leverage this metric to detect if their SQL Server monitoring is not working as intended. diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md index 6469b259b78ec..7f706f2d13096 100644 --- a/plugins/inputs/stackdriver/README.md +++ b/plugins/inputs/stackdriver/README.md @@ -6,7 +6,7 @@ Query data from Google Cloud Monitoring (formerly Stackdriver) using the This plugin accesses APIs which are [chargeable][pricing]; you might incur costs. -### Configuration +## Configuration ```toml [[inputs.stackdriver]] @@ -58,9 +58,9 @@ costs. ## For a list of aligner strings see: ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # distribution_aggregation_aligners = [ - # "ALIGN_PERCENTILE_99", - # "ALIGN_PERCENTILE_95", - # "ALIGN_PERCENTILE_50", + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", # ] ## Filters can be added to reduce the number of time series matched. All @@ -84,23 +84,24 @@ costs. ## Metric labels refine the time series selection with the following expression: ## metric.labels. = # [[inputs.stackdriver.filter.metric_labels]] - # key = "device_name" - # value = 'one_of("sda", "sdb")' + # key = "device_name" + # value = 'one_of("sda", "sdb")' ``` -#### Authentication +### Authentication It is recommended to use a service account to authenticate with the Stackdriver Monitoring API. [Getting Started with Authentication][auth]. -### Metrics +## Metrics Metrics are created using one of there patterns depending on if the value type is a scalar value, raw distribution buckets, or aligned bucket values. In all cases, the Stackdriver metric type is split on the last component into the measurement and field: -``` + +```sh compute.googleapis.com/instance/disk/read_bytes_count └────────── measurement ─────────┘ └── field ───┘ ``` @@ -114,7 +115,6 @@ compute.googleapis.com/instance/disk/read_bytes_count - fields: - field - **Distributions:** Distributions are represented by a set of fields along with the bucket values @@ -132,7 +132,7 @@ represents the total number of items less than the `lt` tag. - field_range_min - field_range_max -+ measurement +- measurement - tags: - resource_labels - metric_labels @@ -149,14 +149,16 @@ represents the total number of items less than the `lt` tag. 
- fields: - field_alignment_function -### Troubleshooting +## Troubleshooting When Telegraf is ran with `--debug`, detailed information about the performed queries will be logged. -### Example Output -``` +## Example Output + +```shell ``` + [stackdriver]: https://cloud.google.com/monitoring/api/v3/ [auth]: https://cloud.google.com/docs/authentication/getting-started [pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index ca60dbe3a2a79..e82da5a03c878 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -1,6 +1,6 @@ # StatsD Input Plugin -### Configuration +## Configuration ```toml # Statsd Server @@ -77,7 +77,7 @@ # max_ttl = "10h" ``` -### Description +## Description The statsd plugin is a special type of plugin which runs a backgrounded statsd listener service while telegraf is running. @@ -87,49 +87,48 @@ original [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_ty implementation. In short, the telegraf statsd listener will accept: - Gauges - - `users.current.den001.myapp:32|g` <- standard - - `users.current.den001.myapp:+10|g` <- additive - - `users.current.den001.myapp:-10|g` + - `users.current.den001.myapp:32|g` <- standard + - `users.current.den001.myapp:+10|g` <- additive + - `users.current.den001.myapp:-10|g` - Counters - - `deploys.test.myservice:1|c` <- increments by 1 - - `deploys.test.myservice:101|c` <- increments by 101 - - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 + - `deploys.test.myservice:1|c` <- increments by 1 + - `deploys.test.myservice:101|c` <- increments by 101 + - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 - Sets - - `users.unique:101|s` - - `users.unique:101|s` - - `users.unique:102|s` <- would result in a count of 2 for `users.unique` + - `users.unique:101|s` + - `users.unique:101|s` + - `users.unique:102|s` <- would result in a count of 2 for `users.unique` - Timings & Histograms - - `load.time:320|ms` - - `load.time.nanoseconds:1|h` - - `load.time:200|ms|@0.1` <- sampled 1/10 of the time + - `load.time:320|ms` + - `load.time.nanoseconds:1|h` + - `load.time:200|ms|@0.1` <- sampled 1/10 of the time - Distributions - - `load.time:320|d` - - `load.time.nanoseconds:1|d` - - `load.time:200|d|@0.1` <- sampled 1/10 of the time + - `load.time:320|d` + - `load.time.nanoseconds:1|d` + - `load.time:200|d|@0.1` <- sampled 1/10 of the time It is possible to omit repetitive names and merge individual stats into a single line by separating them with additional colons: - - `users.current.den001.myapp:32|g:+10|g:-10|g` - - `deploys.test.myservice:1|c:101|c:1|c|@0.1` - - `users.unique:101|s:101|s:102|s` - - `load.time:320|ms:200|ms|@0.1` +- `users.current.den001.myapp:32|g:+10|g:-10|g` +- `deploys.test.myservice:1|c:101|c:1|c|@0.1` +- `users.unique:101|s:101|s:102|s` +- `load.time:320|ms:200|ms|@0.1` This also allows for mixed types in a single line: - - `foo:1|c:200|ms` +- `foo:1|c:200|ms` The string `foo:1|c:200|ms` is internally split into two individual metrics `foo:1|c` and `foo:200|ms` which are added to the aggregator separately. - -### Influx Statsd +## Influx Statsd In order to take advantage of InfluxDB's tagging system, we have made a couple additions to the standard statsd protocol. 
First, you can specify tags in a manner similar to the line-protocol, like this: -``` +```shell users.current,service=payroll,region=us-west:32|g ``` @@ -139,9 +138,10 @@ users.current,service=payroll,region=us-west:32|g current.users,service=payroll,server=host01:west=10,east=10,central=2,south=10|g ``` --> -### Measurements: +## Measurements Meta: + - tags: `metric_type=` Outputted measurements will depend entirely on the measurements that the user @@ -149,42 +149,42 @@ sends, but here is a brief rundown of what you can expect to find from each metric type: - Gauges - - Gauges are a constant data type. They are not subject to averaging, and they + - Gauges are a constant data type. They are not subject to averaging, and they don’t change unless you change them. That is, once you set a gauge value, it will be a flat line on the graph until you change it again. - Counters - - Counters are the most basic type. They are treated as a count of a type of + - Counters are the most basic type. They are treated as a count of a type of event. They will continually increase unless you set `delete_counters=true`. - Sets - - Sets count the number of unique values passed to a key. For example, you + - Sets count the number of unique values passed to a key. For example, you could count the number of users accessing your system using `users:|s`. No matter how many times the same user_id is sent, the count will only increase by 1. - Timings & Histograms - - Timers are meant to track how long something took. They are an invaluable + - Timers are meant to track how long something took. They are an invaluable tool for tracking application performance. - - The following aggregate measurements are made for timers: - - `statsd__lower`: The lower bound is the lowest value statsd saw + - The following aggregate measurements are made for timers: + - `statsd__lower`: The lower bound is the lowest value statsd saw for that stat during that interval. - - `statsd__upper`: The upper bound is the highest value statsd saw + - `statsd__upper`: The upper bound is the highest value statsd saw for that stat during that interval. - - `statsd__mean`: The mean is the average of all values statsd saw + - `statsd__mean`: The mean is the average of all values statsd saw for that stat during that interval. - - `statsd__stddev`: The stddev is the sample standard deviation + - `statsd__stddev`: The stddev is the sample standard deviation of all values statsd saw for that stat during that interval. - - `statsd__sum`: The sum is the sample sum of all values statsd saw + - `statsd__sum`: The sum is the sample sum of all values statsd saw for that stat during that interval. - - `statsd__count`: The count is the number of timings statsd saw + - `statsd__count`: The count is the number of timings statsd saw for that stat during that interval. It is not averaged. - - `statsd__percentile_
<P>` The `Pth` percentile is a value x such
+    - `statsd__percentile_<P>
` The `Pth` percentile is a value x such that `P%` of all the values statsd saw for that stat during that time period are below x. The most common value that people use for `P` is the `90`, this is a great number to try to optimize. - Distributions - - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. - - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. + - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. + - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. -### Plugin arguments +## Plugin arguments - **protocol** string: Protocol used in listener - tcp or udp options - **max_tcp_connections** []int: Maximum number of concurrent TCP connections @@ -204,12 +204,12 @@ per-measurement in the calculation of percentiles. Raising this limit increases the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. -- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) -- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) -- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format (https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) +- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format () +- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format () +- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format () - **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. -### Statsd bucket -> InfluxDB line-protocol Templates +## Statsd bucket -> InfluxDB line-protocol Templates The plugin supports specifying templates for transforming statsd buckets into InfluxDB measurement names and tags. The templates have a _measurement_ keyword, @@ -217,7 +217,7 @@ which can be used to specify parts of the bucket that are to be used in the measurement name. Other words in the template are used as tag names. 
For example, the following template: -``` +```toml templates = [ "measurement.measurement.region" ] @@ -225,7 +225,7 @@ templates = [ would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 ``` @@ -233,7 +233,7 @@ cpu.load.us-west:100|g Users can also filter the template to use based on the name of the bucket, using glob matching, like so: -``` +```toml templates = [ "cpu.* measurement.measurement.region", "mem.* measurement.measurement.host" @@ -242,7 +242,7 @@ templates = [ which would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 61f940a8df01d..01c60e3e70171 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -6,7 +6,7 @@ and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. It can also report for triggered Suricata IDS/IPS alerts. -### Configuration +## Configuration ```toml [[inputs.suricata]] @@ -23,14 +23,15 @@ It can also report for triggered Suricata IDS/IPS alerts. alerts = false ``` -### Metrics +## Metrics Fields in the 'suricata' measurement follow the JSON format used by Suricata's stats output. -See http://suricata.readthedocs.io/en/latest/performance/statistics.html for +See for more information. All fields for Suricata stats are numeric. + - suricata - tags: - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics @@ -98,7 +99,7 @@ All fields for Suricata stats are numeric. - tcp_synack - ... -Some fields of the Suricata alerts are strings, for example the signatures. See https://suricata.readthedocs.io/en/suricata-6.0.0/output/eve/eve-json-format.html?highlight=priority#event-type-alert for more information. +Some fields of the Suricata alerts are strings, for example the signatures. See for more information. - suricata_alert - fields: @@ -112,7 +113,7 @@ Some fields of the Suricata alerts are strings, for example the signatures. See - target_port - ... -#### Suricata configuration +### Suricata configuration Suricata needs to deliver the 'stats' event type to a given unix socket for this plugin to pick up. This can be done, for example, by creating an additional @@ -128,11 +129,10 @@ output in the Suricata configuration file: threads: yes ``` -#### FreeBSD tuning - +### FreeBSD tuning -Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192 -otherwise messages from Suricata are truncated as they exceed the default available buffer space, +Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192 +otherwise messages from Suricata are truncated as they exceed the default available buffer space, consequently no statistics are processed by the plugin. 
```text @@ -140,8 +140,7 @@ sysctl -w net.local.stream.recvspace=16384 sysctl -w net.local.stream.sendspace=16384 ``` - -### Example Output +## Example Output ```text suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 diff --git a/plugins/inputs/swap/README.md b/plugins/inputs/swap/README.md index 98389287180fa..c538559ca9aa4 100644 --- a/plugins/inputs/swap/README.md +++ b/plugins/inputs/swap/README.md @@ -4,7 +4,7 @@ The swap plugin collects system swap metrics. For more information on what swap memory is, read [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space). -### Configuration: +## Configuration ```toml # Read metrics about swap memory usage @@ -12,7 +12,7 @@ For more information on what swap memory is, read [All about Linux swap space](h # no configuration ``` -### Metrics: +## Metrics - swap - fields: @@ -23,8 +23,8 @@ For more information on what swap memory is, read [All about Linux swap space](h - in (int, bytes): data swapped in since last boot calculated from page number - out (int, bytes): data swapped out since last boot calculated from page number -### Example Output: +## Example Output -``` +```shell swap total=20855394304i,used_percent=45.43883523785713,used=9476448256i,free=1715331072i 1511894782000000000 ``` diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md index efb8203515c69..117ee02e8fa97 100644 --- a/plugins/inputs/synproxy/README.md +++ b/plugins/inputs/synproxy/README.md @@ -1,10 +1,9 @@ # Synproxy Input Plugin -The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. +The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section. - -### Configuration +## Configuration The synproxy plugin does not need any configuration @@ -13,7 +12,7 @@ The synproxy plugin does not need any configuration # no configuration ``` -### Metrics +## Metrics The following synproxy counters are gathered @@ -26,24 +25,26 @@ The following synproxy counters are gathered - syn_received (uint32, packets, counter) - SYN received - conn_reopened (uint32, packets, counter) - Connections reopened -### Sample Queries +## Sample Queries Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB: + ```sql SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null); ``` -### Troubleshooting +## Troubleshooting Execute the following CLI command in Linux to test the synproxy counters: + ```sh cat /proc/net/stat/synproxy ``` -### Example Output +## Example Output This section shows example output in Line Protocol format. 
-``` +```shell synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000 ``` diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index a821a642b0ec8..d2c763e4ec6a0 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -9,7 +9,7 @@ a Unix Domain socket, Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). -### Configuration +## Configuration ```toml [[inputs.syslog]] @@ -68,20 +68,20 @@ Syslog messages should be formatted according to # sdparam_separator = "_" ``` -#### Message transport +### Message transport The `framing` option only applies to streams. It governs the way we expect to receive messages within the stream. Namely, with the [`"octet counting"`](https://tools.ietf.org/html/rfc5425#section-4.3) technique (default) or with the [`"non-transparent"`](https://tools.ietf.org/html/rfc6587#section-3.4.2) framing. The `trailer` option only applies when `framing` option is `"non-transparent"`. It must have one of the following values: `"LF"` (default), or `"NUL"`. -#### Best effort +### Best effort The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode) option instructs the parser to extract partial but valid info from syslog messages. If unset only full messages will be collected. -#### Rsyslog Integration +### Rsyslog Integration Rsyslog can be configured to forward logging messages to Telegraf by configuring [remote logging](https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine). @@ -93,7 +93,8 @@ config file. Add the following lines to `/etc/rsyslog.d/50-telegraf.conf` making adjustments to the target address as needed: -``` + +```shell $ActionQueueType LinkedList # use asynchronous processing $ActionQueueFileName srvrfwd # set file name, also enables disk mode $ActionResumeRetryCount -1 # infinite retries on insert failure @@ -107,7 +108,8 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down ``` You can alternately use `advanced` format (aka RainerScript): -``` + +```bash # forward over tcp with octet framing according to RFC 5425 action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") @@ -117,7 +119,7 @@ action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1 To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html). -### Metrics +## Metrics - syslog - tags @@ -136,17 +138,19 @@ To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc - *Structured Data* (string) - timestamp: the time the messages was received -#### Structured Data +### Structured Data Structured data produces field keys by combining the `SD_ID` with the `PARAM_NAME` combined using the `sdparam_separator` as in the following example: -``` + +```shell 170 <165>1 2018-10-01:14:15.000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] An application event log entry... 
``` -``` + +```shell syslog,appname=evntslog,facility=local4,hostname=mymachine.example.com,severity=notice exampleSDID@32473_eventID="1011",exampleSDID@32473_eventSource="Application",exampleSDID@32473_iut="3",facility_code=20i,message="An application event log entry...",msgid="ID47",severity_code=5i,timestamp=1065910455003000000i,version=1i 1538421339749472344 ``` -### Troubleshooting +## Troubleshooting You can send debugging messages directly to the input plugin using netcat: @@ -158,14 +162,16 @@ echo "57 <13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc 127.0.0. echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0.1 6514 ``` -#### RFC3164 +### RFC3164 RFC3164 encoded messages are supported for UDP only, but not all vendors output valid RFC3164 messages by default - E.g. Cisco IOS If you see the following error, it is due to a message encoded in this format: - ``` + + ```shell E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] ``` - You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. \ No newline at end of file + + You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md index 9775c1a305c95..5c055a3c51af4 100644 --- a/plugins/inputs/sysstat/README.md +++ b/plugins/inputs/sysstat/README.md @@ -6,7 +6,7 @@ package installed. This plugin collects system metrics with the sysstat collector utility `sadc` and parses the created binary data file with the `sadf` utility. -### Configuration: +## Configuration ```toml # Sysstat metrics collector @@ -38,22 +38,22 @@ the created binary data file with the `sadf` utility. ## ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version. [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - -r = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" - # -H = "hugepages" # only available for newer linux distributions - # "-I ALL" = "interrupts" # requires INT activity + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + -r = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + # -H = "hugepages" # only available for newer linux distributions + # "-I ALL" = "interrupts" # requires INT activity ## Device tags can be used to add additional tags for devices. For example the configuration below ## adds a tag vg with value rootvg for all metrics with sda devices. @@ -61,94 +61,100 @@ the created binary data file with the `sadf` utility. 
# vg = "rootvg" ``` -### Measurements & Fields: -#### If group=true +## Measurements & Fields + +### If group=true + - cpu - - pct_idle (float) - - pct_iowait (float) - - pct_nice (float) - - pct_steal (float) - - pct_system (float) - - pct_user (float) + - pct_idle (float) + - pct_iowait (float) + - pct_nice (float) + - pct_steal (float) + - pct_system (float) + - pct_user (float) - disk - - avgqu-sz (float) - - avgrq-sz (float) - - await (float) - - pct_util (float) - - rd_sec_pers (float) - - svctm (float) - - tps (float) + - avgqu-sz (float) + - avgrq-sz (float) + - await (float) + - pct_util (float) + - rd_sec_pers (float) + - svctm (float) + - tps (float) And much more, depending on the options you configure. -#### If group=false +### If group=false + - cpu_pct_idle - - value (float) + - value (float) - cpu_pct_iowait - - value (float) + - value (float) - cpu_pct_nice - - value (float) + - value (float) - cpu_pct_steal - - value (float) + - value (float) - cpu_pct_system - - value (float) + - value (float) - cpu_pct_user - - value (float) + - value (float) - disk_avgqu-sz - - value (float) + - value (float) - disk_avgrq-sz - - value (float) + - value (float) - disk_await - - value (float) + - value (float) - disk_pct_util - - value (float) + - value (float) - disk_rd_sec_per_s - - value (float) + - value (float) - disk_svctm - - value (float) + - value (float) - disk_tps - - value (float) + - value (float) And much more, depending on the options you configure. -### Tags: +## Tags - All measurements have the following tags: - - device + - device And more if you define some `device_tags`. -### Example Output: + +## Example Output With the configuration below: + ```toml [[inputs.sysstat]] sadc_path = "/usr/lib/sa/sadc" # required activities = ["DISK", "SNMP", "INT"] group = true [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - -H = "hugepages" - "-I ALL" = "interrupts" # requires INT activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - "-r ALL" = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" [[inputs.sysstat.device_tags.sda]] vg = "rootvg" ``` you get the following output: -``` + +```shell $ telegraf --config telegraf.conf --input-filter sysstat --test * Plugin: sysstat, Collection 1 > cpu_util,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626657883725 @@ -189,34 +195,36 @@ $ telegraf --config telegraf.conf --input-filter sysstat --test ``` If you change the group value to false like below: + ```toml [[inputs.sysstat]] sadc_path = "/usr/lib/sa/sadc" # required activities = ["DISK", "SNMP", "INT"] group = false [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - -H = "hugepages" - "-I ALL" = "interrupts" # requires INT activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - "-r ALL" = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" 
# requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" [[inputs.sysstat.device_tags.sda]] vg = "rootvg" ``` you get the following output: -``` + +```shell $ telegraf -config telegraf.conf -input-filter sysstat -test * Plugin: sysstat, Collection 1 > io_tps value=0.5 1459255780126025822 diff --git a/plugins/inputs/system/README.md b/plugins/inputs/system/README.md index 8b16c1de08d25..4f87a8342b9bb 100644 --- a/plugins/inputs/system/README.md +++ b/plugins/inputs/system/README.md @@ -5,33 +5,34 @@ and number of users logged in. It is similar to the unix `uptime` command. Number of CPUs is obtained from the /proc/cpuinfo file. -### Configuration: +## Configuration ```toml # Read metrics about system load & uptime [[inputs.system]] # no configuration ``` -#### Permissions: + +### Permissions The `n_users` field requires read access to `/var/run/utmp`, and may require the `telegraf` user to be added to the `utmp` group on some systems. If this file does not exist `n_users` will be skipped. -### Metrics: +## Metrics - system - fields: - - load1 (float) - - load15 (float) - - load5 (float) - - n_users (integer) - - n_cpus (integer) - - uptime (integer, seconds) - - uptime_format (string, deprecated in 1.10, use `uptime` field) + - load1 (float) + - load15 (float) + - load5 (float) + - n_users (integer) + - n_cpus (integer) + - uptime (integer, seconds) + - uptime_format (string, deprecated in 1.10, use `uptime` field) -### Example Output: +## Example Output -``` +```shell system,host=tyrion load1=3.72,load5=2.4,load15=2.1,n_users=3i,n_cpus=4i 1483964144000000000 system,host=tyrion uptime=1249632i 1483964144000000000 system,host=tyrion uptime_format="14 days, 11:07" 1483964144000000000 diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index f9d47d7df1252..65344d0367629 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -12,7 +12,8 @@ fulfills the same purpose on windows. In addition to services, this plugin can gather other unit types as well, see `systemctl list-units --all --type help` for possible options. -### Configuration +## Configuration + ```toml [[inputs.systemd_units]] ## Set timeout for systemctl execution @@ -31,7 +32,8 @@ see `systemctl list-units --all --type help` for possible options. ## pattern = "a*" ``` -### Metrics +## Metrics + - systemd_units: - tags: - name (string, unit name) @@ -43,7 +45,7 @@ see `systemctl list-units --all --type help` for possible options. 
- active_code (int, see below) - sub_code (int, see below) -#### Load +### Load enumeration of [unit_load_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L87) @@ -57,7 +59,7 @@ enumeration of [unit_load_state_table](https://github.com/systemd/systemd/blob/c | 5 | merged | unit is ~ | | 6 | masked | unit is ~ | -#### Active +### Active enumeration of [unit_active_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L99) @@ -70,7 +72,7 @@ enumeration of [unit_active_state_table](https://github.com/systemd/systemd/blob | 4 | activating | unit is ~ | | 5 | deactivating | unit is ~ | -#### Sub +### Sub enumeration of sub states, see various [unittype_state_tables](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L163); duplicates were removed, tables are hex aligned to keep some space for future @@ -132,9 +134,9 @@ values | 0x00a0 | elapsed | unit is ~ | | | | | -### Example Output +## Example Output -``` +```shell systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000 systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index abdf0878aff56..e8cc1b4d4a6c5 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -4,7 +4,7 @@ The tail plugin "tails" a logfile and parses each log message. By default, the tail plugin acts like the following unix tail command: -``` +```shell tail -F --lines=0 myfile.log ``` @@ -14,12 +14,12 @@ inaccessible files. - `--lines=0` means that it will start at the end of the file (unless the `from_beginning` option is set). -see http://man7.org/linux/man-pages/man1/tail.1.html for more details. +see for more details. The plugin expects messages in one of the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -### Configuration +## Configuration ```toml [[inputs.tail]] @@ -85,7 +85,7 @@ The plugin expects messages in one of the #timeout = 5s ``` -### Metrics +## Metrics Metrics are produced according to the `data_format` option. Additionally a tag labeled `path` is added to the metric containing the filename being tailed. diff --git a/plugins/inputs/teamspeak/README.md b/plugins/inputs/teamspeak/README.md index ef3f0d8d9377b..1697f884f1cc3 100644 --- a/plugins/inputs/teamspeak/README.md +++ b/plugins/inputs/teamspeak/README.md @@ -2,10 +2,10 @@ This plugin uses the Teamspeak 3 ServerQuery interface of the Teamspeak server to collect statistics of one or more virtual servers. If you are querying an external Teamspeak server, make sure to add the host which is running Telegraf -to query_ip_whitelist.txt in the Teamspeak Server directory. For information about how to configure the server take a look +to query_ip_whitelist.txt in the Teamspeak Server directory. 
For information about how to configure the server take a look the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/TeamSpeak%203%20Server%20Query%20Manual.pdf) -### Configuration: +## Configuration ```toml # Reads metrics from a Teamspeak 3 Server via ServerQuery @@ -20,27 +20,27 @@ the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/T # virtual_servers = [1] ``` -### Measurements: +## Measurements - teamspeak - - uptime - - clients_online - - total_ping - - total_packet_loss - - packets_sent_total - - packets_received_total - - bytes_sent_total - - bytes_received_total - - query_clients_online - -### Tags: + - uptime + - clients_online + - total_ping + - total_packet_loss + - packets_sent_total + - packets_received_total + - bytes_sent_total + - bytes_received_total + - query_clients_online + +## Tags - The following tags are used: - - virtual_server - - name + - virtual_server + - name -### Example output: +## Example output -``` +```shell teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i,query_clients_online=1i 1507406561000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md index 95db4a3bb786f..5c55fa4b0d328 100644 --- a/plugins/inputs/temp/README.md +++ b/plugins/inputs/temp/README.md @@ -5,14 +5,14 @@ meant to be multi platform and uses platform specific collection methods. Currently supports Linux and Windows. -### Configuration +## Configuration ```toml [[inputs.temp]] # no configuration ``` -### Metrics +## Metrics - temp - tags: @@ -20,18 +20,18 @@ Currently supports Linux and Windows. - fields: - temp (float, celcius) - -### Troubleshooting +## Troubleshooting On **Windows**, the plugin uses a WMI call that is can be replicated with the following command: -``` + +```shell wmic /namespace:\\root\wmi PATH MSAcpi_ThermalZoneTemperature ``` -### Example Output +## Example Output -``` +```shell temp,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000 temp,sensor=coretemp_physicalid0_critalarm temp=0 1531298763000000000 temp,sensor=coretemp_physicalid0_input temp=100 1531298763000000000 diff --git a/plugins/inputs/tengine/README.md b/plugins/inputs/tengine/README.md index 9bc83c5d6e574..d70c304ef9559 100644 --- a/plugins/inputs/tengine/README.md +++ b/plugins/inputs/tengine/README.md @@ -4,7 +4,7 @@ The tengine plugin gathers metrics from the [Tengine Web Server](http://tengine.taobao.org/) via the [reqstat](http://tengine.taobao.org/document/http_reqstat.html) module. 
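Before pointing the plugin at a server it can help to confirm that the reqstat status location is actually reachable. A quick check, assuming the status page is exposed at `/us` on the local host (substitute the URI you configured for the reqstat module):

```shell
curl http://127.0.0.1/us
```

Each line of the response should contain the comma-separated counters for one `server_name`; if the request fails or returns an error page, check the reqstat configuration on the Tengine side first.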
-### Configuration: +## Configuration ```toml # Read Tengine's basic status information (ngx_http_reqstat_module) @@ -23,7 +23,7 @@ The tengine plugin gathers metrics from the # insecure_skip_verify = false ``` -### Metrics: +## Metrics - Measurement - tags: @@ -60,9 +60,9 @@ The tengine plugin gathers metrics from the - http_other_detail_status (integer, total number of requests of other status codes*http_ups_4xx total number of requests of upstream 4xx) - http_ups_5xx (integer, total number of requests of upstream 5xx) -### Example Output: +## Example Output -``` +```shell tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=localhost bytes_in=9129i,bytes_out=56334i,conn_total=14i,http_200=90i,http_206=0i,http_2xx=90i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=90i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000 tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=28.79.190.35.bc.googleusercontent.com bytes_in=1500i,bytes_out=3009i,conn_total=4i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=1i,http_416=0i,http_499=0i,http_4xx=3i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=4i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000 tengine,host=gcp-thz-api-5,port=80,server=localhost,server_name=www.google.com bytes_in=372i,bytes_out=786i,conn_total=1i,http_200=1i,http_206=0i,http_2xx=1i,http_302=0i,http_304=0i,http_3xx=0i,http_403=0i,http_404=0i,http_416=0i,http_499=0i,http_4xx=0i,http_500=0i,http_502=0i,http_503=0i,http_504=0i,http_508=0i,http_5xx=0i,http_other_detail_status=0i,http_other_status=0i,http_ups_4xx=0i,http_ups_5xx=0i,req_total=1i,rt=0i,ups_req=0i,ups_rt=0i,ups_tries=0i 1526546308000000000 diff --git a/plugins/inputs/tomcat/README.md b/plugins/inputs/tomcat/README.md index 1399a3157199c..68080c2ece624 100644 --- a/plugins/inputs/tomcat/README.md +++ b/plugins/inputs/tomcat/README.md @@ -4,7 +4,7 @@ The Tomcat plugin collects statistics available from the tomcat manager status p See the [Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager-howto.html#Server_Status) for details of these statistics. -### Configuration: +## Configuration ```toml # Gather metrics from the Tomcat server status page. 
@@ -27,7 +27,7 @@ See the [Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager- # insecure_skip_verify = false ``` -### Measurements & Fields: +## Measurements & Fields - tomcat_jvm_memory - free @@ -54,7 +54,7 @@ See the [Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager- - bytes_received - bytes_sent -### Tags: +## Tags - tomcat_jvm_memorypool has the following tags: - name @@ -62,9 +62,9 @@ See the [Tomcat documentation](https://tomcat.apache.org/tomcat-9.0-doc/manager- - tomcat_connector - name -### Example Output: +## Example Output -``` +```shell tomcat_jvm_memory,host=N8-MBP free=20014352i,max=127729664i,total=41459712i 1474663361000000000 tomcat_jvm_memorypool,host=N8-MBP,name=Eden\ Space,type=Heap\ memory committed=11534336i,init=2228224i,max=35258368i,used=1941200i 1474663361000000000 tomcat_jvm_memorypool,host=N8-MBP,name=Survivor\ Space,type=Heap\ memory committed=1376256i,init=262144i,max=4390912i,used=1376248i 1474663361000000000 diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md index 41ff8743e8cf3..9ece5604ac697 100644 --- a/plugins/inputs/trig/README.md +++ b/plugins/inputs/trig/README.md @@ -2,7 +2,7 @@ The `trig` plugin is for demonstration purposes and inserts sine and cosine -### Configuration +## Configuration ```toml # Inserts sine and cosine waves for demonstration purposes @@ -11,17 +11,16 @@ The `trig` plugin is for demonstration purposes and inserts sine and cosine amplitude = 10.0 ``` -### Metrics +## Metrics - trig - fields: - cosine (float) - sine (float) +## Example Output -### Example Output - -``` +```shell trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md index 0c07e0aec4463..6242f9b3c347f 100644 --- a/plugins/inputs/twemproxy/README.md +++ b/plugins/inputs/twemproxy/README.md @@ -2,8 +2,7 @@ The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers. - -### Configuration +## Configuration ```toml # Read Twemproxy stats data @@ -13,4 +12,3 @@ The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/tw ## Monitor pool name pools = ["redis_pool", "mc_pool"] ``` - diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index 1ccd183bc643c..025bb1727031e 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -3,7 +3,7 @@ This plugin gathers stats from [Unbound](https://www.unbound.net/) - a validating, recursive, and caching DNS resolver. -### Configuration: +## Configuration ```toml # A plugin to collect stats from the Unbound DNS resolver @@ -32,12 +32,13 @@ a validating, recursive, and caching DNS resolver. thread_as_tag = false ``` -#### Permissions: +### Permissions It's important to note that this plugin references unbound-control, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. 
**Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -50,12 +51,14 @@ telegraf : telegraf unbound **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.unbound]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -66,13 +69,13 @@ Defaults!UNBOUNDCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Metrics: +## Metrics This is the full list of stats provided by unbound-control and potentially collected depending of your unbound configuration. Histogram related statistics will never be collected, extended statistics can also be imported ("extended-statistics: yes" in unbound configuration). In the output, the dots in the unbound-control stat name are replaced by underscores(see -https://www.unbound.net/documentation/unbound-control.html for details). + for details). Shown metrics are with `thread_as_tag` enabled. @@ -147,8 +150,9 @@ Shown metrics are with `thread_as_tag` enabled. - recursion_time_avg - recursion_time_median -### Example Output: -``` +## Example Output + +```shell unbound,host=localhost total_requestlist_avg=0,total_requestlist_exceeded=0,total_requestlist_overwritten=0,total_requestlist_current_user=0,total_recursion_time_avg=0.029186,total_tcpusage=0,total_num_queries=51,total_num_queries_ip_ratelimited=0,total_num_recursivereplies=6,total_requestlist_max=0,time_now=1522804978.784814,time_elapsed=310.435217,total_num_cachemiss=6,total_num_zero_ttl=0,time_up=310.435217,total_num_cachehits=45,total_num_prefetch=0,total_requestlist_current_all=0,total_recursion_time_median=0.016384 1522804979000000000 unbound_threads,host=localhost,thread=0 num_queries_ip_ratelimited=0,requestlist_current_user=0,recursion_time_avg=0.029186,num_prefetch=0,requestlist_overwritten=0,requestlist_exceeded=0,requestlist_current_all=0,tcpusage=0,num_cachehits=37,num_cachemiss=6,num_recursivereplies=6,requestlist_avg=0,num_queries=43,num_zero_ttl=0,requestlist_max=0,recursion_time_median=0.032768 1522804979000000000 unbound_threads,host=localhost,thread=1 num_zero_ttl=0,recursion_time_avg=0,num_queries_ip_ratelimited=0,num_cachehits=8,num_prefetch=0,requestlist_exceeded=0,recursion_time_median=0,tcpusage=0,num_cachemiss=0,num_recursivereplies=0,requestlist_max=0,requestlist_overwritten=0,requestlist_current_user=0,num_queries=8,requestlist_avg=0,requestlist_current_all=0 1522804979000000000 diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md index 9a6d42764e3ef..2c99b04be6d13 100644 --- a/plugins/inputs/uwsgi/README.md +++ b/plugins/inputs/uwsgi/README.md @@ -2,7 +2,7 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). 
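The stats server has to be enabled on the uWSGI side before this plugin can collect anything. A minimal sketch, assuming a TCP stats socket on `127.0.0.1:1717` (`myapp` and both ports are placeholders):

```shell
# Serve the app on one socket and expose the stats server on another.
uwsgi --socket 127.0.0.1:3031 --module myapp --stats 127.0.0.1:1717
```

The address used here is then what the plugin's `servers` option should point at, using the matching scheme (for example `tcp://127.0.0.1:1717`).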
-### Configuration +## Configuration ```toml [[inputs.uwsgi]] @@ -17,23 +17,22 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](http # timeout = "5s" ``` +## Metrics -### Metrics: +- uwsgi_overview +- tags: + - source + - uid + - gid + - version +- fields: + - listen_queue + - listen_queue_errors + - signal_queue + - load + - pid - - uwsgi_overview - - tags: - - source - - uid - - gid - - version - - fields: - - listen_queue - - listen_queue_errors - - signal_queue - - load - - pid - -+ uwsgi_workers +- uwsgi_workers - tags: - worker_id - source @@ -66,7 +65,7 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](http - startup_time - exceptions -+ uwsgi_cores +- uwsgi_cores - tags: - core_id - worker_id @@ -78,15 +77,13 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](http - offloaded_requests - write_errors - read_errors - - in_request - + - in_request -### Example Output: +## Example Output -``` +```shell uwsgi_overview,gid=0,uid=0,source=172.17.0.2,version=2.0.18 listen_queue=0i,listen_queue_errors=0i,load=0i,pid=1i,signal_queue=0i 1564441407000000000 uwsgi_workers,source=172.17.0.2,worker_id=1 accepting=1i,avg_rt=0i,delta_request=0i,exceptions=0i,harakiri_count=0i,last_spawn=1564441202i,pid=6i,requests=0i,respawn_count=1i,rss=0i,running_time=0i,signal_queue=0i,signals=0i,status="idle",tx=0i,vsz=0i 1564441407000000000 uwsgi_apps,app_id=0,worker_id=1,source=172.17.0.2 exceptions=0i,modifier1=0i,requests=0i,startup_time=0i 1564441407000000000 uwsgi_cores,core_id=0,worker_id=1,source=172.17.0.2 in_request=0i,offloaded_requests=0i,read_errors=0i,requests=0i,routed_requests=0i,static_requests=0i,write_errors=0i 1564441407000000000 ``` - diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 2db1498040f25..8de919a501ac4 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -2,7 +2,7 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) -### Configuration: +## Configuration ```toml [[inputs.varnish]] @@ -26,311 +26,311 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) # timeout = "1s" ``` -### Measurements & Fields: +## Measurements & Fields -This is the full list of stats provided by varnish. Stats will be grouped by their capitalized prefix (eg MAIN, +This is the full list of stats provided by varnish. Stats will be grouped by their capitalized prefix (eg MAIN, MEMPOOL, etc). In the output, the prefix will be used as a tag, and removed from field names. - varnish - - MAIN.uptime (uint64, count, Child process uptime) - - MAIN.sess_conn (uint64, count, Sessions accepted) - - MAIN.sess_drop (uint64, count, Sessions dropped) - - MAIN.sess_fail (uint64, count, Session accept failures) - - MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow) - - MAIN.client_req_400 (uint64, count, Client requests received,) - - MAIN.client_req_411 (uint64, count, Client requests received,) - - MAIN.client_req_413 (uint64, count, Client requests received,) - - MAIN.client_req_417 (uint64, count, Client requests received,) - - MAIN.client_req (uint64, count, Good client requests) - - MAIN.cache_hit (uint64, count, Cache hits) - - MAIN.cache_hitpass (uint64, count, Cache hits for) - - MAIN.cache_miss (uint64, count, Cache misses) - - MAIN.backend_conn (uint64, count, Backend conn. success) - - MAIN.backend_unhealthy (uint64, count, Backend conn. 
not) - - MAIN.backend_busy (uint64, count, Backend conn. too) - - MAIN.backend_fail (uint64, count, Backend conn. failures) - - MAIN.backend_reuse (uint64, count, Backend conn. reuses) - - MAIN.backend_toolate (uint64, count, Backend conn. was) - - MAIN.backend_recycle (uint64, count, Backend conn. recycles) - - MAIN.backend_retry (uint64, count, Backend conn. retry) - - MAIN.fetch_head (uint64, count, Fetch no body) - - MAIN.fetch_length (uint64, count, Fetch with Length) - - MAIN.fetch_chunked (uint64, count, Fetch chunked) - - MAIN.fetch_eof (uint64, count, Fetch EOF) - - MAIN.fetch_bad (uint64, count, Fetch bad T- E) - - MAIN.fetch_close (uint64, count, Fetch wanted close) - - MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1) - - MAIN.fetch_zero (uint64, count, Fetch zero len) - - MAIN.fetch_1xx (uint64, count, Fetch no body) - - MAIN.fetch_204 (uint64, count, Fetch no body) - - MAIN.fetch_304 (uint64, count, Fetch no body) - - MAIN.fetch_failed (uint64, count, Fetch failed (all) - - MAIN.fetch_no_thread (uint64, count, Fetch failed (no) - - MAIN.pools (uint64, count, Number of thread) - - MAIN.threads (uint64, count, Total number of) - - MAIN.threads_limited (uint64, count, Threads hit max) - - MAIN.threads_created (uint64, count, Threads created) - - MAIN.threads_destroyed (uint64, count, Threads destroyed) - - MAIN.threads_failed (uint64, count, Thread creation failed) - - MAIN.thread_queue_len (uint64, count, Length of session) - - MAIN.busy_sleep (uint64, count, Number of requests) - - MAIN.busy_wakeup (uint64, count, Number of requests) - - MAIN.sess_queued (uint64, count, Sessions queued for) - - MAIN.sess_dropped (uint64, count, Sessions dropped for) - - MAIN.n_object (uint64, count, object structs made) - - MAIN.n_vampireobject (uint64, count, unresurrected objects) - - MAIN.n_objectcore (uint64, count, objectcore structs made) - - MAIN.n_objecthead (uint64, count, objecthead structs made) - - MAIN.n_waitinglist (uint64, count, waitinglist structs made) - - MAIN.n_backend (uint64, count, Number of backends) - - MAIN.n_expired (uint64, count, Number of expired) - - MAIN.n_lru_nuked (uint64, count, Number of LRU) - - MAIN.n_lru_moved (uint64, count, Number of LRU) - - MAIN.losthdr (uint64, count, HTTP header overflows) - - MAIN.s_sess (uint64, count, Total sessions seen) - - MAIN.s_req (uint64, count, Total requests seen) - - MAIN.s_pipe (uint64, count, Total pipe sessions) - - MAIN.s_pass (uint64, count, Total pass- ed requests) - - MAIN.s_fetch (uint64, count, Total backend fetches) - - MAIN.s_synth (uint64, count, Total synthetic responses) - - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) - - MAIN.s_req_bodybytes (uint64, count, Request body bytes) - - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) - - MAIN.s_resp_bodybytes (uint64, count, Response body bytes) - - MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header) - - MAIN.s_pipe_in (uint64, count, Piped bytes from) - - MAIN.s_pipe_out (uint64, count, Piped bytes to) - - MAIN.sess_closed (uint64, count, Session Closed) - - MAIN.sess_pipeline (uint64, count, Session Pipeline) - - MAIN.sess_readahead (uint64, count, Session Read Ahead) - - MAIN.sess_herd (uint64, count, Session herd) - - MAIN.shm_records (uint64, count, SHM records) - - MAIN.shm_writes (uint64, count, SHM writes) - - MAIN.shm_flushes (uint64, count, SHM flushes due) - - MAIN.shm_cont (uint64, count, SHM MTX contention) - - MAIN.shm_cycles (uint64, count, SHM cycles through) - - MAIN.sms_nreq (uint64, count, SMS allocator 
requests) - - MAIN.sms_nobj (uint64, count, SMS outstanding allocations) - - MAIN.sms_nbytes (uint64, count, SMS outstanding bytes) - - MAIN.sms_balloc (uint64, count, SMS bytes allocated) - - MAIN.sms_bfree (uint64, count, SMS bytes freed) - - MAIN.backend_req (uint64, count, Backend requests made) - - MAIN.n_vcl (uint64, count, Number of loaded) - - MAIN.n_vcl_avail (uint64, count, Number of VCLs) - - MAIN.n_vcl_discard (uint64, count, Number of discarded) - - MAIN.bans (uint64, count, Count of bans) - - MAIN.bans_completed (uint64, count, Number of bans) - - MAIN.bans_obj (uint64, count, Number of bans) - - MAIN.bans_req (uint64, count, Number of bans) - - MAIN.bans_added (uint64, count, Bans added) - - MAIN.bans_deleted (uint64, count, Bans deleted) - - MAIN.bans_tested (uint64, count, Bans tested against) - - MAIN.bans_obj_killed (uint64, count, Objects killed by) - - MAIN.bans_lurker_tested (uint64, count, Bans tested against) - - MAIN.bans_tests_tested (uint64, count, Ban tests tested) - - MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested) - - MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by) - - MAIN.bans_dups (uint64, count, Bans superseded by) - - MAIN.bans_lurker_contention (uint64, count, Lurker gave way) - - MAIN.bans_persisted_bytes (uint64, count, Bytes used by) - - MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in) - - MAIN.n_purges (uint64, count, Number of purge) - - MAIN.n_obj_purged (uint64, count, Number of purged) - - MAIN.exp_mailed (uint64, count, Number of objects) - - MAIN.exp_received (uint64, count, Number of objects) - - MAIN.hcb_nolock (uint64, count, HCB Lookups without) - - MAIN.hcb_lock (uint64, count, HCB Lookups with) - - MAIN.hcb_insert (uint64, count, HCB Inserts) - - MAIN.esi_errors (uint64, count, ESI parse errors) - - MAIN.esi_warnings (uint64, count, ESI parse warnings) - - MAIN.vmods (uint64, count, Loaded VMODs) - - MAIN.n_gzip (uint64, count, Gzip operations) - - MAIN.n_gunzip (uint64, count, Gunzip operations) - - MAIN.vsm_free (uint64, count, Free VSM space) - - MAIN.vsm_used (uint64, count, Used VSM space) - - MAIN.vsm_cooling (uint64, count, Cooling VSM space) - - MAIN.vsm_overflow (uint64, count, Overflow VSM space) - - MAIN.vsm_overflowed (uint64, count, Overflowed VSM space) - - MGT.uptime (uint64, count, Management process uptime) - - MGT.child_start (uint64, count, Child process started) - - MGT.child_exit (uint64, count, Child process normal) - - MGT.child_stop (uint64, count, Child process unexpected) - - MGT.child_died (uint64, count, Child process died) - - MGT.child_dump (uint64, count, Child process core) - - MGT.child_panic (uint64, count, Child process panic) - - MEMPOOL.vbc.live (uint64, count, In use) - - MEMPOOL.vbc.pool (uint64, count, In Pool) - - MEMPOOL.vbc.sz_wanted (uint64, count, Size requested) - - MEMPOOL.vbc.sz_needed (uint64, count, Size allocated) - - MEMPOOL.vbc.allocs (uint64, count, Allocations ) - - MEMPOOL.vbc.frees (uint64, count, Frees ) - - MEMPOOL.vbc.recycle (uint64, count, Recycled from pool) - - MEMPOOL.vbc.timeout (uint64, count, Timed out from) - - MEMPOOL.vbc.toosmall (uint64, count, Too small to) - - MEMPOOL.vbc.surplus (uint64, count, Too many for) - - MEMPOOL.vbc.randry (uint64, count, Pool ran dry) - - MEMPOOL.busyobj.live (uint64, count, In use) - - MEMPOOL.busyobj.pool (uint64, count, In Pool) - - MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested) - - MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated) - - MEMPOOL.busyobj.allocs (uint64, 
count, Allocations ) - - MEMPOOL.busyobj.frees (uint64, count, Frees ) - - MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool) - - MEMPOOL.busyobj.timeout (uint64, count, Timed out from) - - MEMPOOL.busyobj.toosmall (uint64, count, Too small to) - - MEMPOOL.busyobj.surplus (uint64, count, Too many for) - - MEMPOOL.busyobj.randry (uint64, count, Pool ran dry) - - MEMPOOL.req0.live (uint64, count, In use) - - MEMPOOL.req0.pool (uint64, count, In Pool) - - MEMPOOL.req0.sz_wanted (uint64, count, Size requested) - - MEMPOOL.req0.sz_needed (uint64, count, Size allocated) - - MEMPOOL.req0.allocs (uint64, count, Allocations ) - - MEMPOOL.req0.frees (uint64, count, Frees ) - - MEMPOOL.req0.recycle (uint64, count, Recycled from pool) - - MEMPOOL.req0.timeout (uint64, count, Timed out from) - - MEMPOOL.req0.toosmall (uint64, count, Too small to) - - MEMPOOL.req0.surplus (uint64, count, Too many for) - - MEMPOOL.req0.randry (uint64, count, Pool ran dry) - - MEMPOOL.sess0.live (uint64, count, In use) - - MEMPOOL.sess0.pool (uint64, count, In Pool) - - MEMPOOL.sess0.sz_wanted (uint64, count, Size requested) - - MEMPOOL.sess0.sz_needed (uint64, count, Size allocated) - - MEMPOOL.sess0.allocs (uint64, count, Allocations ) - - MEMPOOL.sess0.frees (uint64, count, Frees ) - - MEMPOOL.sess0.recycle (uint64, count, Recycled from pool) - - MEMPOOL.sess0.timeout (uint64, count, Timed out from) - - MEMPOOL.sess0.toosmall (uint64, count, Too small to) - - MEMPOOL.sess0.surplus (uint64, count, Too many for) - - MEMPOOL.sess0.randry (uint64, count, Pool ran dry) - - MEMPOOL.req1.live (uint64, count, In use) - - MEMPOOL.req1.pool (uint64, count, In Pool) - - MEMPOOL.req1.sz_wanted (uint64, count, Size requested) - - MEMPOOL.req1.sz_needed (uint64, count, Size allocated) - - MEMPOOL.req1.allocs (uint64, count, Allocations ) - - MEMPOOL.req1.frees (uint64, count, Frees ) - - MEMPOOL.req1.recycle (uint64, count, Recycled from pool) - - MEMPOOL.req1.timeout (uint64, count, Timed out from) - - MEMPOOL.req1.toosmall (uint64, count, Too small to) - - MEMPOOL.req1.surplus (uint64, count, Too many for) - - MEMPOOL.req1.randry (uint64, count, Pool ran dry) - - MEMPOOL.sess1.live (uint64, count, In use) - - MEMPOOL.sess1.pool (uint64, count, In Pool) - - MEMPOOL.sess1.sz_wanted (uint64, count, Size requested) - - MEMPOOL.sess1.sz_needed (uint64, count, Size allocated) - - MEMPOOL.sess1.allocs (uint64, count, Allocations ) - - MEMPOOL.sess1.frees (uint64, count, Frees ) - - MEMPOOL.sess1.recycle (uint64, count, Recycled from pool) - - MEMPOOL.sess1.timeout (uint64, count, Timed out from) - - MEMPOOL.sess1.toosmall (uint64, count, Too small to) - - MEMPOOL.sess1.surplus (uint64, count, Too many for) - - MEMPOOL.sess1.randry (uint64, count, Pool ran dry) - - SMA.s0.c_req (uint64, count, Allocator requests) - - SMA.s0.c_fail (uint64, count, Allocator failures) - - SMA.s0.c_bytes (uint64, count, Bytes allocated) - - SMA.s0.c_freed (uint64, count, Bytes freed) - - SMA.s0.g_alloc (uint64, count, Allocations outstanding) - - SMA.s0.g_bytes (uint64, count, Bytes outstanding) - - SMA.s0.g_space (uint64, count, Bytes available) - - SMA.Transient.c_req (uint64, count, Allocator requests) - - SMA.Transient.c_fail (uint64, count, Allocator failures) - - SMA.Transient.c_bytes (uint64, count, Bytes allocated) - - SMA.Transient.c_freed (uint64, count, Bytes freed) - - SMA.Transient.g_alloc (uint64, count, Allocations outstanding) - - SMA.Transient.g_bytes (uint64, count, Bytes outstanding) - - SMA.Transient.g_space (uint64, count, Bytes 
available) - - VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references) - - VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes) - - VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Request header bytes) - - VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes) - - VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Response header bytes) - - VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes) - - VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header) - - VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to) - - VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from) - - LCK.sms.creat (uint64, count, Created locks) - - LCK.sms.destroy (uint64, count, Destroyed locks) - - LCK.sms.locks (uint64, count, Lock Operations) - - LCK.smp.creat (uint64, count, Created locks) - - LCK.smp.destroy (uint64, count, Destroyed locks) - - LCK.smp.locks (uint64, count, Lock Operations) - - LCK.sma.creat (uint64, count, Created locks) - - LCK.sma.destroy (uint64, count, Destroyed locks) - - LCK.sma.locks (uint64, count, Lock Operations) - - LCK.smf.creat (uint64, count, Created locks) - - LCK.smf.destroy (uint64, count, Destroyed locks) - - LCK.smf.locks (uint64, count, Lock Operations) - - LCK.hsl.creat (uint64, count, Created locks) - - LCK.hsl.destroy (uint64, count, Destroyed locks) - - LCK.hsl.locks (uint64, count, Lock Operations) - - LCK.hcb.creat (uint64, count, Created locks) - - LCK.hcb.destroy (uint64, count, Destroyed locks) - - LCK.hcb.locks (uint64, count, Lock Operations) - - LCK.hcl.creat (uint64, count, Created locks) - - LCK.hcl.destroy (uint64, count, Destroyed locks) - - LCK.hcl.locks (uint64, count, Lock Operations) - - LCK.vcl.creat (uint64, count, Created locks) - - LCK.vcl.destroy (uint64, count, Destroyed locks) - - LCK.vcl.locks (uint64, count, Lock Operations) - - LCK.sessmem.creat (uint64, count, Created locks) - - LCK.sessmem.destroy (uint64, count, Destroyed locks) - - LCK.sessmem.locks (uint64, count, Lock Operations) - - LCK.sess.creat (uint64, count, Created locks) - - LCK.sess.destroy (uint64, count, Destroyed locks) - - LCK.sess.locks (uint64, count, Lock Operations) - - LCK.wstat.creat (uint64, count, Created locks) - - LCK.wstat.destroy (uint64, count, Destroyed locks) - - LCK.wstat.locks (uint64, count, Lock Operations) - - LCK.herder.creat (uint64, count, Created locks) - - LCK.herder.destroy (uint64, count, Destroyed locks) - - LCK.herder.locks (uint64, count, Lock Operations) - - LCK.wq.creat (uint64, count, Created locks) - - LCK.wq.destroy (uint64, count, Destroyed locks) - - LCK.wq.locks (uint64, count, Lock Operations) - - LCK.objhdr.creat (uint64, count, Created locks) - - LCK.objhdr.destroy (uint64, count, Destroyed locks) - - LCK.objhdr.locks (uint64, count, Lock Operations) - - LCK.exp.creat (uint64, count, Created locks) - - LCK.exp.destroy (uint64, count, Destroyed locks) - - LCK.exp.locks (uint64, count, Lock Operations) - - LCK.lru.creat (uint64, count, Created locks) - - LCK.lru.destroy (uint64, count, Destroyed locks) - - LCK.lru.locks (uint64, count, Lock Operations) - - LCK.cli.creat (uint64, count, Created locks) - - LCK.cli.destroy (uint64, count, Destroyed locks) - - LCK.cli.locks (uint64, count, Lock Operations) - - LCK.ban.creat (uint64, count, Created locks) - - LCK.ban.destroy (uint64, count, Destroyed locks) - - LCK.ban.locks (uint64, count, Lock Operations) - - LCK.vbp.creat (uint64, 
count, Created locks) - - LCK.vbp.destroy (uint64, count, Destroyed locks) - - LCK.vbp.locks (uint64, count, Lock Operations) - - LCK.backend.creat (uint64, count, Created locks) - - LCK.backend.destroy (uint64, count, Destroyed locks) - - LCK.backend.locks (uint64, count, Lock Operations) - - LCK.vcapace.creat (uint64, count, Created locks) - - LCK.vcapace.destroy (uint64, count, Destroyed locks) - - LCK.vcapace.locks (uint64, count, Lock Operations) - - LCK.nbusyobj.creat (uint64, count, Created locks) - - LCK.nbusyobj.destroy (uint64, count, Destroyed locks) - - LCK.nbusyobj.locks (uint64, count, Lock Operations) - - LCK.busyobj.creat (uint64, count, Created locks) - - LCK.busyobj.destroy (uint64, count, Destroyed locks) - - LCK.busyobj.locks (uint64, count, Lock Operations) - - LCK.mempool.creat (uint64, count, Created locks) - - LCK.mempool.destroy (uint64, count, Destroyed locks) - - LCK.mempool.locks (uint64, count, Lock Operations) - - LCK.vxid.creat (uint64, count, Created locks) - - LCK.vxid.destroy (uint64, count, Destroyed locks) - - LCK.vxid.locks (uint64, count, Lock Operations) - - LCK.pipestat.creat (uint64, count, Created locks) - - LCK.pipestat.destroy (uint64, count, Destroyed locks) - - LCK.pipestat.locks (uint64, count, Lock Operations) + - MAIN.uptime (uint64, count, Child process uptime) + - MAIN.sess_conn (uint64, count, Sessions accepted) + - MAIN.sess_drop (uint64, count, Sessions dropped) + - MAIN.sess_fail (uint64, count, Session accept failures) + - MAIN.sess_pipe_overflow (uint64, count, Session pipe overflow) + - MAIN.client_req_400 (uint64, count, Client requests received,) + - MAIN.client_req_411 (uint64, count, Client requests received,) + - MAIN.client_req_413 (uint64, count, Client requests received,) + - MAIN.client_req_417 (uint64, count, Client requests received,) + - MAIN.client_req (uint64, count, Good client requests) + - MAIN.cache_hit (uint64, count, Cache hits) + - MAIN.cache_hitpass (uint64, count, Cache hits for) + - MAIN.cache_miss (uint64, count, Cache misses) + - MAIN.backend_conn (uint64, count, Backend conn. success) + - MAIN.backend_unhealthy (uint64, count, Backend conn. not) + - MAIN.backend_busy (uint64, count, Backend conn. too) + - MAIN.backend_fail (uint64, count, Backend conn. failures) + - MAIN.backend_reuse (uint64, count, Backend conn. reuses) + - MAIN.backend_toolate (uint64, count, Backend conn. was) + - MAIN.backend_recycle (uint64, count, Backend conn. recycles) + - MAIN.backend_retry (uint64, count, Backend conn. 
retry) + - MAIN.fetch_head (uint64, count, Fetch no body) + - MAIN.fetch_length (uint64, count, Fetch with Length) + - MAIN.fetch_chunked (uint64, count, Fetch chunked) + - MAIN.fetch_eof (uint64, count, Fetch EOF) + - MAIN.fetch_bad (uint64, count, Fetch bad T- E) + - MAIN.fetch_close (uint64, count, Fetch wanted close) + - MAIN.fetch_oldhttp (uint64, count, Fetch pre HTTP/1.1) + - MAIN.fetch_zero (uint64, count, Fetch zero len) + - MAIN.fetch_1xx (uint64, count, Fetch no body) + - MAIN.fetch_204 (uint64, count, Fetch no body) + - MAIN.fetch_304 (uint64, count, Fetch no body) + - MAIN.fetch_failed (uint64, count, Fetch failed (all) + - MAIN.fetch_no_thread (uint64, count, Fetch failed (no) + - MAIN.pools (uint64, count, Number of thread) + - MAIN.threads (uint64, count, Total number of) + - MAIN.threads_limited (uint64, count, Threads hit max) + - MAIN.threads_created (uint64, count, Threads created) + - MAIN.threads_destroyed (uint64, count, Threads destroyed) + - MAIN.threads_failed (uint64, count, Thread creation failed) + - MAIN.thread_queue_len (uint64, count, Length of session) + - MAIN.busy_sleep (uint64, count, Number of requests) + - MAIN.busy_wakeup (uint64, count, Number of requests) + - MAIN.sess_queued (uint64, count, Sessions queued for) + - MAIN.sess_dropped (uint64, count, Sessions dropped for) + - MAIN.n_object (uint64, count, object structs made) + - MAIN.n_vampireobject (uint64, count, unresurrected objects) + - MAIN.n_objectcore (uint64, count, objectcore structs made) + - MAIN.n_objecthead (uint64, count, objecthead structs made) + - MAIN.n_waitinglist (uint64, count, waitinglist structs made) + - MAIN.n_backend (uint64, count, Number of backends) + - MAIN.n_expired (uint64, count, Number of expired) + - MAIN.n_lru_nuked (uint64, count, Number of LRU) + - MAIN.n_lru_moved (uint64, count, Number of LRU) + - MAIN.losthdr (uint64, count, HTTP header overflows) + - MAIN.s_sess (uint64, count, Total sessions seen) + - MAIN.s_req (uint64, count, Total requests seen) + - MAIN.s_pipe (uint64, count, Total pipe sessions) + - MAIN.s_pass (uint64, count, Total pass- ed requests) + - MAIN.s_fetch (uint64, count, Total backend fetches) + - MAIN.s_synth (uint64, count, Total synthetic responses) + - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) + - MAIN.s_req_bodybytes (uint64, count, Request body bytes) + - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) + - MAIN.s_resp_bodybytes (uint64, count, Response body bytes) + - MAIN.s_pipe_hdrbytes (uint64, count, Pipe request header) + - MAIN.s_pipe_in (uint64, count, Piped bytes from) + - MAIN.s_pipe_out (uint64, count, Piped bytes to) + - MAIN.sess_closed (uint64, count, Session Closed) + - MAIN.sess_pipeline (uint64, count, Session Pipeline) + - MAIN.sess_readahead (uint64, count, Session Read Ahead) + - MAIN.sess_herd (uint64, count, Session herd) + - MAIN.shm_records (uint64, count, SHM records) + - MAIN.shm_writes (uint64, count, SHM writes) + - MAIN.shm_flushes (uint64, count, SHM flushes due) + - MAIN.shm_cont (uint64, count, SHM MTX contention) + - MAIN.shm_cycles (uint64, count, SHM cycles through) + - MAIN.sms_nreq (uint64, count, SMS allocator requests) + - MAIN.sms_nobj (uint64, count, SMS outstanding allocations) + - MAIN.sms_nbytes (uint64, count, SMS outstanding bytes) + - MAIN.sms_balloc (uint64, count, SMS bytes allocated) + - MAIN.sms_bfree (uint64, count, SMS bytes freed) + - MAIN.backend_req (uint64, count, Backend requests made) + - MAIN.n_vcl (uint64, count, Number of loaded) + - 
MAIN.n_vcl_avail (uint64, count, Number of VCLs) + - MAIN.n_vcl_discard (uint64, count, Number of discarded) + - MAIN.bans (uint64, count, Count of bans) + - MAIN.bans_completed (uint64, count, Number of bans) + - MAIN.bans_obj (uint64, count, Number of bans) + - MAIN.bans_req (uint64, count, Number of bans) + - MAIN.bans_added (uint64, count, Bans added) + - MAIN.bans_deleted (uint64, count, Bans deleted) + - MAIN.bans_tested (uint64, count, Bans tested against) + - MAIN.bans_obj_killed (uint64, count, Objects killed by) + - MAIN.bans_lurker_tested (uint64, count, Bans tested against) + - MAIN.bans_tests_tested (uint64, count, Ban tests tested) + - MAIN.bans_lurker_tests_tested (uint64, count, Ban tests tested) + - MAIN.bans_lurker_obj_killed (uint64, count, Objects killed by) + - MAIN.bans_dups (uint64, count, Bans superseded by) + - MAIN.bans_lurker_contention (uint64, count, Lurker gave way) + - MAIN.bans_persisted_bytes (uint64, count, Bytes used by) + - MAIN.bans_persisted_fragmentation (uint64, count, Extra bytes in) + - MAIN.n_purges (uint64, count, Number of purge) + - MAIN.n_obj_purged (uint64, count, Number of purged) + - MAIN.exp_mailed (uint64, count, Number of objects) + - MAIN.exp_received (uint64, count, Number of objects) + - MAIN.hcb_nolock (uint64, count, HCB Lookups without) + - MAIN.hcb_lock (uint64, count, HCB Lookups with) + - MAIN.hcb_insert (uint64, count, HCB Inserts) + - MAIN.esi_errors (uint64, count, ESI parse errors) + - MAIN.esi_warnings (uint64, count, ESI parse warnings) + - MAIN.vmods (uint64, count, Loaded VMODs) + - MAIN.n_gzip (uint64, count, Gzip operations) + - MAIN.n_gunzip (uint64, count, Gunzip operations) + - MAIN.vsm_free (uint64, count, Free VSM space) + - MAIN.vsm_used (uint64, count, Used VSM space) + - MAIN.vsm_cooling (uint64, count, Cooling VSM space) + - MAIN.vsm_overflow (uint64, count, Overflow VSM space) + - MAIN.vsm_overflowed (uint64, count, Overflowed VSM space) + - MGT.uptime (uint64, count, Management process uptime) + - MGT.child_start (uint64, count, Child process started) + - MGT.child_exit (uint64, count, Child process normal) + - MGT.child_stop (uint64, count, Child process unexpected) + - MGT.child_died (uint64, count, Child process died) + - MGT.child_dump (uint64, count, Child process core) + - MGT.child_panic (uint64, count, Child process panic) + - MEMPOOL.vbc.live (uint64, count, In use) + - MEMPOOL.vbc.pool (uint64, count, In Pool) + - MEMPOOL.vbc.sz_wanted (uint64, count, Size requested) + - MEMPOOL.vbc.sz_needed (uint64, count, Size allocated) + - MEMPOOL.vbc.allocs (uint64, count, Allocations ) + - MEMPOOL.vbc.frees (uint64, count, Frees ) + - MEMPOOL.vbc.recycle (uint64, count, Recycled from pool) + - MEMPOOL.vbc.timeout (uint64, count, Timed out from) + - MEMPOOL.vbc.toosmall (uint64, count, Too small to) + - MEMPOOL.vbc.surplus (uint64, count, Too many for) + - MEMPOOL.vbc.randry (uint64, count, Pool ran dry) + - MEMPOOL.busyobj.live (uint64, count, In use) + - MEMPOOL.busyobj.pool (uint64, count, In Pool) + - MEMPOOL.busyobj.sz_wanted (uint64, count, Size requested) + - MEMPOOL.busyobj.sz_needed (uint64, count, Size allocated) + - MEMPOOL.busyobj.allocs (uint64, count, Allocations ) + - MEMPOOL.busyobj.frees (uint64, count, Frees ) + - MEMPOOL.busyobj.recycle (uint64, count, Recycled from pool) + - MEMPOOL.busyobj.timeout (uint64, count, Timed out from) + - MEMPOOL.busyobj.toosmall (uint64, count, Too small to) + - MEMPOOL.busyobj.surplus (uint64, count, Too many for) + - MEMPOOL.busyobj.randry (uint64, count, 
Pool ran dry) + - MEMPOOL.req0.live (uint64, count, In use) + - MEMPOOL.req0.pool (uint64, count, In Pool) + - MEMPOOL.req0.sz_wanted (uint64, count, Size requested) + - MEMPOOL.req0.sz_needed (uint64, count, Size allocated) + - MEMPOOL.req0.allocs (uint64, count, Allocations ) + - MEMPOOL.req0.frees (uint64, count, Frees ) + - MEMPOOL.req0.recycle (uint64, count, Recycled from pool) + - MEMPOOL.req0.timeout (uint64, count, Timed out from) + - MEMPOOL.req0.toosmall (uint64, count, Too small to) + - MEMPOOL.req0.surplus (uint64, count, Too many for) + - MEMPOOL.req0.randry (uint64, count, Pool ran dry) + - MEMPOOL.sess0.live (uint64, count, In use) + - MEMPOOL.sess0.pool (uint64, count, In Pool) + - MEMPOOL.sess0.sz_wanted (uint64, count, Size requested) + - MEMPOOL.sess0.sz_needed (uint64, count, Size allocated) + - MEMPOOL.sess0.allocs (uint64, count, Allocations ) + - MEMPOOL.sess0.frees (uint64, count, Frees ) + - MEMPOOL.sess0.recycle (uint64, count, Recycled from pool) + - MEMPOOL.sess0.timeout (uint64, count, Timed out from) + - MEMPOOL.sess0.toosmall (uint64, count, Too small to) + - MEMPOOL.sess0.surplus (uint64, count, Too many for) + - MEMPOOL.sess0.randry (uint64, count, Pool ran dry) + - MEMPOOL.req1.live (uint64, count, In use) + - MEMPOOL.req1.pool (uint64, count, In Pool) + - MEMPOOL.req1.sz_wanted (uint64, count, Size requested) + - MEMPOOL.req1.sz_needed (uint64, count, Size allocated) + - MEMPOOL.req1.allocs (uint64, count, Allocations ) + - MEMPOOL.req1.frees (uint64, count, Frees ) + - MEMPOOL.req1.recycle (uint64, count, Recycled from pool) + - MEMPOOL.req1.timeout (uint64, count, Timed out from) + - MEMPOOL.req1.toosmall (uint64, count, Too small to) + - MEMPOOL.req1.surplus (uint64, count, Too many for) + - MEMPOOL.req1.randry (uint64, count, Pool ran dry) + - MEMPOOL.sess1.live (uint64, count, In use) + - MEMPOOL.sess1.pool (uint64, count, In Pool) + - MEMPOOL.sess1.sz_wanted (uint64, count, Size requested) + - MEMPOOL.sess1.sz_needed (uint64, count, Size allocated) + - MEMPOOL.sess1.allocs (uint64, count, Allocations ) + - MEMPOOL.sess1.frees (uint64, count, Frees ) + - MEMPOOL.sess1.recycle (uint64, count, Recycled from pool) + - MEMPOOL.sess1.timeout (uint64, count, Timed out from) + - MEMPOOL.sess1.toosmall (uint64, count, Too small to) + - MEMPOOL.sess1.surplus (uint64, count, Too many for) + - MEMPOOL.sess1.randry (uint64, count, Pool ran dry) + - SMA.s0.c_req (uint64, count, Allocator requests) + - SMA.s0.c_fail (uint64, count, Allocator failures) + - SMA.s0.c_bytes (uint64, count, Bytes allocated) + - SMA.s0.c_freed (uint64, count, Bytes freed) + - SMA.s0.g_alloc (uint64, count, Allocations outstanding) + - SMA.s0.g_bytes (uint64, count, Bytes outstanding) + - SMA.s0.g_space (uint64, count, Bytes available) + - SMA.Transient.c_req (uint64, count, Allocator requests) + - SMA.Transient.c_fail (uint64, count, Allocator failures) + - SMA.Transient.c_bytes (uint64, count, Bytes allocated) + - SMA.Transient.c_freed (uint64, count, Bytes freed) + - SMA.Transient.g_alloc (uint64, count, Allocations outstanding) + - SMA.Transient.g_bytes (uint64, count, Bytes outstanding) + - SMA.Transient.g_space (uint64, count, Bytes available) + - VBE.default(127.0.0.1,,8080).vcls (uint64, count, VCL references) + - VBE.default(127.0.0.1,,8080).happy (uint64, count, Happy health probes) + - VBE.default(127.0.0.1,,8080).bereq_hdrbytes (uint64, count, Request header bytes) + - VBE.default(127.0.0.1,,8080).bereq_bodybytes (uint64, count, Request body bytes) + - 
VBE.default(127.0.0.1,,8080).beresp_hdrbytes (uint64, count, Response header bytes) + - VBE.default(127.0.0.1,,8080).beresp_bodybytes (uint64, count, Response body bytes) + - VBE.default(127.0.0.1,,8080).pipe_hdrbytes (uint64, count, Pipe request header) + - VBE.default(127.0.0.1,,8080).pipe_out (uint64, count, Piped bytes to) + - VBE.default(127.0.0.1,,8080).pipe_in (uint64, count, Piped bytes from) + - LCK.sms.creat (uint64, count, Created locks) + - LCK.sms.destroy (uint64, count, Destroyed locks) + - LCK.sms.locks (uint64, count, Lock Operations) + - LCK.smp.creat (uint64, count, Created locks) + - LCK.smp.destroy (uint64, count, Destroyed locks) + - LCK.smp.locks (uint64, count, Lock Operations) + - LCK.sma.creat (uint64, count, Created locks) + - LCK.sma.destroy (uint64, count, Destroyed locks) + - LCK.sma.locks (uint64, count, Lock Operations) + - LCK.smf.creat (uint64, count, Created locks) + - LCK.smf.destroy (uint64, count, Destroyed locks) + - LCK.smf.locks (uint64, count, Lock Operations) + - LCK.hsl.creat (uint64, count, Created locks) + - LCK.hsl.destroy (uint64, count, Destroyed locks) + - LCK.hsl.locks (uint64, count, Lock Operations) + - LCK.hcb.creat (uint64, count, Created locks) + - LCK.hcb.destroy (uint64, count, Destroyed locks) + - LCK.hcb.locks (uint64, count, Lock Operations) + - LCK.hcl.creat (uint64, count, Created locks) + - LCK.hcl.destroy (uint64, count, Destroyed locks) + - LCK.hcl.locks (uint64, count, Lock Operations) + - LCK.vcl.creat (uint64, count, Created locks) + - LCK.vcl.destroy (uint64, count, Destroyed locks) + - LCK.vcl.locks (uint64, count, Lock Operations) + - LCK.sessmem.creat (uint64, count, Created locks) + - LCK.sessmem.destroy (uint64, count, Destroyed locks) + - LCK.sessmem.locks (uint64, count, Lock Operations) + - LCK.sess.creat (uint64, count, Created locks) + - LCK.sess.destroy (uint64, count, Destroyed locks) + - LCK.sess.locks (uint64, count, Lock Operations) + - LCK.wstat.creat (uint64, count, Created locks) + - LCK.wstat.destroy (uint64, count, Destroyed locks) + - LCK.wstat.locks (uint64, count, Lock Operations) + - LCK.herder.creat (uint64, count, Created locks) + - LCK.herder.destroy (uint64, count, Destroyed locks) + - LCK.herder.locks (uint64, count, Lock Operations) + - LCK.wq.creat (uint64, count, Created locks) + - LCK.wq.destroy (uint64, count, Destroyed locks) + - LCK.wq.locks (uint64, count, Lock Operations) + - LCK.objhdr.creat (uint64, count, Created locks) + - LCK.objhdr.destroy (uint64, count, Destroyed locks) + - LCK.objhdr.locks (uint64, count, Lock Operations) + - LCK.exp.creat (uint64, count, Created locks) + - LCK.exp.destroy (uint64, count, Destroyed locks) + - LCK.exp.locks (uint64, count, Lock Operations) + - LCK.lru.creat (uint64, count, Created locks) + - LCK.lru.destroy (uint64, count, Destroyed locks) + - LCK.lru.locks (uint64, count, Lock Operations) + - LCK.cli.creat (uint64, count, Created locks) + - LCK.cli.destroy (uint64, count, Destroyed locks) + - LCK.cli.locks (uint64, count, Lock Operations) + - LCK.ban.creat (uint64, count, Created locks) + - LCK.ban.destroy (uint64, count, Destroyed locks) + - LCK.ban.locks (uint64, count, Lock Operations) + - LCK.vbp.creat (uint64, count, Created locks) + - LCK.vbp.destroy (uint64, count, Destroyed locks) + - LCK.vbp.locks (uint64, count, Lock Operations) + - LCK.backend.creat (uint64, count, Created locks) + - LCK.backend.destroy (uint64, count, Destroyed locks) + - LCK.backend.locks (uint64, count, Lock Operations) + - LCK.vcapace.creat (uint64, count, 
Created locks) + - LCK.vcapace.destroy (uint64, count, Destroyed locks) + - LCK.vcapace.locks (uint64, count, Lock Operations) + - LCK.nbusyobj.creat (uint64, count, Created locks) + - LCK.nbusyobj.destroy (uint64, count, Destroyed locks) + - LCK.nbusyobj.locks (uint64, count, Lock Operations) + - LCK.busyobj.creat (uint64, count, Created locks) + - LCK.busyobj.destroy (uint64, count, Destroyed locks) + - LCK.busyobj.locks (uint64, count, Lock Operations) + - LCK.mempool.creat (uint64, count, Created locks) + - LCK.mempool.destroy (uint64, count, Destroyed locks) + - LCK.mempool.locks (uint64, count, Lock Operations) + - LCK.vxid.creat (uint64, count, Created locks) + - LCK.vxid.destroy (uint64, count, Destroyed locks) + - LCK.vxid.locks (uint64, count, Lock Operations) + - LCK.pipestat.creat (uint64, count, Created locks) + - LCK.pipestat.destroy (uint64, count, Destroyed locks) + - LCK.pipestat.locks (uint64, count, Lock Operations) +## Tags -### Tags: - -As indicated above, the prefix of a varnish stat will be used as it's 'section' tag. So section tag may have one of +As indicated above, the prefix of a varnish stat will be used as it's 'section' tag. So section tag may have one of the following values: + - section: - MAIN - MGT @@ -339,14 +339,13 @@ the following values: - VBE - LCK - - -### Permissions: +## Permissions It's important to note that this plugin references varnishstat, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -358,6 +357,7 @@ telegraf : telegraf varnish ``` **Extended filesystem ACL's**: + ```bash $ getfacl /var/lib/varnish//_.vsm # file: var/lib/varnish//_.vsm @@ -382,12 +382,14 @@ other::--- **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.varnish]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -398,9 +400,9 @@ Defaults!VARNISHSTAT !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Example Output: +## Example Output -``` +```shell telegraf --config etc/telegraf.conf --input-filter varnish --test * Plugin: varnish, Collection 1 > varnish,host=rpercy-VirtualBox,section=MAIN cache_hit=0i,cache_miss=0i,uptime=8416i 1462765437090957980 diff --git a/plugins/inputs/vsphere/METRICS.md b/plugins/inputs/vsphere/METRICS.md index d1a34bb26c4f9..6e21ca0c8af6f 100644 --- a/plugins/inputs/vsphere/METRICS.md +++ b/plugins/inputs/vsphere/METRICS.md @@ -1,7 +1,8 @@ # Common vSphere Performance Metrics -The set of performance metrics in vSphere is open ended. Metrics may be added or removed in new releases -and the set of available metrics may vary depending hardware, as well as what plugins and add-on products -are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed + +The set of performance metrics in vSphere is open ended. Metrics may be added or removed in new releases +and the set of available metrics may vary depending hardware, as well as what plugins and add-on products +are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed below are the most commonly available as of vSphere 6.5. 
For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare vCenter Converter API Reference](https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.PerformanceManager.html). @@ -9,12 +10,14 @@ For a complete list of metrics available from vSphere and the units they measure To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc) To obtain the set of metrics for e.g. a VM, you may use the following command: -``` + +```shell govc metric.ls vm/* ``` ## Virtual Machine Metrics -``` + +```metrics cpu.demandEntitlementRatio.latest cpu.usage.average cpu.ready.summation @@ -107,7 +110,8 @@ virtualDisk.read.average ``` ## Host System Metrics -``` + +```metrics cpu.corecount.contention.average cpu.usage.average cpu.reservedCapacity.average @@ -190,7 +194,8 @@ vmop.numXVMotion.latest ``` ## Cluster Metrics -``` + +```metrics cpu.corecount.contention.average cpu.usage.average cpu.reservedCapacity.average @@ -273,7 +278,8 @@ vmop.numXVMotion.latest ``` ## Datastore Metrics -``` + +```metrics datastore.numberReadAveraged.average datastore.throughput.contention.average datastore.throughput.usage.average diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 7d73ea7e35855..5b5fdaf22fd4a 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -18,7 +18,7 @@ Compatibility information was found [here](https://github.com/vmware/govmomi/tre NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. For example, to disable collection of VMs, add this: -``` +```toml vm_metric_exclude = [ "*" ] ``` @@ -216,7 +216,7 @@ A vCenter administrator can change this setting, see this [VMware KB article](ht Any modification should be reflected in this plugin by modifying the parameter `max_query_objects` -``` +```toml ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 @@ -230,17 +230,18 @@ though the default of 1 (no concurrency) should be sufficient for most configura For setting up concurrency, modify `collect_concurrency` and `discover_concurrency` parameters. -``` +```toml ## number of go routines to use for collection and discovery of objects and metrics # collect_concurrency = 1 # discover_concurrency = 1 ``` ### Inventory Paths + Resources to be monitored can be selected using Inventory Paths. This treats the vSphere inventory as a tree structure similar to a file system. A vSphere inventory has a structure similar to this: -``` +```bash +-DC0 # Virtual datacenter +-datastore # Datastore folder (created by system) @@ -266,6 +267,7 @@ to a file system. A vSphere inventory has a structure similar to this: ``` #### Using Inventory Paths + Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```. Often, we want to select a group of resource, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that. @@ -275,9 +277,11 @@ Another possibility is to select objects using a partial name, such as ```/DC0/v Finally, due to the arbitrary nesting of the folder structure, we need a "recursive wildcard" for traversing multiple folders. We use the "**" symbol for that. 
If we want to look for a VM with a name starting with "hadoop" in any folder, we could use the following path: ```/DC0/vm/**/hadoop*``` #### Multiple paths to VMs + As we can see from the example tree above, VMs appear both in its on folder under the datacenter, as well as under the hosts. This is useful when you like to select VMs on a specific host. For example, ```/DC0/host/Cluster1/Host1/hadoop*``` selects all VMs with a name starting with "hadoop" that are running on Host1. We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop*```. This selects any VM matching "hadoop*" on any host in Cluster1. + ## Performance Considerations ### Realtime vs. historical metrics @@ -287,7 +291,7 @@ vCenter keeps two different kinds of metrics, known as realtime and historical m * Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. * Historical metrics: Available at a (default) 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the most granular rollup which defaults to 5 minutes but can be changed in vCenter to other interval durations. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. -For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html +For more information, refer to the vSphere documentation here: This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to this to appear in the Telegraf logs: @@ -347,12 +351,14 @@ Cluster metrics are handled a bit differently by vCenter. They are aggregated fr ```2018-11-02T13:37:11Z E! Error in plugin [inputs.vsphere]: ServerFaultCode: This operation is restricted by the administrator - 'vpxd.stats.maxQueryMetrics'. Contact your system administrator``` There are two ways of addressing this: + * Ask your vCenter administrator to set ```config.vpxd.stats.maxQueryMetrics``` to a number that's higher than the total number of virtual machines managed by a vCenter instance. * Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster or use queries in the visualization tool to obtain the same result. 
### Concurrency settings The vSphere plugin allows you to specify two concurrency settings: + * ```collect_concurrency```: The maximum number of simultaneous queries for performance metrics allowed per resource type. * ```discover_concurrency```: The maximum number of simultaneous queries for resource discovery allowed. @@ -361,77 +367,78 @@ While a higher level of concurrency typically has a positive impact on performan ### Configuring historical_interval setting When the vSphere plugin queries vCenter for historical statistics it queries for statistics that exist at a specific interval. The default historical interval duration is 5 minutes but if this interval has been changed then you must override the default query interval in the vSphere plugin. + * ```historical_interval```: The interval of the most granular statistics configured in vSphere represented in seconds. ## Measurements & Fields -- Cluster Stats - - Cluster services: CPU, memory, failover - - CPU: total, usage - - Memory: consumed, total, vmmemctl - - VM operations: # changes, clone, create, deploy, destroy, power, reboot, reconfigure, register, reset, shutdown, standby, vmotion -- Host Stats: - - CPU: total, usage, cost, mhz - - Datastore: iops, latency, read/write bytes, # reads/writes - - Disk: commands, latency, kernel reads/writes, # reads/writes, queues - - Memory: total, usage, active, latency, swap, shared, vmmemctl - - Network: broadcast, bytes, dropped, errors, multicast, packets, usage - - Power: energy, usage, capacity - - Res CPU: active, max, running - - Storage Adapter: commands, latency, # reads/writes - - Storage Path: commands, latency, # reads/writes - - System Resources: cpu active, cpu max, cpu running, cpu usage, mem allocated, mem consumed, mem shared, swap - - System: uptime - - Flash Module: active VMDKs -- VM Stats: - - CPU: demand, usage, readiness, cost, mhz - - Datastore: latency, # reads/writes - - Disk: commands, latency, # reads/writes, provisioned, usage - - Memory: granted, usage, active, swap, vmmemctl - - Network: broadcast, bytes, dropped, multicast, packets, usage - - Power: energy, usage - - Res CPU: active, max, running - - System: operating system uptime, uptime - - Virtual Disk: seeks, # reads/writes, latency, load -- Datastore stats: - - Disk: Capacity, provisioned, used +* Cluster Stats + * Cluster services: CPU, memory, failover + * CPU: total, usage + * Memory: consumed, total, vmmemctl + * VM operations: # changes, clone, create, deploy, destroy, power, reboot, reconfigure, register, reset, shutdown, standby, vmotion +* Host Stats: + * CPU: total, usage, cost, mhz + * Datastore: iops, latency, read/write bytes, # reads/writes + * Disk: commands, latency, kernel reads/writes, # reads/writes, queues + * Memory: total, usage, active, latency, swap, shared, vmmemctl + * Network: broadcast, bytes, dropped, errors, multicast, packets, usage + * Power: energy, usage, capacity + * Res CPU: active, max, running + * Storage Adapter: commands, latency, # reads/writes + * Storage Path: commands, latency, # reads/writes + * System Resources: cpu active, cpu max, cpu running, cpu usage, mem allocated, mem consumed, mem shared, swap + * System: uptime + * Flash Module: active VMDKs +* VM Stats: + * CPU: demand, usage, readiness, cost, mhz + * Datastore: latency, # reads/writes + * Disk: commands, latency, # reads/writes, provisioned, usage + * Memory: granted, usage, active, swap, vmmemctl + * Network: broadcast, bytes, dropped, multicast, packets, usage + * Power: energy, usage + * Res CPU: 
active, max, running + * System: operating system uptime, uptime + * Virtual Disk: seeks, # reads/writes, latency, load +* Datastore stats: + * Disk: Capacity, provisioned, used For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md) ## Tags -- all metrics - - vcenter (vcenter url) -- all host metrics - - cluster (vcenter cluster) -- all vm metrics - - cluster (vcenter cluster) - - esxhost (name of ESXi host) - - guest (guest operating system id) -- cpu stats for Host and VM - - cpu (cpu core - not all CPU fields will have this tag) -- datastore stats for Host and VM - - datastore (id of datastore) -- disk stats for Host and VM - - disk (name of disk) -- disk.used.capacity for Datastore - - disk (type of disk) -- net stats for Host and VM - - interface (name of network interface) -- storageAdapter stats for Host - - adapter (name of storage adapter) -- storagePath stats for Host - - path (id of storage path) -- sys.resource* stats for Host - - resource (resource type) -- vflashModule stats for Host - - module (name of flash module) -- virtualDisk stats for VM - - disk (name of virtual disk) +* all metrics + * vcenter (vcenter url) +* all host metrics + * cluster (vcenter cluster) +* all vm metrics + * cluster (vcenter cluster) + * esxhost (name of ESXi host) + * guest (guest operating system id) +* cpu stats for Host and VM + * cpu (cpu core - not all CPU fields will have this tag) +* datastore stats for Host and VM + * datastore (id of datastore) +* disk stats for Host and VM + * disk (name of disk) +* disk.used.capacity for Datastore + * disk (type of disk) +* net stats for Host and VM + * interface (name of network interface) +* storageAdapter stats for Host + * adapter (name of storage adapter) +* storagePath stats for Host + * path (id of storage path) +* sys.resource* stats for Host + * resource (resource type) +* vflashModule stats for Host + * module (name of flash module) +* virtualDisk stats for VM + * disk (name of virtual disk) ## Sample output -``` +```shell vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 run_summation=2608i,ready_summation=129i,usage_average=5.01,used_summation=2134i,demand_average=326i 1535660299000000000 vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=321i,bytesTx_average=335i 1535660299000000000 vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=144i,read_average=4i 1535660299000000000 From 7d3531a29bd4c8c9e09b7e213514a6b6e7cfb957 Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 24 Nov 2021 11:50:22 -0700 Subject: [PATCH 074/133] chore: clean up markdown lint errors input plugins w to z (#10166) --- plugins/inputs/webhooks/README.md | 15 +-- plugins/inputs/webhooks/filestack/README.md | 2 + plugins/inputs/webhooks/github/README.md | 92 ++++++++++++---- plugins/inputs/webhooks/mandrill/README.md | 2 + plugins/inputs/webhooks/papertrail/README.md | 4 +- plugins/inputs/webhooks/particle/README.md | 19 ++-- plugins/inputs/webhooks/rollbar/README.md | 16 ++- plugins/inputs/wireguard/README.md | 16 +-- plugins/inputs/wireless/README.md | 8 +- plugins/inputs/x509_cert/README.md | 11 +- plugins/inputs/zipkin/README.md | 110 +++++++++++-------- plugins/inputs/zookeeper/README.md | 11 +- 12 files changed, 184 
insertions(+), 122 deletions(-) diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md index 2eea2a537adee..6b90dc45cab57 100644 --- a/plugins/inputs/webhooks/README.md +++ b/plugins/inputs/webhooks/README.md @@ -3,18 +3,17 @@ This is a Telegraf service plugin that start an http server and register multiple webhook listeners. ```sh -$ telegraf config -input-filter webhooks -output-filter influxdb > config.conf.new +telegraf config -input-filter webhooks -output-filter influxdb > config.conf.new ``` Change the config file to point to the InfluxDB server you are using and adjust the settings to match your environment. Once that is complete: ```sh -$ cp config.conf.new /etc/telegraf/telegraf.conf -$ sudo service telegraf start +cp config.conf.new /etc/telegraf/telegraf.conf +sudo service telegraf start ``` - -### Configuration: +## Configuration ```toml [[inputs.webhooks]] @@ -41,8 +40,7 @@ $ sudo service telegraf start path = "/particle" ``` - -### Available webhooks +## Available webhooks - [Filestack](filestack/) - [Github](github/) @@ -51,8 +49,7 @@ $ sudo service telegraf start - [Papertrail](papertrail/) - [Particle](particle/) - -### Adding new webhooks plugin +## Adding new webhooks plugin 1. Add your webhook plugin inside the `webhooks` folder 1. Your plugin must implement the `Webhook` interface diff --git a/plugins/inputs/webhooks/filestack/README.md b/plugins/inputs/webhooks/filestack/README.md index 7af2a780d9872..61d1d96736e32 100644 --- a/plugins/inputs/webhooks/filestack/README.md +++ b/plugins/inputs/webhooks/filestack/README.md @@ -11,7 +11,9 @@ See the [webhook doc](https://www.filestack.com/docs/webhooks). All events for logs the original timestamp, the action and the id. **Tags:** + * 'action' = `event.action` string **Fields:** + * 'id' = `event.id` string diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md index 4a4e64c730a67..154b422b9d2c6 100644 --- a/plugins/inputs/webhooks/github/README.md +++ b/plugins/inputs/webhooks/github/README.md @@ -1,23 +1,26 @@ # github webhooks -You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me everything'. By default all of the events will write to the `github_webhooks` measurement, this is configurable by setting the `measurement_name` in the config file. +You should configure your Organization's Webhooks to point at the `webhooks` service. To do this go to `github.com/{my_organization}` and click `Settings > Webhooks > Add webhook`. In the resulting menu set `Payload URL` to `http://:1619/github`, `Content type` to `application/json` and under the section `Which events would you like to trigger this webhook?` select 'Send me **everything**'. By default all of the events will write to the `github_webhooks` measurement, this is configurable by setting the `measurement_name` in the config file. You can also add a secret that will be used by telegraf to verify the authenticity of the requests. ## Events The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. 
The format is as follows: -``` + +```toml # TAGS * 'tagKey' = `tagValue` type # FIELDS * 'fieldKey' = `fieldValue` type ``` -The tag values and field values show the place on the incoming JSON object where the data is sourced from. -#### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent) +The tag values and field values show the place on the incoming JSON object where the data is sourced from. + +### [`commit_comment` event](https://developer.github.com/v3/activity/events/types/#commitcommentevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -25,15 +28,17 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.comment.commit_id` string * 'comment' = `event.comment.body` string -#### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent) +### [`create` event](https://developer.github.com/v3/activity/events/types/#createevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -41,15 +46,17 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'ref' = `event.ref` string * 'refType' = `event.ref_type` string -#### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent) +### [`delete` event](https://developer.github.com/v3/activity/events/types/#deleteevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -57,15 +64,17 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'ref' = `event.ref` string * 'refType' = `event.ref_type` string -#### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent) +### [`deployment` event](https://developer.github.com/v3/activity/events/types/#deploymentevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -73,6 +82,7 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -81,9 +91,10 @@ The tag values and field values show the place on the incoming JSON object where * 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string -#### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent) +### [`deployment_status` 
event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -91,6 +102,7 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -101,9 +113,10 @@ The tag values and field values show the place on the incoming JSON object where * 'depState' = `event.deployment_status.state` string * 'depDescription' = `event.deployment_status.description` string -#### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent) +### [`fork` event](https://developer.github.com/v3/activity/events/types/#forkevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -111,14 +124,16 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'forkee' = `event.forkee.repository` string -#### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent) +### [`gollum` event](https://developer.github.com/v3/activity/events/types/#gollumevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -126,13 +141,15 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int -#### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent) +### [`issue_comment` event](https://developer.github.com/v3/activity/events/types/#issuecommentevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -141,6 +158,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issue' = `event.issue.number` int **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -148,9 +166,10 @@ The tag values and field values show the place on the incoming JSON object where * 'comments' = `event.issue.comments` int * 'body' = `event.comment.body` string -#### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent) +### [`issues` event](https://developer.github.com/v3/activity/events/types/#issuesevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -160,15 +179,17 @@ The tag values and field values show the place on the incoming JSON object where * 'action' = `event.action` string **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 
'issues' = `event.repository.open_issues_count` int * 'title' = `event.issue.title` string * 'comments' = `event.issue.comments` int -#### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent) +### [`member` event](https://developer.github.com/v3/activity/events/types/#memberevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -176,27 +197,31 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'newMember' = `event.sender.login` string * 'newMemberStatus' = `event.sender.site_admin` bool -#### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent) +### [`membership` event](https://developer.github.com/v3/activity/events/types/#membershipevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'user' = `event.sender.login` string * 'admin' = `event.sender.site_admin` bool * 'action' = `event.action` string **Fields:** + * 'newMember' = `event.sender.login` string * 'newMemberStatus' = `event.sender.site_admin` bool -#### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent) +### [`page_build` event](https://developer.github.com/v3/activity/events/types/#pagebuildevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -204,13 +229,15 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int -#### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent) +### [`public` event](https://developer.github.com/v3/activity/events/types/#publicevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -218,13 +245,15 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int -#### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent) +### [`pull_request_review_comment` event](https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'action' = `event.action` string * 'repository' = `event.repository.full_name` string @@ -234,6 +263,7 @@ The tag values and field values show the place on the incoming JSON object where * 'prNumber' = `event.pull_request.number` int **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -247,9 +277,10 @@ The tag values and field values show the place on the incoming JSON object where * 'commentFile' = `event.comment.file` string * 'comment' 
= `event.comment.body` string -#### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent) +### [`pull_request` event](https://developer.github.com/v3/activity/events/types/#pullrequestevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'action' = `event.action` string * 'repository' = `event.repository.full_name` string @@ -259,6 +290,7 @@ The tag values and field values show the place on the incoming JSON object where * 'prNumber' = `event.pull_request.number` int **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -270,9 +302,10 @@ The tag values and field values show the place on the incoming JSON object where * 'deletions' = `event.pull_request.deletions` int * 'changedFiles' = `event.pull_request.changed_files` int -#### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent) +### [`push` event](https://developer.github.com/v3/activity/events/types/#pushevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -280,6 +313,7 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int @@ -287,9 +321,10 @@ The tag values and field values show the place on the incoming JSON object where * 'before' = `event.before` string * 'after' = `event.after` string -#### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent) +### [`repository` event](https://developer.github.com/v3/activity/events/types/#repositoryevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -297,13 +332,15 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int -#### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent) +### [`release` event](https://developer.github.com/v3/activity/events/types/#releaseevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -311,14 +348,16 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'tagName' = `event.release.tag_name` string -#### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent) +### [`status` event](https://developer.github.com/v3/activity/events/types/#statusevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -326,15 +365,17 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = 
`event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.sha` string * 'state' = `event.state` string -#### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent) +### [`team_add` event](https://developer.github.com/v3/activity/events/types/#teamaddevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -342,14 +383,16 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int * 'teamName' = `event.team.name` string -#### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent) +### [`watch` event](https://developer.github.com/v3/activity/events/types/#watchevent) **Tags:** + * 'event' = `headers[X-Github-Event]` string * 'repository' = `event.repository.full_name` string * 'private' = `event.repository.private` bool @@ -357,6 +400,7 @@ The tag values and field values show the place on the incoming JSON object where * 'admin' = `event.sender.site_admin` bool **Fields:** + * 'stars' = `event.repository.stargazers_count` int * 'forks' = `event.repository.forks_count` int * 'issues' = `event.repository.open_issues_count` int diff --git a/plugins/inputs/webhooks/mandrill/README.md b/plugins/inputs/webhooks/mandrill/README.md index 9c4f3a58ccebe..d65dfb63b6597 100644 --- a/plugins/inputs/webhooks/mandrill/README.md +++ b/plugins/inputs/webhooks/mandrill/README.md @@ -9,7 +9,9 @@ See the [webhook doc](https://mandrill.zendesk.com/hc/en-us/articles/205583307-M All events for logs the original timestamp, the event name and the unique identifier of the message that generated the event. **Tags:** + * 'event' = `event.event` string **Fields:** + * 'id' = `event._id` string diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md index 3f9c33ec5320c..8300d802dc543 100644 --- a/plugins/inputs/webhooks/papertrail/README.md +++ b/plugins/inputs/webhooks/papertrail/README.md @@ -28,7 +28,7 @@ Events from Papertrail come in two forms: When a callback is received, an event-based point will look similar to: -``` +```shell papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_name="abc",program="CROND",severity="Info",source_id=2i,message="message body",source_ip="208.75.57.121",id=7711561783320576i,facility="Cron",url="https://papertrailapp.com/searches/42?centered_on_id=7711561783320576",search_id=42i 1453248892000000000 ``` @@ -41,6 +41,6 @@ papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_nam When a callback is received, a count-based point will look similar to: -``` +```shell papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000 ``` diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index 8244fee8add1b..913dbccc6f5ac 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -1,6 +1,5 @@ # particle webhooks - You should configure your Particle.io's Webhooks to point at the `webhooks` service. 
To do this go to [https://console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: ```json @@ -11,22 +10,21 @@ You should configure your Particle.io's Webhooks to point at the `webhooks` serv If required, enter your username and password, etc. and then click `Save` - - ## Events Your Particle device should publish an event that contains a JSON in the form of: -``` + +```json String data = String::format("{ \"tags\" : { - \"tag_name\": \"tag_value\", - \"other_tag\": \"other_value\" + \"tag_name\": \"tag_value\", + \"other_tag\": \"other_value\" }, - \"values\": { - \"value_name\": %f, - \"other_value\": %f, + \"values\": { + \"value_name\": %f, + \"other_value\": %f, } }", value_value, other_value - ); + ); Particle.publish("event_name", data, PRIVATE); ``` @@ -35,5 +33,4 @@ The number of tag values and field values is not restricted so you can send as m You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well. - See [webhook doc](https://docs.particle.io/reference/webhooks/) diff --git a/plugins/inputs/webhooks/rollbar/README.md b/plugins/inputs/webhooks/rollbar/README.md index 471dc9fd0d2db..136516c8eafee 100644 --- a/plugins/inputs/webhooks/rollbar/README.md +++ b/plugins/inputs/webhooks/rollbar/README.md @@ -5,19 +5,22 @@ You should configure your Rollbar's Webhooks to point at the `webhooks` service. ## Events The titles of the following sections are links to the full payloads and details for each event. The body contains what information from the event is persisted. The format is as follows: -``` + +```toml # TAGS * 'tagKey' = `tagValue` type # FIELDS * 'fieldKey' = `fieldValue` type ``` + The tag values and field values show the place on the incoming JSON object where the data is sourced from. See [webhook doc](https://rollbar.com/docs/webhooks/) -#### `new_item` event +### `new_item` event **Tags:** + * 'event' = `event.event_name` string * 'environment' = `event.data.item.environment` string * 'project_id = `event.data.item.project_id` int @@ -25,11 +28,13 @@ See [webhook doc](https://rollbar.com/docs/webhooks/) * 'level' = `event.data.item.last_occurence.level` string **Fields:** + * 'id' = `event.data.item.id` int -#### `occurrence` event +### `occurrence` event **Tags:** + * 'event' = `event.event_name` string * 'environment' = `event.data.item.environment` string * 'project_id = `event.data.item.project_id` int @@ -37,14 +42,17 @@ See [webhook doc](https://rollbar.com/docs/webhooks/) * 'level' = `event.data.occurrence.level` string **Fields:** + * 'id' = `event.data.item.id` int -#### `deploy` event +### `deploy` event **Tags:** + * 'event' = `event.event_name` string * 'environment' = `event.data.deploy.environment` string * 'project_id = `event.data.deploy.project_id` int **Fields:** + * 'id' = `event.data.item.id` int diff --git a/plugins/inputs/wireguard/README.md b/plugins/inputs/wireguard/README.md index 57e16ba4942c9..1f1c7c9899266 100644 --- a/plugins/inputs/wireguard/README.md +++ b/plugins/inputs/wireguard/README.md @@ -4,7 +4,7 @@ The Wireguard input plugin collects statistics on the local Wireguard server using the [`wgctrl`](https://github.com/WireGuard/wgctrl-go) library. It reports gauge metrics for Wireguard interface device(s) and its peers. 
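For a sense of where these gauges come from, a minimal standalone sketch of reading the same per-device and per-peer counters straight from `wgctrl` might look like the following; the `golang.zx2c4.com/wireguard/wgctrl` import path and the `Device`/`Peer` field names are assumptions about the wgctrl-go API, and this is only an illustration, not the plugin's code.

```go
package main

import (
	"fmt"
	"log"

	"golang.zx2c4.com/wireguard/wgctrl"
)

func main() {
	client, err := wgctrl.New()
	if err != nil {
		log.Fatalf("opening wgctrl client: %v", err)
	}
	defer client.Close()

	devices, err := client.Devices()
	if err != nil {
		log.Fatalf("enumerating Wireguard devices: %v", err)
	}

	for _, dev := range devices {
		// Device-level gauges, comparable to the wireguard_device measurement below.
		fmt.Printf("device=%s type=%s listen_port=%d peers=%d\n",
			dev.Name, dev.Type, dev.ListenPort, len(dev.Peers))

		for _, peer := range dev.Peers {
			// Peer-level gauges, comparable to the wireguard_peer measurement below.
			fmt.Printf("  peer=%s rx_bytes=%d tx_bytes=%d last_handshake=%s\n",
				peer.PublicKey, peer.ReceiveBytes, peer.TransmitBytes,
				peer.LastHandshakeTime)
		}
	}
}
```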
-### Configuration +## Configuration ```toml # Collect Wireguard server interface and peer statistics @@ -14,7 +14,7 @@ reports gauge metrics for Wireguard interface device(s) and its peers. # devices = ["wg0"] ``` -### Metrics +## Metrics - `wireguard_device` - tags: @@ -37,9 +37,9 @@ reports gauge metrics for Wireguard interface device(s) and its peers. - `rx_bytes` (int, number of bytes received from this peer) - `tx_bytes` (int, number of bytes transmitted to this peer) -### Troubleshooting +## Troubleshooting -#### Error: `operation not permitted` +### Error: `operation not permitted` When the kernelspace implementation of Wireguard is in use (as opposed to its userspace implementations), Telegraf communicates with the module over netlink. @@ -50,22 +50,22 @@ To add this capability to the Telegraf binary (to allow this communication under the default user `telegraf`): ```bash -$ sudo setcap CAP_NET_ADMIN+epi $(which telegraf) +sudo setcap CAP_NET_ADMIN+epi $(which telegraf) ``` N.B.: This capability is a filesystem attribute on the binary itself. The attribute needs to be re-applied if the Telegraf binary is rotated (e.g. on installation of new a Telegraf version from the system package manager). -#### Error: `error enumerating Wireguard devices` +### Error: `error enumerating Wireguard devices` This usually happens when the device names specified in config are invalid. Ensure that `sudo wg show` succeeds, and that the device names in config match those printed by this command. -### Example Output +## Example Output -``` +```shell wireguard_device,host=WGVPN,name=wg0,type=linux_kernel firewall_mark=51820i,listen_port=58216i 1582513589000000000 wireguard_device,host=WGVPN,name=wg0,type=linux_kernel peers=1i 1582513589000000000 wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= allowed_ips=2i,persistent_keepalive_interval_ns=60000000000i,protocol_version=1i 1582513589000000000 diff --git a/plugins/inputs/wireless/README.md b/plugins/inputs/wireless/README.md index 6be7bd383d451..b46a4604834ea 100644 --- a/plugins/inputs/wireless/README.md +++ b/plugins/inputs/wireless/README.md @@ -2,7 +2,7 @@ The wireless plugin gathers metrics about wireless link quality by reading the `/proc/net/wireless` file. This plugin currently supports linux only. -### Configuration: +## Configuration ```toml # Monitor wifi signal strength and quality @@ -12,7 +12,7 @@ The wireless plugin gathers metrics about wireless link quality by reading the ` # host_proc = "/proc" ``` -### Metrics: +## Metrics - metric - tags: @@ -29,10 +29,10 @@ The wireless plugin gathers metrics about wireless link quality by reading the ` - misc (int64, packets, counter) - dropped for un-specified reason - missed_beacon (int64, packets, counter) - missed beacon packets -### Example Output: +## Example Output This section shows example output in Line Protocol format. -``` +```shell wireless,host=example.localdomain,interface=wlan0 misc=0i,frag=0i,link=60i,level=-50i,noise=-256i,nwid=0i,crypt=0i,retry=1525i,missed_beacon=0i,status=0i 1519843022000000000 ``` diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md index 5211c38e9a9c2..1412c4bd9ab4e 100644 --- a/plugins/inputs/x509_cert/README.md +++ b/plugins/inputs/x509_cert/README.md @@ -5,8 +5,7 @@ file or network connection. When using a UDP address as a certificate source, the server must support [DTLS](https://en.wikipedia.org/wiki/Datagram_Transport_Layer_Security). 
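The `age`/`expiry`-style fields listed under Metrics below are simple derivations from a certificate's validity window; a rough, self-contained illustration using only the Go standard library might look like this (the host name is a placeholder, and this is a sketch of the idea rather than the plugin's implementation):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Fetch the peer certificate chain from a TLS endpoint.
	conn, err := tls.Dial("tcp", "example.org:443", &tls.Config{})
	if err != nil {
		log.Fatalf("connecting: %v", err)
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		// age ~ seconds since NotBefore, expiry ~ seconds until NotAfter,
		// mirroring the startdate/enddate/age/expiry fields listed below.
		fmt.Printf("common_name=%s startdate=%d enddate=%d age=%d expiry=%d\n",
			cert.Subject.CommonName,
			cert.NotBefore.Unix(),
			cert.NotAfter.Unix(),
			int64(now.Sub(cert.NotBefore).Seconds()),
			int64(cert.NotAfter.Sub(now).Seconds()))
	}
}
```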
- -### Configuration +## Configuration ```toml # Reads metrics from a SSL certificate @@ -33,8 +32,7 @@ When using a UDP address as a certificate source, the server must support [DTLS] # tls_server_name = "myhost.example.org" ``` - -### Metrics +## Metrics - x509_cert - tags: @@ -59,10 +57,9 @@ When using a UDP address as a certificate source, the server must support [DTLS] - startdate (int, seconds) - enddate (int, seconds) +## Example output -### Example output - -``` +```shell x509_cert,common_name=ubuntu,source=/etc/ssl/certs/ssl-cert-snakeoil.pem,verification=valid age=7693222i,enddate=1871249033i,expiry=307666777i,startdate=1555889033i,verification_code=0i 1563582256000000000 x509_cert,common_name=www.example.org,country=US,locality=Los\ Angeles,organization=Internet\ Corporation\ for\ Assigned\ Names\ and\ Numbers,organizational_unit=Technology,province=California,source=https://example.org:443,verification=invalid age=20219055i,enddate=1606910400i,expiry=43328144i,startdate=1543363200i,verification_code=1i,verification_error="x509: certificate signed by unknown authority" 1563582256000000000 x509_cert,common_name=DigiCert\ SHA2\ Secure\ Server\ CA,country=US,organization=DigiCert\ Inc,source=https://example.org:443,verification=valid age=200838255i,enddate=1678276800i,expiry=114694544i,startdate=1362744000i,verification_code=0i 1563582256000000000 diff --git a/plugins/inputs/zipkin/README.md b/plugins/inputs/zipkin/README.md index f07ca6e55afad..3b47da5f430d5 100644 --- a/plugins/inputs/zipkin/README.md +++ b/plugins/inputs/zipkin/README.md @@ -5,7 +5,8 @@ This plugin implements the Zipkin http server to gather trace and timing data ne *Please Note: This plugin is experimental; Its data schema may be subject to change based on its main usage cases and the evolution of the OpenTracing standard.* -## Configuration: +## Configuration + ```toml [[inputs.zipkin]] path = "/api/v1/spans" # URL path for span data @@ -15,7 +16,7 @@ based on its main usage cases and the evolution of the OpenTracing standard.* The plugin accepts spans in `JSON` or `thrift` if the `Content-Type` is `application/json` or `application/x-thrift`, respectively. If `Content-Type` is not set, then the plugin assumes it is `JSON` format. -## Tracing: +## Tracing This plugin uses Annotations tags and fields to track data from spans @@ -28,93 +29,106 @@ Traces are built by collecting all Spans that share a traceId. Annotations may have the following values: - - __CS (client start):__ beginning of span, request is made. - - __SR (server receive):__ server receives request and will start processing it + - __CS (client start):__ beginning of span, request is made. + - __SR (server receive):__ server receives request and will start processing it network latency & clock jitters differ it from cs - - __SS (server send):__ server is done processing and sends request back to client + - __SS (server send):__ server is done processing and sends request back to client amount of time it took to process request will differ it from sr - - __CR (client receive):__ end of span, client receives response from server + - __CR (client receive):__ end of span, client receives response from server RPC is considered complete with this annotation ### Tags -* __"id":__ The 64 bit ID of the span. -* __"parent_id":__ An ID associated with a particular child span. If there is no child span, the parent ID is set to ID. -* __"trace_id":__ The 64 or 128-bit ID of a particular trace. Every span in a trace shares this ID. 
Concatenation of high and low and converted to hexadecimal. -* __"name":__ Defines a span - -##### Annotations have these additional tags: -* __"service_name":__ Defines a service -* __"annotation":__ The value of an annotation -* __"endpoint_host":__ Listening port concat with IPV4, if port is not present it will not be concatenated +- __"id":__ The 64 bit ID of the span. +- __"parent_id":__ An ID associated with a particular child span. If there is no child span, the parent ID is set to ID. +- __"trace_id":__ The 64 or 128-bit ID of a particular trace. Every span in a trace shares this ID. Concatenation of high and low and converted to hexadecimal. +- __"name":__ Defines a span -##### Binary Annotations have these additional tag: +#### Annotations have these additional tags - * __"service_name":__ Defines a service - * __"annotation":__ The value of an annotation - * __"endpoint_host":__ Listening port concat with IPV4, if port is not present it will not be concatenated - * __"annotation_key":__ label describing the annotation +- __"service_name":__ Defines a service +- __"annotation":__ The value of an annotation +- __"endpoint_host":__ Listening port concat with IPV4, if port is not present it will not be concatenated +#### Binary Annotations have these additional tag -### Fields: - * __"duration_ns":__ The time in nanoseconds between the end and beginning of a span. +- __"service_name":__ Defines a service +- __"annotation":__ The value of an annotation +- __"endpoint_host":__ Listening port concat with IPV4, if port is not present it will not be concatenated +- __"annotation_key":__ label describing the annotation +## Fields +- __"duration_ns":__ The time in nanoseconds between the end and beginning of a span. -### Sample Queries: +## Sample Queries __Get All Span Names for Service__ `my_web_server` + ```sql SHOW TAG VALUES FROM "zipkin" with key="name" WHERE "service_name" = 'my_web_server' ``` - - __Description:__ returns a list containing the names of the spans which have annotations with the given `service_name` of `my_web_server`. -__Get All Service Names__ +- __Description:__ returns a list containing the names of the spans which have annotations with the given `service_name` of `my_web_server`. + +-__Get All Service Names__- + ```sql SHOW TAG VALUES FROM "zipkin" WITH KEY = "service_name" ``` - - __Description:__ returns a list of all `distinct` endpoint service names. -__Find spans with longest duration__ +- __Description:__ returns a list of all `distinct` endpoint service names. + +-__Find spans with longest duration__- + ```sql SELECT max("duration_ns") FROM "zipkin" WHERE "service_name" = 'my_service' AND "name" = 'my_span_name' AND time > now() - 20m GROUP BY "trace_id",time(30s) LIMIT 5 ``` - - __Description:__ In the last 20 minutes find the top 5 longest span durations for service `my_server` and span name `my_span_name` +- __Description:__ In the last 20 minutes find the top 5 longest span durations for service `my_server` and span name `my_span_name` ### Recommended InfluxDB setup This test will create high cardinality data so we recommend using the [tsi influxDB engine](https://www.influxdata.com/path-1-billion-time-series-influxdb-high-cardinality-indexing-ready-testing/). + #### How To Set Up InfluxDB For Work With Zipkin - ##### Steps - 1. ___Update___ InfluxDB to >= 1.3, in order to use the new tsi engine. +##### Steps - 2. ___Generate___ a config file with the following command: -```sh -influxd config > /path/for/config/file -``` - 3. 
___Add___ the following to your config file, under the `[data]` tab: -```toml -[data] - index-version = "tsi1" -``` +1. ___Update___ InfluxDB to >= 1.3, in order to use the new tsi engine. - 4. ___Start___ `influxd` with your new config file: -```sh -influxd -config=/path/to/your/config/file -``` +2. ___Generate___ a config file with the following command: - 5. ___Update___ your retention policy: -```sql -ALTER RETENTION POLICY "autogen" ON "telegraf" DURATION 1d SHARD DURATION 30m -``` + ```sh + influxd config > /path/for/config/file + ``` + +3. ___Add___ the following to your config file, under the `[data]` tab: + + ```toml + [data] + index-version = "tsi1" + ``` + +4. ___Start___ `influxd` with your new config file: -### Example Input Trace: + ```sh + influxd -config=/path/to/your/config/file + ``` + +5. ___Update___ your retention policy: + + ```sql + ALTER RETENTION POLICY "autogen" ON "telegraf" DURATION 1d SHARD DURATION 30m + ``` + +### Example Input Trace - [Cli microservice with two services Test](https://github.com/openzipkin/zipkin-go-opentracing/tree/master/examples/cli_with_2_services) - [Test data from distributed trace repo sample json](https://github.com/mattkanwisher/distributedtrace/blob/master/testclient/sample.json) + #### [Trace Example from Zipkin model](http://zipkin.io/pages/data_model.html) + ```json { "traceId": "bd7a977555f6b982", diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md index 0ce7f442a4bba..76c89aac22d46 100644 --- a/plugins/inputs/zookeeper/README.md +++ b/plugins/inputs/zookeeper/README.md @@ -3,7 +3,7 @@ The zookeeper plugin collects variables outputted from the 'mntr' command [Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). -### Configuration +## Configuration ```toml # Reads 'mntr' stats from one or many zookeeper servers @@ -27,7 +27,7 @@ The zookeeper plugin collects variables outputted from the 'mntr' command # insecure_skip_verify = true ``` -### Metrics: +## Metrics Exact field names are based on Zookeeper response and may vary between configuration, platform, and version. @@ -56,9 +56,10 @@ configuration, platform, and version. 
- synced_followers (integer, leader only) - pending_syncs (integer, leader only) -### Debugging: +## Debugging If you have any issues please check the direct Zookeeper output using netcat: + ```sh $ echo mntr | nc localhost 2181 zk_version 3.4.9-3--1, built on Thu, 01 Jun 2017 16:26:44 -0700 @@ -78,8 +79,8 @@ zk_open_file_descriptor_count 44 zk_max_file_descriptor_count 4096 ``` -### Example Output +## Example Output -``` +```shell zookeeper,server=localhost,port=2181,state=standalone ephemerals_count=0i,approximate_data_size=10044i,open_file_descriptor_count=44i,max_latency=0i,packets_received=7i,outstanding_requests=0i,znode_count=129i,max_file_descriptor_count=4096i,version="3.4.9-3--1",avg_latency=0i,packets_sent=6i,num_alive_connections=1i,watch_count=0i,min_latency=0i 1522351112000000000 ``` From 8e85a67ee112ebfb60b9c2710b595990d4d95da5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 24 Nov 2021 19:52:51 +0100 Subject: [PATCH 075/133] fix: Linter fixes for plugins/parsers/[a-z]* (#10145) --- config/config_test.go | 12 +- plugins/parsers/collectd/parser.go | 11 +- plugins/parsers/collectd/parser_test.go | 7 +- plugins/parsers/csv/parser_test.go | 7 +- plugins/parsers/dropwizard/parser.go | 8 +- plugins/parsers/dropwizard/parser_test.go | 109 +++--- plugins/parsers/graphite/parser.go | 7 +- plugins/parsers/graphite/parser_test.go | 132 +++---- plugins/parsers/grok/parser.go | 51 +-- plugins/parsers/grok/parser_test.go | 351 +++++++++--------- plugins/parsers/influx/machine_test.go | 14 +- plugins/parsers/influx/parser_test.go | 8 +- plugins/parsers/json/parser.go | 25 +- plugins/parsers/nagios/parser.go | 22 +- plugins/parsers/nagios/parser_test.go | 49 ++- plugins/parsers/prometheus/parser.go | 17 +- plugins/parsers/prometheus/parser_test.go | 29 +- .../prometheusremotewrite/parser_test.go | 23 +- plugins/parsers/value/parser_test.go | 156 ++++---- plugins/parsers/wavefront/element.go | 8 +- plugins/parsers/wavefront/parser.go | 8 +- plugins/parsers/wavefront/parser_test.go | 137 +++---- plugins/parsers/xpath/parser.go | 12 +- 23 files changed, 620 insertions(+), 583 deletions(-) diff --git a/config/config_test.go b/config/config_test.go index 940b84ada7773..546b752f3a383 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -10,13 +10,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { @@ -140,12 +141,17 @@ func TestConfig_LoadDirectory(t *testing.T) { expectedConfigs[0].Tags = make(map[string]string) expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin) - p, err := parsers.NewParser(&parsers.Config{ + parserConfig := &parsers.Config{ MetricName: "exec", DataFormat: "json", JSONStrict: true, - }) + } + p, err := parsers.NewParser(parserConfig) require.NoError(t, err) + + // Inject logger to have proper struct for comparison + models.SetLoggerOnPlugin(p, models.NewLogger("parsers", parserConfig.DataFormat, parserConfig.MetricName)) + expectedPlugins[1].SetParser(p) expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar" expectedConfigs[1] = &models.InputConfig{ diff --git a/plugins/parsers/collectd/parser.go 
b/plugins/parsers/collectd/parser.go index f0f9773472c4f..3bd99257c31d9 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -3,7 +3,6 @@ package collectd import ( "errors" "fmt" - "log" "os" "collectd.org/api" @@ -24,6 +23,7 @@ type CollectdParser struct { //whether or not to split multi value metric into multiple metrics //default value is split ParseMultiValue string + Log telegraf.Logger `toml:"-"` popts network.ParseOpts } @@ -81,7 +81,7 @@ func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := []telegraf.Metric{} for _, valueList := range valueLists { - metrics = append(metrics, UnmarshalValueList(valueList, p.ParseMultiValue)...) + metrics = append(metrics, p.unmarshalValueList(valueList)...) } if len(p.DefaultTags) > 0 { @@ -115,12 +115,13 @@ func (p *CollectdParser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -// UnmarshalValueList translates a ValueList into a Telegraf metric. -func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric { +// unmarshalValueList translates a ValueList into a Telegraf metric. +func (p *CollectdParser) unmarshalValueList(vl *api.ValueList) []telegraf.Metric { timestamp := vl.Time.UTC() var metrics []telegraf.Metric + var multiValue = p.ParseMultiValue //set multiValue to default "split" if nothing is specified if multiValue == "" { multiValue = "split" @@ -192,7 +193,7 @@ func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric metrics = append(metrics, m) default: - log.Printf("parse-multi-value config can only be 'split' or 'join'") + p.Log.Info("parse-multi-value config can only be 'split' or 'join'") } return metrics } diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index a218341569e23..4893fbbe58dc3 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -6,7 +6,6 @@ import ( "collectd.org/api" "collectd.org/network" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -144,7 +143,7 @@ func TestParseMultiValueSplit(t *testing.T) { metrics, err := parser.Parse(bytes) require.NoError(t, err) - assert.Equal(t, 2, len(metrics)) + require.Equal(t, 2, len(metrics)) } func TestParse_DefaultTags(t *testing.T) { @@ -215,7 +214,7 @@ func TestParse_SignSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes) + _, err = parser.Parse(bytes) require.Error(t, err) } @@ -270,7 +269,7 @@ func TestParse_EncryptSecurityLevel(t *testing.T) { bytes, err = buf.Bytes() require.NoError(t, err) - metrics, err = parser.Parse(bytes) + _, err = parser.Parse(bytes) require.Error(t, err) } diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go index 7eb1d0d8dbed0..5fc72bdb5f9e7 100644 --- a/plugins/parsers/csv/parser_test.go +++ b/plugins/parsers/csv/parser_test.go @@ -6,10 +6,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var DefaultTime = func() time.Time { @@ -100,6 +101,8 @@ func TestTimestamp(t *testing.T) { TimeFunc: DefaultTime, }, ) + require.NoError(t, err) + testCSV := `line1,line2,line3 23/05/09 04:05:06 PM,70,test_name 07/11/09 04:05:06 PM,80,test_name2` @@ -121,6 +124,8 @@ func 
TestTimestampYYYYMMDDHHmm(t *testing.T) { TimeFunc: DefaultTime, }, ) + require.NoError(t, err) + testCSV := `line1,line2,line3 200905231605,70,test_name 200907111605,80,test_name2` diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 2115bd8a07e78..250f08a297edc 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -3,14 +3,14 @@ package dropwizard import ( "encoding/json" "fmt" - "log" "time" + "github.com/tidwall/gjson" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/templating" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/tidwall/gjson" ) type TimeFunc func() time.Time @@ -42,6 +42,8 @@ type parser struct { // an optional map of default tags to use for metrics DefaultTags map[string]string + Log telegraf.Logger `toml:"-"` + separator string templateEngine *templating.Engine @@ -152,7 +154,7 @@ func (p *parser) readTags(buf []byte) map[string]string { var tags map[string]string err := json.Unmarshal(tagsBytes, &tags) if err != nil { - log.Printf("W! failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err) + p.Log.Warnf("Failed to parse tags from JSON path '%s': %s\n", p.TagsPath, err) } else if len(tags) > 0 { return tags } diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index b867670c9400e..dfd05f4b7164c 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -1,16 +1,15 @@ package dropwizard import ( - "testing" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "fmt" + "github.com/influxdata/telegraf/testutil" + "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" ) var testTimeFunc = func() time.Time { @@ -34,8 +33,8 @@ func TestParseValidEmptyJSON(t *testing.T) { // Most basic vanilla test metrics, err := parser.Parse([]byte(validEmptyJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 0) + require.NoError(t, err) + require.Len(t, metrics, 0) } // validCounterJSON is a valid dropwizard json document containing one counter @@ -58,13 +57,13 @@ func TestParseValidCounterJSON(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validCounterJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "counter"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "counter"}, metrics[0].Tags()) } // validEmbeddedCounterJSON is a valid json document containing separate fields for dropwizard metrics, tags and time override. 
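Because the parser now reports tag-path problems through `p.Log` rather than the global logger, code constructing it directly has to inject a logger before a failing lookup can be logged, which is why the test updates below set one explicitly. A compressed sketch of that pattern, assuming the package's existing `NewParser` helper and `validEmbeddedCounterJSON` fixture:

```go
package dropwizard

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/influxdata/telegraf/testutil"
)

// Sketch only: a wrong TagsPath exercises the p.Log.Warnf branch, so the
// logger must be set before Parse is called.
func TestParserWarnsOnBadTagsPath(t *testing.T) {
	parser := NewParser()
	parser.MetricRegistryPath = "metrics"
	parser.TagsPath = "tags1" // deliberately wrong, as in TestTagParsingProblems
	parser.Log = testutil.Logger{}

	metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON))
	require.NoError(t, err)
	require.Len(t, metrics, 1)
}
```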
@@ -99,19 +98,19 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { parser.TimePath = "time" metrics, err := parser.Parse([]byte(validEmbeddedCounterJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "metric_type": "counter", "tag1": "green", "tag2": "yellow", "tag3 space,comma=equals": "red ,=", }, metrics[0].Tags()) - assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) + require.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) // now test json tags through TagPathsMap parser2 := NewParser() @@ -119,8 +118,8 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { parser2.TagPathsMap = map[string]string{"tag1": "tags.tag1"} parser2.TimePath = "time" metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON)) - assert.NoError(t, err2) - assert.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) + require.NoError(t, err2) + require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) } // validMeterJSON1 is a valid dropwizard json document containing one meter @@ -148,10 +147,10 @@ func TestParseValidMeterJSON1(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validMeterJSON1)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement1", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement1", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(1), "m15_rate": float64(1), "m1_rate": float64(1), @@ -160,7 +159,7 @@ func TestParseValidMeterJSON1(t *testing.T) { "units": "events/second", }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "meter"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "meter"}, metrics[0].Tags()) } // validMeterJSON2 is a valid dropwizard json document containing one meter with one tag @@ -188,10 +187,10 @@ func TestParseValidMeterJSON2(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validMeterJSON2)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement2", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement2", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(2), "m15_rate": float64(2), "m1_rate": float64(2), @@ -199,7 +198,7 @@ func TestParseValidMeterJSON2(t *testing.T) { "mean_rate": float64(2), "units": "events/second", }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "meter", "key": "value"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "meter", "key": "value"}, metrics[0].Tags()) } // validGaugeJSON is a valid dropwizard json document containing one gauge @@ -222,13 +221,13 @@ func TestParseValidGaugeJSON(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validGaugeJSON)) - assert.NoError(t, err) - 
assert.Len(t, metrics, 1) - assert.Equal(t, "measurement", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": true, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "gauge"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "gauge"}, metrics[0].Tags()) } // validHistogramJSON is a valid dropwizard json document containing one histogram @@ -261,10 +260,10 @@ func TestParseValidHistogramJSON(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validHistogramJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(1), "max": float64(2), "mean": float64(3), @@ -277,7 +276,7 @@ func TestParseValidHistogramJSON(t *testing.T) { "p999": float64(10), "stddev": float64(11), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "histogram"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "histogram"}, metrics[0].Tags()) } // validTimerJSON is a valid dropwizard json document containing one timer @@ -316,10 +315,10 @@ func TestParseValidTimerJSON(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validTimerJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "measurement", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "measurement", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "count": float64(1), "max": float64(2), "mean": float64(3), @@ -338,7 +337,7 @@ func TestParseValidTimerJSON(t *testing.T) { "duration_units": "seconds", "rate_units": "calls/second", }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"metric_type": "timer"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"metric_type": "timer"}, metrics[0].Tags()) } // validAllJSON is a valid dropwizard json document containing one metric of each type @@ -367,8 +366,8 @@ func TestParseValidAllJSON(t *testing.T) { parser := NewParser() metrics, err := parser.Parse([]byte(validAllJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 5) + require.NoError(t, err) + require.Len(t, metrics, 5) } func TestTagParsingProblems(t *testing.T) { @@ -376,20 +375,22 @@ func TestTagParsingProblems(t *testing.T) { parser1 := NewParser() parser1.MetricRegistryPath = "metrics" parser1.TagsPath = "tags1" + parser1.Log = testutil.Logger{} metrics1, err1 := parser1.Parse([]byte(validEmbeddedCounterJSON)) - assert.NoError(t, err1) - assert.Len(t, metrics1, 1) - assert.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags()) + require.NoError(t, err1) + require.Len(t, metrics1, 1) + require.Equal(t, map[string]string{"metric_type": "counter"}, metrics1[0].Tags()) // giving a wrong TagsPath falls back to TagPathsMap parser2 := NewParser() parser2.MetricRegistryPath = "metrics" parser2.TagsPath = "tags1" parser2.TagPathsMap = map[string]string{"tag1": "tags.tag1"} + parser2.Log = testutil.Logger{} metrics2, err2 := parser2.Parse([]byte(validEmbeddedCounterJSON)) - assert.NoError(t, err2) - assert.Len(t, metrics2, 1) - assert.Equal(t, 
map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) + require.NoError(t, err2) + require.Len(t, metrics2, 1) + require.Equal(t, map[string]string{"metric_type": "counter", "tag1": "green"}, metrics2[0].Tags()) } // sampleTemplateJSON is a sample json document containing metrics to be tested against the templating engine. diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index dac4f55f83f25..954424cefcbc4 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -20,7 +20,6 @@ var ( MaxDate = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC) ) -// Parser encapsulates a Graphite Parser. type GraphiteParser struct { Separator string Templates []string @@ -77,9 +76,9 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { line = bytes.TrimSpace(buf) // last line } if len(line) != 0 { - metric, err := p.ParseLine(string(line)) + m, err := p.ParseLine(string(line)) if err == nil { - metrics = append(metrics, metric) + metrics = append(metrics, m) } else { errs = append(errs, err.Error()) } @@ -95,7 +94,7 @@ func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } -// Parse performs Graphite parsing of a single line. +// ParseLine performs Graphite parsing of a single line. func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { // Break into 3 fields (name, value, timestamp). fields := strings.Fields(line) diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 991cce661762c..b08f7cd0ec9c6 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -6,11 +6,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/internal/templating" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func BenchmarkParse(b *testing.B) { @@ -30,7 +30,8 @@ func BenchmarkParse(b *testing.B) { } for i := 0; i < b.N; i++ { - p.Parse([]byte("servers.localhost.cpu.load 11 1435077219")) + _, err := p.Parse([]byte("servers.localhost.cpu.load 11 1435077219")) + require.NoError(b, err) } } @@ -285,7 +286,7 @@ func TestParseLine(t *testing.T) { t.Fatalf("unexpected error creating graphite parser: %v", err) } - metric, err := p.ParseLine(test.input) + m, err := p.ParseLine(test.input) if errstr(err) != test.err { t.Fatalf("err does not match. expected %v, got %v", test.err, err) } @@ -293,22 +294,22 @@ func TestParseLine(t *testing.T) { // If we erred out,it was intended and the following tests won't work continue } - if metric.Name() != test.measurement { + if m.Name() != test.measurement { t.Fatalf("name parse failer. expected %v, got %v", - test.measurement, metric.Name()) + test.measurement, m.Name()) } - if len(metric.Tags()) != len(test.tags) { + if len(m.Tags()) != len(test.tags) { t.Fatalf("tags len mismatch. expected %d, got %d", - len(test.tags), len(metric.Tags())) + len(test.tags), len(m.Tags())) } - f := metric.Fields()["value"].(float64) + f := m.Fields()["value"].(float64) if f != test.value { t.Fatalf("floatValue value mismatch. expected %v, got %v", test.value, f) } - if metric.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 { + if m.Time().UnixNano()/1000000 != test.time.UnixNano()/1000000 { t.Fatalf("time value mismatch. 
expected %v, got %v", - test.time.UnixNano(), metric.Time().UnixNano()) + test.time.UnixNano(), m.Time().UnixNano()) } } } @@ -478,9 +479,9 @@ func TestFilterMatchDefault(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("miss.servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchMultipleMeasurement(t *testing.T) { @@ -495,9 +496,9 @@ func TestFilterMatchMultipleMeasurement(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) { @@ -505,7 +506,7 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) { []string{"servers.localhost .host.measurement.measurement*"}, nil, ) - assert.NoError(t, err) + require.NoError(t, err) exp := metric.New("cpu_cpu_load_10", map[string]string{"host": "localhost"}, @@ -513,9 +514,9 @@ func TestFilterMatchMultipleMeasurementSeparator(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.cpu.cpu_load.10 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchSingle(t *testing.T) { @@ -530,9 +531,9 @@ func TestFilterMatchSingle(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestParseNoMatch(t *testing.T) { @@ -547,9 +548,9 @@ func TestParseNoMatch(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.memory.VmallocChunk 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchWildcard(t *testing.T) { @@ -564,9 +565,9 @@ func TestFilterMatchWildcard(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchExactBeforeWildcard(t *testing.T) { @@ -583,9 +584,9 @@ func TestFilterMatchExactBeforeWildcard(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestFilterMatchMostLongestFilter(t *testing.T) { @@ -602,7 +603,7 @@ func TestFilterMatchMostLongestFilter(t *testing.T) { } m, err := p.ParseLine("servers.localhost.cpu.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) value, ok := m.GetTag("host") require.True(t, ok) @@ -631,9 +632,9 @@ func TestFilterMatchMultipleWildcards(t *testing.T) { time.Unix(1435077219, 0)) m, err := p.ParseLine("servers.server01.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, exp, m) + require.Equal(t, exp, m) } func TestParseDefaultTags(t *testing.T) { @@ -647,7 +648,7 @@ func TestParseDefaultTags(t *testing.T) { } m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) value, ok := m.GetTag("host") require.True(t, ok) @@ -672,7 +673,7 @@ func TestParseDefaultTemplateTags(t *testing.T) { } m, err := 
p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) value, ok := m.GetTag("host") require.True(t, ok) @@ -698,7 +699,7 @@ func TestParseDefaultTemplateTagsOverridGlobal(t *testing.T) { m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") _ = m - assert.NoError(t, err) + require.NoError(t, err) value, ok := m.GetTag("host") require.True(t, ok) @@ -725,7 +726,7 @@ func TestParseTemplateWhitespace(t *testing.T) { } m, err := p.ParseLine("servers.localhost.cpu_load 11 1435077219") - assert.NoError(t, err) + require.NoError(t, err) value, ok := m.GetTag("host") require.True(t, ok) @@ -745,10 +746,11 @@ func TestApplyTemplate(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement"}, nil) - assert.NoError(t, err) + require.NoError(t, err) - measurement, _, _, _ := p.ApplyTemplate("current.users") - assert.Equal(t, "current_users", measurement) + measurement, _, _, err := p.ApplyTemplate("current.users") + require.NoError(t, err) + require.Equal(t, "current_users", measurement) } // Test basic functionality of ApplyTemplate @@ -756,10 +758,11 @@ func TestApplyTemplateNoMatch(t *testing.T) { p, err := NewGraphiteParser(".", []string{"foo.bar measurement.measurement"}, nil) - assert.NoError(t, err) + require.NoError(t, err) - measurement, _, _, _ := p.ApplyTemplate("current.users") - assert.Equal(t, "current.users", measurement) + measurement, _, _, err := p.ApplyTemplate("current.users") + require.NoError(t, err) + require.Equal(t, "current.users", measurement) } // Test that most specific template is chosen @@ -769,10 +772,10 @@ func TestApplyTemplateSpecific(t *testing.T) { "current.* measurement.measurement", "current.*.* measurement.measurement.service", }, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, tags, _, _ := p.ApplyTemplate("current.users.facebook") - assert.Equal(t, "current_users", measurement) + require.Equal(t, "current_users", measurement) service, ok := tags["service"] if !ok { @@ -786,10 +789,10 @@ func TestApplyTemplateSpecific(t *testing.T) { func TestApplyTemplateTags(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement region=us-west"}, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, tags, _, _ := p.ApplyTemplate("current.users") - assert.Equal(t, "current_users", measurement) + require.Equal(t, "current_users", measurement) region, ok := tags["region"] if !ok { @@ -803,11 +806,11 @@ func TestApplyTemplateTags(t *testing.T) { func TestApplyTemplateField(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement.field"}, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, _, field, err := p.ApplyTemplate("current.users.logged_in") - - assert.Equal(t, "current_users", measurement) + require.NoError(t, err) + require.Equal(t, "current_users", measurement) if field != "logged_in" { t.Errorf("Parser.ApplyTemplate unexpected result. 
got %s, exp %s", @@ -818,11 +821,11 @@ func TestApplyTemplateField(t *testing.T) { func TestApplyTemplateMultipleFieldsTogether(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement.field.field"}, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh") - - assert.Equal(t, "current_users", measurement) + require.NoError(t, err) + require.Equal(t, "current_users", measurement) if field != "logged_in_ssh" { t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", @@ -833,11 +836,11 @@ func TestApplyTemplateMultipleFieldsTogether(t *testing.T) { func TestApplyTemplateMultipleFieldsApart(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement.field.method.field"}, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, _, field, err := p.ApplyTemplate("current.users.logged_in.ssh.total") - - assert.Equal(t, "current_users", measurement) + require.NoError(t, err) + require.Equal(t, "current_users", measurement) if field != "logged_in_total" { t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", @@ -848,11 +851,11 @@ func TestApplyTemplateMultipleFieldsApart(t *testing.T) { func TestApplyTemplateGreedyField(t *testing.T) { p, err := NewGraphiteParser("_", []string{"current.* measurement.measurement.field*"}, nil) - assert.NoError(t, err) + require.NoError(t, err) measurement, _, field, err := p.ApplyTemplate("current.users.logged_in") - - assert.Equal(t, "current_users", measurement) + require.NoError(t, err) + require.Equal(t, "current_users", measurement) if field != "logged_in" { t.Errorf("Parser.ApplyTemplate unexpected result. got %s, exp %s", @@ -868,11 +871,12 @@ func TestApplyTemplateOverSpecific(t *testing.T) { }, nil, ) - assert.NoError(t, err) + require.NoError(t, err) measurement, tags, _, err := p.ApplyTemplate("net.server001.a.b 2") - assert.Equal(t, "net", measurement) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, "net", measurement) + require.Equal(t, map[string]string{"host": "server001", "metric": "a.b"}, tags) } @@ -887,17 +891,19 @@ func TestApplyTemplateMostSpecificTemplate(t *testing.T) { }, nil, ) - assert.NoError(t, err) + require.NoError(t, err) measurement, tags, _, err := p.ApplyTemplate("net.server001.a.b.c 2") - assert.Equal(t, "net", measurement) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, "net", measurement) + require.Equal(t, map[string]string{"host": "server001", "metric": "a.b.c"}, tags) measurement, tags, _, err = p.ApplyTemplate("net.server001.a.b 2") - assert.Equal(t, "net", measurement) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, "net", measurement) + require.Equal(t, map[string]string{"host": "server001", "metric": "a.b"}, tags) } diff --git a/plugins/parsers/grok/parser.go b/plugins/parsers/grok/parser.go index 57e6269994ed2..f869b3039c483 100644 --- a/plugins/parsers/grok/parser.go +++ b/plugins/parsers/grok/parser.go @@ -4,16 +4,16 @@ import ( "bufio" "bytes" "fmt" - "log" "os" "regexp" "strconv" "strings" "time" + "github.com/vjeantet/grok" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/vjeantet/grok" ) var timeLayouts = map[string]string{ @@ -76,6 +76,7 @@ type Parser struct { CustomPatternFiles []string Measurement string DefaultTags map[string]string + Log telegraf.Logger `toml:"-"` // Timezone is an optional component to help render log dates to // your 
chosen zone. @@ -107,13 +108,13 @@ type Parser struct { // } // } tsMap map[string]map[string]string - // patterns is a map of all of the parsed patterns from CustomPatterns + // patternsMap is a map of all of the parsed patterns from CustomPatterns // and CustomPatternFiles. // ie, { // "DURATION": "%{NUMBER}[nuµm]?s" // "RESPONSE_CODE": "%{NUMBER:rc:tag}" // } - patterns map[string]string + patternsMap map[string]string // foundTsLayouts is a slice of timestamp patterns that have been found // in the log lines. This slice gets updated if the user uses the generic // 'ts' modifier for timestamps. This slice is checked first for matches, @@ -130,7 +131,7 @@ type Parser struct { func (p *Parser) Compile() error { p.typeMap = make(map[string]map[string]string) p.tsMap = make(map[string]map[string]string) - p.patterns = make(map[string]string) + p.patternsMap = make(map[string]string) p.tsModder = &tsModder{} var err error p.g, err = grok.NewWithConfig(&grok.Config{NamedCapturesOnly: true}) @@ -180,7 +181,7 @@ func (p *Parser) Compile() error { p.loc, err = time.LoadLocation(p.Timezone) if err != nil { - log.Printf("W! improper timezone supplied (%s), setting loc to UTC", p.Timezone) + p.Log.Warnf("Improper timezone supplied (%s), setting loc to UTC", p.Timezone) p.loc, _ = time.LoadLocation("UTC") } @@ -209,7 +210,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } if len(values) == 0 { - log.Printf("D! Grok no match found for: %q", line) + p.Log.Debugf("Grok no match found for: %q", line) return nil, nil } @@ -252,21 +253,21 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case Int: iv, err := strconv.ParseInt(v, 0, 64) if err != nil { - log.Printf("E! Error parsing %s to int: %s", v, err) + p.Log.Errorf("Error parsing %s to int: %s", v, err) } else { fields[k] = iv } case Float: fv, err := strconv.ParseFloat(v, 64) if err != nil { - log.Printf("E! Error parsing %s to float: %s", v, err) + p.Log.Errorf("Error parsing %s to float: %s", v, err) } else { fields[k] = fv } case Duration: d, err := time.ParseDuration(v) if err != nil { - log.Printf("E! Error parsing %s to duration: %s", v, err) + p.Log.Errorf("Error parsing %s to duration: %s", v, err) } else { fields[k] = int64(d) } @@ -277,13 +278,13 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case Epoch: parts := strings.SplitN(v, ".", 2) if len(parts) == 0 { - log.Printf("E! Error parsing %s to timestamp: %s", v, err) + p.Log.Errorf("Error parsing %s to timestamp: %s", v, err) break } sec, err := strconv.ParseInt(parts[0], 10, 64) if err != nil { - log.Printf("E! Error parsing %s to timestamp: %s", v, err) + p.Log.Errorf("Error parsing %s to timestamp: %s", v, err) break } ts := time.Unix(sec, 0) @@ -293,7 +294,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { nsString := strings.Replace(padded[:9], " ", "0", -1) nanosec, err := strconv.ParseInt(nsString, 10, 64) if err != nil { - log.Printf("E! Error parsing %s to timestamp: %s", v, err) + p.Log.Errorf("Error parsing %s to timestamp: %s", v, err) break } ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) @@ -302,14 +303,14 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case EpochMilli: ms, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("E! 
Error parsing %s to int: %s", v, err) + p.Log.Errorf("Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, ms*int64(time.Millisecond)) } case EpochNano: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { - log.Printf("E! Error parsing %s to int: %s", v, err) + p.Log.Errorf("Error parsing %s to int: %s", v, err) } else { timestamp = time.Unix(0, iv) } @@ -321,7 +322,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } timestamp = ts } else { - log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) + p.Log.Errorf("Error parsing %s to time layout [%s]: %s", v, t, err) } case GenericTimestamp: var foundTs bool @@ -350,7 +351,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { // if we still haven't found a timestamp layout, log it and we will // just use time.Now() if !foundTs { - log.Printf("E! Error parsing timestamp [%s], could not find any "+ + p.Log.Errorf("Error parsing timestamp [%s], could not find any "+ "suitable time layouts.", v) } case Drop: @@ -364,7 +365,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } timestamp = ts } else { - log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) + p.Log.Errorf("Error parsing %s to time layout [%s]: %s", v, t, err) } } } @@ -405,7 +406,7 @@ func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) { line := strings.TrimSpace(scanner.Text()) if len(line) > 0 && line[0] != '#' { names := strings.SplitN(line, " ", 2) - p.patterns[names[0]] = names[1] + p.patternsMap[names[0]] = names[1] } } } @@ -415,30 +416,30 @@ func (p *Parser) compileCustomPatterns() error { // check if the pattern contains a subpattern that is already defined // replace it with the subpattern for modifier inheritance. for i := 0; i < 2; i++ { - for name, pattern := range p.patterns { + for name, pattern := range p.patternsMap { subNames := patternOnlyRe.FindAllStringSubmatch(pattern, -1) for _, subName := range subNames { - if subPattern, ok := p.patterns[subName[1]]; ok { + if subPattern, ok := p.patternsMap[subName[1]]; ok { pattern = strings.Replace(pattern, subName[0], subPattern, 1) } } - p.patterns[name] = pattern + p.patternsMap[name] = pattern } } // check if pattern contains modifiers. Parse them out if it does. 
- for name, pattern := range p.patterns { + for name, pattern := range p.patternsMap { if modifierRe.MatchString(pattern) { // this pattern has modifiers, so parse out the modifiers pattern, err = p.parseTypedCaptures(name, pattern) if err != nil { return err } - p.patterns[name] = pattern + p.patternsMap[name] = pattern } } - return p.g.AddPatternsFromMap(p.patterns) + return p.g.AddPatternsFromMap(p.patternsMap) } // parseTypedCaptures parses the capture modifiers, and then deletes the diff --git a/plugins/parsers/grok/parser_test.go b/plugins/parsers/grok/parser_test.go index d51f30385a964..2ab025d8a4e43 100644 --- a/plugins/parsers/grok/parser_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGrokParse(t *testing.T) { @@ -15,9 +15,11 @@ func TestGrokParse(t *testing.T) { Measurement: "t_met", Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - parser.Compile() - _, err := parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)) - assert.NoError(t, err) + err := parser.Compile() + require.NoError(t, err) + + _, err = parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)) + require.NoError(t, err) } // Verify that patterns with a regex lookahead fail at compile time. @@ -29,23 +31,23 @@ func TestParsePatternsWithLookahead(t *testing.T) { MYLOG %{NUMBER:num:int} %{NOBOT:client} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) _, err := p.ParseLine(`1466004605359052000 bot`) - assert.Error(t, err) + require.Error(t, err) } func TestMeasurementName(t *testing.T) { p := &Parser{ Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) // Parse an influxdb POST request m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank", @@ -55,19 +57,19 @@ func TestMeasurementName(t *testing.T) { "request": "/apache_pb.gif", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } func TestCLF_IPv6(t *testing.T) { p := &Parser{ Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) m, err := p.ParseLine(`2001:0db8:85a3:0000:0000:8a2e:0370:7334 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank", @@ -77,12 +79,12 @@ func TestCLF_IPv6(t *testing.T) { "request": "/apache_pb.gif", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + 
require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank", @@ -92,20 +94,20 @@ func TestCLF_IPv6(t *testing.T) { "request": "/apache_pb.gif", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } func TestCustomInfluxdbHttpd(t *testing.T) { p := &Parser{ Patterns: []string{`\[httpd\] %{COMBINED_LOG_FORMAT} %{UUID:uuid:drop} %{NUMBER:response_time_us:int}`}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) // Parse an influxdb POST request m, err := p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:11:33:29 +0100] "POST /write?consistency=any&db=telegraf&precision=ns&rp= HTTP/1.1" 204 0 "-" "InfluxDBClient" 6f61bc44-321b-11e6-8050-000000000000 2513`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(0), "auth": "-", @@ -118,13 +120,13 @@ func TestCustomInfluxdbHttpd(t *testing.T) { "agent": "InfluxDBClient", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "POST", "resp_code": "204"}, m.Tags()) // Parse an influxdb GET request m, err = p.ParseLine(`[httpd] ::1 - - [14/Jun/2016:12:10:02 +0100] "GET /query?db=telegraf&q=SELECT+bytes%2Cresponse_time_us+FROM+logparser_grok+WHERE+http_method+%3D+%27GET%27+AND+response_time_us+%3E+0+AND+time+%3E+now%28%29+-+1h HTTP/1.1" 200 578 "http://localhost:8083/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36" 8a3806f1-3220-11e6-8006-000000000000 988`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(578), "auth": "-", @@ -137,7 +139,7 @@ func TestCustomInfluxdbHttpd(t *testing.T) { "agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } // common log format @@ -146,13 +148,13 @@ func TestBuiltinCommonLogFormat(t *testing.T) { p := &Parser{ Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) // Parse an influxdb POST request m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank", @@ -162,7 +164,7 @@ func TestBuiltinCommonLogFormat(t *testing.T) { "request": "/apache_pb.gif", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } // common log format @@ -171,13 +173,13 @@ func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) { p := &Parser{ Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) // Parse an influxdb POST request m, err := p.ParseLine(`127.0.0.1 user1234 frank1234 [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) 
require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank1234", @@ -187,7 +189,7 @@ func TestBuiltinCommonLogFormatWithNumbers(t *testing.T) { "request": "/apache_pb.gif", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } // combined log format @@ -196,13 +198,13 @@ func TestBuiltinCombinedLogFormat(t *testing.T) { p := &Parser{ Patterns: []string{"%{COMBINED_LOG_FORMAT}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) // Parse an influxdb POST request m, err := p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`) require.NotNil(t, m) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "resp_bytes": int64(2326), "auth": "frank", @@ -214,7 +216,7 @@ func TestBuiltinCombinedLogFormat(t *testing.T) { "agent": "Mozilla", }, m.Fields()) - assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) + require.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) } func TestCompileStringAndParse(t *testing.T) { @@ -227,19 +229,19 @@ func TestCompileStringAndParse(t *testing.T) { TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), "response_time": int64(5432), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) } func TestCompileErrorsOnInvalidPattern(t *testing.T) { @@ -252,7 +254,7 @@ func TestCompileErrorsOnInvalidPattern(t *testing.T) { TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} `, } - assert.Error(t, p.Compile()) + require.Error(t, p.Compile()) metricA, _ := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`) require.Nil(t, metricA) @@ -262,19 +264,19 @@ func TestParsePatternsWithoutCustom(t *testing.T) { p := &Parser{ Patterns: []string{"%{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) - assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) + require.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) } func TestParseEpochMilli(t *testing.T) { @@ -284,19 +286,19 @@ func TestParseEpochMilli(t *testing.T) { MYAPP %{POSINT:ts:ts-epochmilli} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) 
metricA, err := p.ParseLine(`1568540909963 response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) - assert.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time()) + require.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time()) } func TestParseEpochNano(t *testing.T) { @@ -306,19 +308,19 @@ func TestParseEpochNano(t *testing.T) { MYAPP %{POSINT:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1466004605359052000 response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) - assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) + require.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) } func TestParseEpoch(t *testing.T) { @@ -328,19 +330,19 @@ func TestParseEpoch(t *testing.T) { MYAPP %{POSINT:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1466004605 response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) - assert.Equal(t, time.Unix(1466004605, 0), metricA.Time()) + require.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, time.Unix(1466004605, 0), metricA.Time()) } func TestParseEpochDecimal(t *testing.T) { @@ -395,7 +397,7 @@ func TestParseEpochDecimal(t *testing.T) { parser := &Parser{ Patterns: []string{"%{NUMBER:ts:ts-epoch} value=%{NUMBER:value:int}"}, } - assert.NoError(t, parser.Compile()) + require.NoError(t, parser.Compile()) m, err := parser.ParseLine(tt.line) if tt.noMatch { @@ -420,71 +422,74 @@ func TestParseEpochErrors(t *testing.T) { CustomPatterns: ` MYAPP %{WORD:ts:ts-epoch} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} `, + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) _, err := p.ParseLine(`foobar response_time=20821 mymetric=10890.645`) - assert.NoError(t, err) + require.NoError(t, err) p = &Parser{ Patterns: []string{"%{MYAPP}"}, CustomPatterns: ` MYAPP %{WORD:ts:ts-epochnano} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} `, + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) _, err = p.ParseLine(`foobar response_time=20821 mymetric=10890.645`) - assert.NoError(t, err) + require.NoError(t, err) } func TestParseGenericTimestamp(t *testing.T) { p := &Parser{ Patterns: []string{`\[%{HTTPDATE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`}, } - assert.NoError(t, p.Compile()) + 
require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[09/Jun/2016:03:37:03 +0000] response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) - assert.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC()) + require.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, time.Unix(1465443423, 0).UTC(), metricA.Time().UTC()) metricB, err := p.ParseLine(`[09/Jun/2016:03:37:04 +0000] response_time=20821 mymetric=10890.645`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, time.Unix(1465443424, 0).UTC(), metricB.Time().UTC()) } func TestParseGenericTimestampNotFound(t *testing.T) { p := &Parser{ Patterns: []string{`\[%{NOTSPACE:ts:ts}\] response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float}`}, + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[foobar] response_time=20821 mymetric=10890.645`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "response_time": int64(20821), "metric": float64(10890.645), }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, map[string]string{}, metricA.Tags()) } func TestCompileFileAndParse(t *testing.T) { @@ -492,12 +497,12 @@ func TestCompileFileAndParse(t *testing.T) { Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, CustomPatternFiles: []string{"./testdata/test-patterns"}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), @@ -505,23 +510,23 @@ func TestCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricA.Time().Nanosecond()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricB.Time().Nanosecond()) } @@ -534,19 +539,19 @@ func TestCompileNoModifiersAndParse(t *testing.T) { TEST_LOG_C 
%{NUMBER:myfloat} %{NUMBER} %{IPORHOST:clientip} %{DURATION:rt} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": "1.25", "rt": "5.432µs", }, metricA.Fields()) - assert.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, map[string]string{}, metricA.Tags()) } func TestCompileNoNamesAndParse(t *testing.T) { @@ -556,24 +561,26 @@ func TestCompileNoNamesAndParse(t *testing.T) { DURATION %{NUMBER}[nuµm]?s TEST_LOG_C %{NUMBER} %{NUMBER} %{IPORHOST} %{DURATION} `, + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1.25 200 192.168.1.1 5.432µs`) require.Nil(t, metricA) - assert.NoError(t, err) + require.NoError(t, err) } func TestParseNoMatch(t *testing.T) { p := &Parser{ Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, CustomPatternFiles: []string{"./testdata/test-patterns"}, + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`) - assert.NoError(t, err) - assert.Nil(t, metricA) + require.NoError(t, err) + require.Nil(t, metricA) } func TestCompileErrors(t *testing.T) { @@ -584,14 +591,14 @@ func TestCompileErrors(t *testing.T) { TEST_LOG_A %{HTTPDATE:ts1:ts-httpd} %{HTTPDATE:ts2:ts-httpd} %{NUMBER:mynum:int} `, } - assert.Error(t, p.Compile()) + require.Error(t, p.Compile()) // Compile fails because file doesn't exist: p = &Parser{ Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, CustomPatternFiles: []string{"/tmp/foo/bar/baz"}, } - assert.Error(t, p.Compile()) + require.Error(t, p.Compile()) } func TestParseErrors_MissingPattern(t *testing.T) { @@ -614,6 +621,7 @@ func TestParseErrors_WrongIntegerType(t *testing.T) { CustomPatterns: ` TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:int} `, + Log: testutil.Logger{}, } require.NoError(t, p.Compile()) m, err := p.ParseLine(`0 notnumber`) @@ -630,6 +638,7 @@ func TestParseErrors_WrongFloatType(t *testing.T) { CustomPatterns: ` TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:float} `, + Log: testutil.Logger{}, } require.NoError(t, p.Compile()) m, err := p.ParseLine(`0 notnumber`) @@ -646,6 +655,7 @@ func TestParseErrors_WrongDurationType(t *testing.T) { CustomPatterns: ` TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} `, + Log: testutil.Logger{}, } require.NoError(t, p.Compile()) m, err := p.ParseLine(`0 notnumber`) @@ -662,6 +672,7 @@ func TestParseErrors_WrongTimeLayout(t *testing.T) { CustomPatterns: ` TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} `, + Log: testutil.Logger{}, } require.NoError(t, p.Compile()) m, err := p.ParseLine(`0 notnumber`) @@ -680,12 +691,12 @@ func TestParseInteger_Base16(t *testing.T) { TEST_LOG_C %{NUMBER:myfloat} %{BASE10OR16NUM:response_code:int} %{IPORHOST:clientip} %{DURATION:rt} `, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`1.25 0xc8 192.168.1.1 5.432µs`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "response_code": int64(200), @@ -693,7 +704,7 @@ func TestParseInteger_Base16(t *testing.T) { "rt": "5.432µs", }, metricA.Fields()) - 
assert.Equal(t, map[string]string{}, metricA.Tags()) + require.Equal(t, map[string]string{}, metricA.Tags()) } func TestTsModder(t *testing.T) { @@ -701,47 +712,47 @@ func TestTsModder(t *testing.T) { reftime := time.Date(2006, time.December, 1, 1, 1, 1, int(time.Millisecond), time.UTC) modt := tsm.tsMod(reftime) - assert.Equal(t, reftime, modt) + require.Equal(t, reftime, modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Microsecond*1), modt) + require.Equal(t, reftime.Add(time.Microsecond*1), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Microsecond*2), modt) + require.Equal(t, reftime.Add(time.Microsecond*2), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Microsecond*3), modt) + require.Equal(t, reftime.Add(time.Microsecond*3), modt) reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime, modt) + require.Equal(t, reftime, modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*1), modt) + require.Equal(t, reftime.Add(time.Nanosecond*1), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*2), modt) + require.Equal(t, reftime.Add(time.Nanosecond*2), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*3), modt) + require.Equal(t, reftime.Add(time.Nanosecond*3), modt) reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond)*999, time.UTC) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime, modt) + require.Equal(t, reftime, modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*1), modt) + require.Equal(t, reftime.Add(time.Nanosecond*1), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*2), modt) + require.Equal(t, reftime.Add(time.Nanosecond*2), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Nanosecond*3), modt) + require.Equal(t, reftime.Add(time.Nanosecond*3), modt) reftime = time.Date(2006, time.December, 1, 1, 1, 1, 0, time.UTC) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime, modt) + require.Equal(t, reftime, modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Millisecond*1), modt) + require.Equal(t, reftime.Add(time.Millisecond*1), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Millisecond*2), modt) + require.Equal(t, reftime.Add(time.Millisecond*2), modt) modt = tsm.tsMod(reftime) - assert.Equal(t, reftime.Add(time.Millisecond*3), modt) + require.Equal(t, reftime.Add(time.Millisecond*3), modt) reftime = time.Time{} modt = tsm.tsMod(reftime) - assert.Equal(t, reftime, modt) + require.Equal(t, reftime, modt) } func TestTsModder_Rollover(t *testing.T) { @@ -752,14 +763,14 @@ func TestTsModder_Rollover(t *testing.T) { for i := 1; i < 1000; i++ { modt = tsm.tsMod(reftime) } - assert.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt) + require.Equal(t, reftime.Add(time.Microsecond*999+time.Nanosecond), modt) reftime = time.Date(2006, time.December, 1, 1, 1, 1, int(time.Microsecond), time.UTC) modt = tsm.tsMod(reftime) for i := 1; i < 1001; i++ { modt = tsm.tsMod(reftime) } - assert.Equal(t, reftime.Add(time.Nanosecond*1000), modt) + require.Equal(t, reftime.Add(time.Nanosecond*1000), modt) } func TestShortPatternRegression(t *testing.T) { @@ -788,12 +799,12 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { CustomPatternFiles: []string{"./testdata/test-patterns"}, Timezone: "", } - assert.NoError(t, p.Compile()) + 
require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), @@ -801,21 +812,21 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { @@ -823,13 +834,14 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, CustomPatternFiles: []string{"./testdata/test-patterns"}, Timezone: "Something/Weird", + Log: testutil.Logger{}, } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), @@ -837,21 +849,21 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { @@ -860,12 +872,12 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { CustomPatternFiles: []string{"./testdata/test-patterns"}, Timezone: "Europe/Berlin", } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": 
float64(1.25), @@ -873,21 +885,21 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano()) } func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { @@ -896,12 +908,12 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { CustomPatternFiles: []string{"./testdata/test-patterns"}, Timezone: "Canada/Eastern", } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), @@ -909,21 +921,21 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano()) } func TestTimezoneLocalCompileFileAndParse(t *testing.T) { @@ -932,12 +944,12 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { CustomPatternFiles: []string{"./testdata/test-patterns"}, Timezone: "Local", } - assert.NoError(t, p.Compile()) + require.NoError(t, p.Compile()) metricA, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) require.NotNil(t, metricA) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "clientip": "192.168.1.1", "myfloat": float64(1.25), @@ -945,21 +957,21 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { "myint": int64(101), }, metricA.Fields()) - assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) - assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) + require.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) + require.Equal(t, 
int64(1465040505000000000), metricA.Time().UnixNano()) metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) - assert.NoError(t, err) - assert.Equal(t, + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "myfloat": 1.25, "mystring": "mystring", "nomodifier": "nomodifier", }, metricB.Fields()) - assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) + require.Equal(t, map[string]string{}, metricB.Tags()) + require.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) } func TestNewlineInPatterns(t *testing.T) { @@ -1087,7 +1099,8 @@ func TestEmptyYearInTimestamp(t *testing.T) { `, } require.NoError(t, p.Compile()) - p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0") + _, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0") + require.NoError(t, err) m, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: objc[6504]: Object descriptor was null.") require.NoError(t, err) require.NotNil(t, m) diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index e8e0357fdb33f..65ac40d579422 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -7,8 +7,9 @@ import ( "io" "testing" - "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers/influx" ) type TestingHandler struct { @@ -1950,7 +1951,10 @@ type MockHandler struct { } func (h *MockHandler) SetMeasurement(name []byte) error { - h.TestingHandler.SetMeasurement(name) + err := h.TestingHandler.SetMeasurement(name) + if err != nil { + return err + } return h.SetMeasurementF(name) } @@ -1963,8 +1967,7 @@ func (h *MockHandler) AddInt(name, value []byte) error { if err != nil { return err } - h.TestingHandler.AddInt(name, value) - return nil + return h.TestingHandler.AddInt(name, value) } func (h *MockHandler) AddUint(name, value []byte) error { @@ -1972,8 +1975,7 @@ func (h *MockHandler) AddUint(name, value []byte) error { if err != nil { return err } - h.TestingHandler.AddUint(name, value) - return nil + return h.TestingHandler.AddUint(name, value) } func (h *MockHandler) AddFloat(name, value []byte) error { diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index c5a39801782c1..422736b386aa9 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -9,10 +9,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var DefaultTime = func() time.Time { @@ -849,7 +850,10 @@ func TestStreamParserProducesAllAvailableMetrics(t *testing.T) { parser := NewStreamParser(r) parser.SetTimeFunc(DefaultTime) - go w.Write([]byte("metric value=1\nmetric2 value=1\n")) + go func() { + _, err := w.Write([]byte("metric value=1\nmetric2 value=1\n")) + require.NoError(t, err) + }() _, err := parser.Next() require.NoError(t, err) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 7e138e33adf5c..de0e4d5fef218 100644 --- a/plugins/parsers/json/parser.go +++ 
b/plugins/parsers/json/parser.go @@ -5,15 +5,15 @@ import ( "encoding/json" "errors" "fmt" - "log" "strconv" "time" + "github.com/tidwall/gjson" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" - "github.com/tidwall/gjson" ) var ( @@ -45,6 +45,8 @@ type Parser struct { timezone string defaultTags map[string]string strict bool + + Log telegraf.Logger `toml:"-"` } func New(config *Config) (*Parser, error) { @@ -110,8 +112,7 @@ func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ( // checks if json_name_key is set if p.nameKey != "" { - switch field := f.Fields[p.nameKey].(type) { - case string: + if field, ok := f.Fields[p.nameKey].(string); ok { name = field } } @@ -172,7 +173,7 @@ func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]inte tags[name] = strconv.FormatFloat(t, 'f', -1, 64) delete(fields, name) default: - log.Printf("E! [parsers.json] Unrecognized type %T", value) + p.Log.Errorf("Unrecognized type %T", value) } } @@ -194,7 +195,7 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { result := gjson.GetBytes(buf, p.query) buf = []byte(result.Raw) if !result.IsArray() && !result.IsObject() && result.Type != gjson.Null { - err := fmt.Errorf("E! Query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type) + err := fmt.Errorf("query path must lead to a JSON object, array of objects or null, but lead to: %v", result.Type) return nil, err } if result.Type == gjson.Null { @@ -292,23 +293,21 @@ func (f *JSONFlattener) FullFlattenJSON( } err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool) if err != nil { - return nil + return err } } case float64: f.Fields[fieldname] = t case string: - if convertString { - f.Fields[fieldname] = v.(string) - } else { + if !convertString { return nil } + f.Fields[fieldname] = v.(string) case bool: - if convertBool { - f.Fields[fieldname] = v.(bool) - } else { + if !convertBool { return nil } + f.Fields[fieldname] = v.(bool) case nil: return nil default: diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 81e116178bf2b..6ae03b97a86f6 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -5,7 +5,6 @@ import ( "bytes" "errors" "fmt" - "log" "os/exec" "regexp" "strconv" @@ -74,6 +73,7 @@ func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, er type NagiosParser struct { MetricName string DefaultTags map[string]string + Log telegraf.Logger `toml:"-"` } // Got from Alignak @@ -111,12 +111,12 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { case 2: ms, err := parsePerfData(string(parts[1]), ts) if err != nil { - log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + p.Log.Errorf("Failed to parse performance data: %s\n", err.Error()) } metrics = append(metrics, ms...) 
fallthrough case 1: - msg.Write(bytes.TrimSpace(parts[0])) + msg.Write(bytes.TrimSpace(parts[0])) //nolint:revive // from buffer.go: "err is always nil" default: return nil, errors.New("illegal output format") } @@ -126,34 +126,34 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { if bytes.Contains(s.Bytes(), []byte{'|'}) { parts := bytes.Split(s.Bytes(), []byte{'|'}) if longmsg.Len() != 0 { - longmsg.WriteByte('\n') + longmsg.WriteByte('\n') //nolint:revive // from buffer.go: "err is always nil" } - longmsg.Write(bytes.TrimSpace(parts[0])) + longmsg.Write(bytes.TrimSpace(parts[0])) //nolint:revive // from buffer.go: "err is always nil" ms, err := parsePerfData(string(parts[1]), ts) if err != nil { - log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + p.Log.Errorf("Failed to parse performance data: %s\n", err.Error()) } metrics = append(metrics, ms...) break } if longmsg.Len() != 0 { - longmsg.WriteByte('\n') + longmsg.WriteByte('\n') //nolint:revive // from buffer.go: "err is always nil" } - longmsg.Write(bytes.TrimSpace((s.Bytes()))) + longmsg.Write(bytes.TrimSpace(s.Bytes())) //nolint:revive // from buffer.go: "err is always nil" } // Parse extra performance data. for s.Scan() { ms, err := parsePerfData(s.Text(), ts) if err != nil { - log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + p.Log.Errorf("Failed to parse performance data: %s\n", err.Error()) } metrics = append(metrics, ms...) } if s.Err() != nil { - log.Printf("D! [parser.nagios] unexpected io error: %s\n", s.Err()) + p.Log.Debugf("Unexpected io error: %s\n", s.Err()) } // Create nagios state. @@ -291,5 +291,5 @@ func parseThreshold(threshold string) (min float64, max float64, err error) { return 0, 0, ErrBadThresholdFormat } - return + return min, max, err } diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index 2173af15214ba..63284e2182365 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -195,8 +194,8 @@ func TestTryAddState(t *testing.T) { } func assertNagiosState(t *testing.T, m telegraf.Metric, f map[string]interface{}) { - assert.Equal(t, map[string]string{}, m.Tags()) - assert.Equal(t, f, m.Fields()) + require.Equal(t, map[string]string{}, m.Tags()) + require.Equal(t, f, m.Fields()) } func TestParse(t *testing.T) { @@ -219,11 +218,11 @@ with three lines require.NoError(t, err) require.Len(t, metrics, 3) // rta - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "ms", "perfdata": "rta", }, metrics[0].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.298), "warning_lt": float64(0), "warning_gt": float64(4000), @@ -233,11 +232,11 @@ with three lines }, metrics[0].Fields()) // pl - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "%", "perfdata": "pl", }, metrics[1].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0), "warning_lt": float64(0), "warning_gt": float64(80), @@ -260,11 +259,11 @@ with three lines require.NoError(t, err) require.Len(t, metrics, 2) // time - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "s", "perfdata": "time", }, metrics[0].Tags()) - 
assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.008457), "min": float64(0), "max": float64(10), @@ -282,10 +281,10 @@ with three lines require.NoError(t, err) require.Len(t, metrics, 2) // time - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "perfdata": "time", }, metrics[0].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.008457), }, metrics[0].Fields()) @@ -301,10 +300,10 @@ with three lines require.NoError(t, err) require.Len(t, metrics, 4) // load1 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "perfdata": "load1", }, metrics[0].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.00), "warning_lt": MinFloat64, "warning_gt": float64(4), @@ -314,10 +313,10 @@ with three lines }, metrics[0].Fields()) // load5 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "perfdata": "load5", }, metrics[1].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.01), "warning_gt": float64(3), "warning_lt": float64(0), @@ -327,10 +326,10 @@ with three lines }, metrics[1].Fields()) // load15 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "perfdata": "load15", }, metrics[2].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(0.05), "warning_lt": float64(0), "warning_gt": float64(2), @@ -382,11 +381,11 @@ with three lines require.NoError(t, err) require.Len(t, metrics, 5) // /=2643MB;5948;5958;0;5968 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "MB", "perfdata": "/", }, metrics[0].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(2643), "warning_lt": float64(0), "warning_gt": float64(5948), @@ -397,11 +396,11 @@ with three lines }, metrics[0].Fields()) // /boot=68MB;88;93;0;98 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "MB", "perfdata": "/boot", }, metrics[1].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(68), "warning_lt": float64(0), "warning_gt": float64(88), @@ -412,11 +411,11 @@ with three lines }, metrics[1].Fields()) // /home=69357MB;253404;253409;0;253414 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "MB", "perfdata": "/home", }, metrics[2].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(69357), "warning_lt": float64(0), "warning_gt": float64(253404), @@ -427,11 +426,11 @@ with three lines }, metrics[2].Fields()) // /var/log=818MB;970;975;0;980 - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "unit": "MB", "perfdata": "/var/log", }, metrics[3].Tags()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "value": float64(818), "warning_lt": float64(0), "warning_gt": float64(970), diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index bc7ea0c636e4d..c38908b805f77 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -11,13 +11,12 @@ import ( "time" "github.com/matttproud/golang_protobuf_extensions/pbutil" + dto "github.com/prometheus/client_model/go" + 
"github.com/prometheus/common/expfmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" ) type Parser struct { @@ -119,7 +118,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met fields := make(map[string]interface{}) fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) + fields[metricName+"_sum"] = m.GetSummary().GetSampleSum() met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) metrics = append(metrics, met) @@ -128,7 +127,7 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met fields = make(map[string]interface{}) newTags["quantile"] = fmt.Sprint(q.GetQuantile()) - fields[metricName] = float64(q.GetValue()) + fields[metricName] = q.GetValue() quantileMetric := metric.New("prometheus", newTags, fields, t, common.ValueType(metricType)) metrics = append(metrics, quantileMetric) @@ -142,7 +141,7 @@ func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metri fields := make(map[string]interface{}) fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) + fields[metricName+"_sum"] = m.GetHistogram().GetSampleSum() met := metric.New("prometheus", tags, fields, t, common.ValueType(metricType)) metrics = append(metrics, met) @@ -164,15 +163,15 @@ func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { fields := make(map[string]interface{}) if m.Gauge != nil { if !math.IsNaN(m.GetGauge().GetValue()) { - fields[metricName] = float64(m.GetGauge().GetValue()) + fields[metricName] = m.GetGauge().GetValue() } } else if m.Counter != nil { if !math.IsNaN(m.GetCounter().GetValue()) { - fields[metricName] = float64(m.GetCounter().GetValue()) + fields[metricName] = m.GetCounter().GetValue() } } else if m.Untyped != nil { if !math.IsNaN(m.GetUntyped().GetValue()) { - fields[metricName] = float64(m.GetUntyped().GetValue()) + fields[metricName] = m.GetUntyped().GetValue() } } return fields diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index 52ef2f5a3bed3..9de8a93ae6562 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" ) const ( @@ -69,8 +69,8 @@ func TestParsingValidGauge(t *testing.T) { metrics, err := parse([]byte(validUniqueGauge)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) + require.NoError(t, err) + require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -89,8 +89,8 @@ func TestParsingValidCounter(t *testing.T) { metrics, err := parse([]byte(validUniqueCounter)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) + require.NoError(t, err) + require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -148,8 +148,8 @@ func TestParsingValidSummary(t *testing.T) { metrics, err := parse([]byte(validUniqueSummary)) - assert.NoError(t, err) - 
assert.Len(t, metrics, 4) + require.NoError(t, err) + require.Len(t, metrics, 4) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -276,8 +276,8 @@ func TestParsingValidHistogram(t *testing.T) { metrics, err := parse([]byte(validUniqueHistogram)) - assert.NoError(t, err) - assert.Len(t, metrics, 9) + require.NoError(t, err) + require.Len(t, metrics, 9) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -309,8 +309,8 @@ func TestDefautTags(t *testing.T) { } metrics, err := parser.Parse([]byte(validUniqueGauge)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) + require.NoError(t, err) + require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -363,7 +363,7 @@ test_counter{label="test"} 1 %d metric, _ := parser.ParseLine(metricsWithTimestamps) testutil.RequireMetricEqual(t, expected, metric, testutil.IgnoreTime(), testutil.SortMetrics()) - assert.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) + require.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) } func parse(buf []byte) ([]telegraf.Metric, error) { @@ -448,7 +448,8 @@ func TestParserProtobufHeader(t *testing.T) { sampleProtoBufData := []uint8{67, 10, 9, 115, 119, 97, 112, 95, 102, 114, 101, 101, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 224, 36, 205, 65, 65, 10, 7, 115, 119, 97, 112, 95, 105, 110, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 0, 63, 65, 66, 10, 8, 115, 119, 97, 112, 95, 111, 117, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 0, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 26, 9, 9, 0, 0, 0, 0, 0, 30, 110, 65, 68, 10, 10, 115, 119, 97, 112, 95, 116, 111, 116, 97, 108, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 104, 153, 205, 65, 67, 10, 9, 115, 119, 97, 112, 95, 117, 115, 101, 100, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 0, 0, 0, 0, 0, 34, 109, 65, 75, 10, 17, 115, 119, 97, 112, 95, 117, 115, 101, 100, 95, 112, 101, 114, 99, 101, 110, 116, 18, 25, 84, 101, 108, 101, 103, 114, 97, 102, 32, 99, 111, 108, 108, 101, 99, 116, 101, 100, 32, 109, 101, 116, 114, 105, 99, 24, 1, 34, 25, 10, 12, 10, 4, 104, 111, 115, 116, 18, 4, 111, 109, 115, 107, 18, 9, 9, 109, 234, 180, 197, 37, 155, 248, 63} ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited") - w.Write(sampleProtoBufData) + _, err := w.Write(sampleProtoBufData) + require.NoError(t, err) })) defer ts.Close() req, err := http.NewRequest("GET", ts.URL, nil) diff --git 
a/plugins/parsers/prometheusremotewrite/parser_test.go b/plugins/parsers/prometheusremotewrite/parser_test.go index 7417c9f5fddaf..602fe1db92301 100644 --- a/plugins/parsers/prometheusremotewrite/parser_test.go +++ b/plugins/parsers/prometheusremotewrite/parser_test.go @@ -4,10 +4,11 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/prometheus/prometheus/prompb" - "github.com/stretchr/testify/assert" ) func TestParse(t *testing.T) { @@ -35,7 +36,7 @@ func TestParse(t *testing.T) { } inoutBytes, err := prompbInput.Marshal() - assert.NoError(t, err) + require.NoError(t, err) expected := []telegraf.Metric{ testutil.MustMetric( @@ -65,8 +66,8 @@ func TestParse(t *testing.T) { } metrics, err := parser.Parse(inoutBytes) - assert.NoError(t, err) - assert.Len(t, metrics, 2) + require.NoError(t, err) + require.Len(t, metrics, 2) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -86,7 +87,7 @@ func TestDefaultTags(t *testing.T) { } inoutBytes, err := prompbInput.Marshal() - assert.NoError(t, err) + require.NoError(t, err) expected := []telegraf.Metric{ testutil.MustMetric( @@ -109,8 +110,8 @@ func TestDefaultTags(t *testing.T) { } metrics, err := parser.Parse(inoutBytes) - assert.NoError(t, err) - assert.Len(t, metrics, 1) + require.NoError(t, err) + require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } @@ -132,7 +133,7 @@ func TestMetricsWithTimestamp(t *testing.T) { } inoutBytes, err := prompbInput.Marshal() - assert.NoError(t, err) + require.NoError(t, err) expected := []telegraf.Metric{ testutil.MustMetric( @@ -151,7 +152,7 @@ func TestMetricsWithTimestamp(t *testing.T) { } metrics, err := parser.Parse(inoutBytes) - assert.NoError(t, err) - assert.Len(t, metrics, 1) + require.NoError(t, err) + require.Len(t, metrics, 1) testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) } diff --git a/plugins/parsers/value/parser_test.go b/plugins/parsers/value/parser_test.go index 5a74085d82980..6d8184fef1e02 100644 --- a/plugins/parsers/value/parser_test.go +++ b/plugins/parsers/value/parser_test.go @@ -3,49 +3,49 @@ package value import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestParseValidValues(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": int64(55), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("64")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": float64(64), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, 
metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) parser = NewValueParser("value_test", "string", "", nil) metrics, err = parser.Parse([]byte("foobar")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": "foobar", }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("true")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": true, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) } func TestParseMultipleValues(t *testing.T) { @@ -56,13 +56,13 @@ func TestParseMultipleValues(t *testing.T) { 12 999 `)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": int64(999), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) } func TestParseCustomFieldName(t *testing.T) { @@ -70,8 +70,8 @@ func TestParseCustomFieldName(t *testing.T) { parser.FieldName = "penguin" metrics, err := parser.Parse([]byte(`55`)) - assert.NoError(t, err) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, map[string]interface{}{ "penguin": int64(55), }, metrics[0].Fields()) } @@ -79,126 +79,126 @@ func TestParseCustomFieldName(t *testing.T) { func TestParseLineValidValues(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) metric, err := parser.ParseLine("55") - assert.NoError(t, err) - assert.Equal(t, "value_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "value_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "value": int64(55), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) parser = NewValueParser("value_test", "float", "", nil) metric, err = parser.ParseLine("64") - assert.NoError(t, err) - assert.Equal(t, "value_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "value_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "value": float64(64), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) parser = NewValueParser("value_test", "string", "", nil) metric, err = parser.ParseLine("foobar") - assert.NoError(t, err) - assert.Equal(t, "value_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "value_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "value": "foobar", 
}, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) parser = NewValueParser("value_test", "boolean", "", nil) metric, err = parser.ParseLine("true") - assert.NoError(t, err) - assert.Equal(t, "value_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "value_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "value": true, }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) } func TestParseInvalidValues(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55.0")) - assert.Error(t, err) - assert.Len(t, metrics, 0) + require.Error(t, err) + require.Len(t, metrics, 0) parser = NewValueParser("value_test", "float", "", nil) metrics, err = parser.Parse([]byte("foobar")) - assert.Error(t, err) - assert.Len(t, metrics, 0) + require.Error(t, err) + require.Len(t, metrics, 0) parser = NewValueParser("value_test", "boolean", "", nil) metrics, err = parser.Parse([]byte("213")) - assert.Error(t, err) - assert.Len(t, metrics, 0) + require.Error(t, err) + require.Len(t, metrics, 0) } func TestParseLineInvalidValues(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) _, err := parser.ParseLine("55.0") - assert.Error(t, err) + require.Error(t, err) parser = NewValueParser("value_test", "float", "", nil) _, err = parser.ParseLine("foobar") - assert.Error(t, err) + require.Error(t, err) parser = NewValueParser("value_test", "boolean", "", nil) _, err = parser.ParseLine("213") - assert.Error(t, err) + require.Error(t, err) } func TestParseValidValuesDefaultTags(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err := parser.Parse([]byte("55")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": int64(55), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) parser = NewValueParser("value_test", "float", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("64")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": float64(64), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) parser = NewValueParser("value_test", "string", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("foobar")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": "foobar", }, 
metrics[0].Fields()) - assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) parser = NewValueParser("value_test", "boolean", "", nil) parser.SetDefaultTags(map[string]string{"test": "tag"}) metrics, err = parser.Parse([]byte("true")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": true, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"test": "tag"}, metrics[0].Tags()) } func TestParseValuesWithNullCharacter(t *testing.T) { parser := NewValueParser("value_test", "integer", "", nil) metrics, err := parser.Parse([]byte("55\x00")) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "value_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "value_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "value": int64(55), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) } diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 4afa199663733..5a268b83fff15 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -9,10 +9,9 @@ import ( var ( ErrEOF = errors.New("EOF") - ErrInvalidTimestamp = errors.New("Invalid timestamp") + ErrInvalidTimestamp = errors.New("invalid timestamp") ) -// Interface for parsing line elements. type ElementParser interface { parse(p *PointParser, pt *Point) error } @@ -116,11 +115,10 @@ func setTimestamp(pt *Point, ts int64, numDigits int) error { ts = ts / 1e3 } else if numDigits != 10 { // must be in seconds, return error if not 0 - if ts == 0 { - ts = getCurrentTime() - } else { + if ts != 0 { return ErrInvalidTimestamp } + ts = getCurrentTime() } pt.Timestamp = ts return nil diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go index ad3e704c58390..6ef509cad3e7a 100644 --- a/plugins/parsers/wavefront/parser.go +++ b/plugins/parsers/wavefront/parser.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "io" - "log" "strconv" "sync" "time" @@ -26,6 +25,7 @@ type Point struct { type WavefrontParser struct { parsers *sync.Pool defaultTags map[string]string + Log telegraf.Logger `toml:"-"` } // PointParser is a thread-unsafe parser and must be kept in a pool. 
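For reference, the `setTimestamp` change above only falls back to the current time when no timestamp was supplied at all (a raw value of 0); any other value that is not 10 or 13 digits long is now rejected with `ErrInvalidTimestamp`. A minimal standalone sketch of that decision, assuming the 13-digit branch divides milliseconds down to seconds as in the hunk (the helper name and `main` wrapper are illustrative, not part of the patch):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// normalizeTimestamp mirrors the branching in setTimestamp: 13 digits are
// treated as milliseconds, 10 digits as seconds, a raw 0 means "no timestamp
// given" and falls back to the current time, and everything else is invalid.
func normalizeTimestamp(ts int64, numDigits int) (int64, error) {
	switch {
	case numDigits == 13:
		return ts / 1000, nil // milliseconds -> seconds
	case numDigits == 10:
		return ts, nil // already seconds
	case ts == 0:
		return time.Now().Unix(), nil
	default:
		return 0, errors.New("invalid timestamp")
	}
}

func main() {
	fmt.Println(normalizeTimestamp(1530939936000, 13)) // 1530939936 <nil>
	fmt.Println(normalizeTimestamp(1530939936, 10))    // 1530939936 <nil>
	fmt.Println(normalizeTimestamp(123, 3))            // 0 invalid timestamp
}
```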
@@ -42,7 +42,7 @@ type PointParser struct { parent *WavefrontParser } -// Returns a slice of ElementParser's for the Graphite format +// NewWavefrontElements returns a slice of ElementParser's for the Graphite format func NewWavefrontElements() []ElementParser { var elements []ElementParser wsParser := WhiteSpaceParser{} @@ -200,7 +200,7 @@ func (p *PointParser) unscan() { func (p *PointParser) unscanTokens(n int) { if n > MaxBufferSize { // just log for now - log.Printf("cannot unscan more than %d tokens", MaxBufferSize) + p.parent.Log.Infof("Cannot unscan more than %d tokens", MaxBufferSize) } p.buf.n += n } @@ -208,7 +208,7 @@ func (p *PointParser) unscanTokens(n int) { func (p *PointParser) reset(buf []byte) { // reset the scan buffer and write new byte p.scanBuf.Reset() - p.scanBuf.Write(buf) + p.scanBuf.Write(buf) //nolint:revive // from buffer.go: "err is always nil" if p.s == nil { p.s = NewScanner(&p.scanBuf) diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go index 0165b499946e0..5b655b73d7165 100644 --- a/plugins/parsers/wavefront/parser_test.go +++ b/plugins/parsers/wavefront/parser_test.go @@ -4,208 +4,209 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func TestParse(t *testing.T) { parser := NewWavefrontParser(nil) parsedMetrics, err := parser.Parse([]byte("test.metric 1")) - assert.NoError(t, err) + require.NoError(t, err) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) - assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) + require.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) + require.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("\u2206test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("\u0394test.delta", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) - assert.NoError(t, 
err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) } func TestParseLine(t *testing.T) { parser := NewWavefrontParser(nil) parsedMetric, err := parser.ParseLine("test.metric 1") - assert.NoError(t, err) + require.NoError(t, err) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) - assert.Equal(t, parsedMetric.Name(), testMetric.Name()) - assert.Equal(t, parsedMetric.Fields(), testMetric.Fields()) + require.Equal(t, parsedMetric.Name(), testMetric.Name()) + require.Equal(t, parsedMetric.Fields(), testMetric.Fields()) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936") - assert.NoError(t, err) + require.NoError(t, err) 
testMetric = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource") - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"") - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2") - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ") - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) } func TestParseMultiple(t *testing.T) { parser := NewWavefrontParser(nil) parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) - assert.NoError(t, err) + require.NoError(t, err) testMetric1 := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) testMetric2 := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) testMetrics := []telegraf.Metric{testMetric1, testMetric2} - assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) - assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) - assert.EqualValues(t, parsedMetrics[1], testMetrics[1]) + require.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) + require.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) + require.EqualValues(t, parsedMetrics[1], testMetrics[1]) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) - assert.NoError(t, err) + require.NoError(t, err) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} - assert.EqualValues(t, parsedMetrics, testMetrics) + require.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" 
tag2=value2 ")) - assert.NoError(t, err) + require.NoError(t, err) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2} - assert.EqualValues(t, parsedMetrics, testMetrics) + require.EqualValues(t, parsedMetrics, testMetrics) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit")) - assert.NoError(t, err) + require.NoError(t, err) testMetric1 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) testMetric2 = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) testMetric3 := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} - assert.EqualValues(t, parsedMetrics, testMetrics) + require.EqualValues(t, parsedMetrics, testMetrics) } func TestParseSpecial(t *testing.T) { parser := NewWavefrontParser(nil) parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936") - assert.NoError(t, err) + require.NoError(t, err) testMetric := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"") - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetric, testMetric) + require.EqualValues(t, parsedMetric, testMetric) } func TestParseInvalid(t *testing.T) { parser := NewWavefrontParser(nil) _, err := parser.Parse([]byte("test.metric")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.metric string")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.metric 1 string")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.\u2206delta 1")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\"")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("\"test.metric 1 1530939936")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1")) - assert.Error(t, err) + require.Error(t, err) _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) - assert.Error(t, err) + require.Error(t, err) } func TestParseDefaultTags(t *testing.T) { parser := NewWavefrontParser(map[string]string{"myDefault": "value1", "another": "test2"}) parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) - assert.NoError(t, err) + require.NoError(t, 
err) testMetric := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) - assert.NoError(t, err) + require.NoError(t, err) testMetric = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) - assert.EqualValues(t, parsedMetrics[0], testMetric) + require.EqualValues(t, parsedMetrics[0], testMetric) } diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index 75ebfd92035c1..ca70a5a3b3096 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -315,26 +315,26 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config if err != nil { return nil, fmt.Errorf("failed to query field value for '%s': %v", name, err) } - path := name + if config.FieldNameExpand { p := p.document.GetNodePath(selectedfield, selected, "_") if len(p) > 0 { - path = p + "_" + name + name = p + "_" + name } } // Check if field name already exists and if so, append an index number. - if _, ok := fields[path]; ok { + if _, ok := fields[name]; ok { for i := 1; ; i++ { - p := path + "_" + strconv.Itoa(i) + p := name + "_" + strconv.Itoa(i) if _, ok := fields[p]; !ok { - path = p + name = p break } } } - fields[path] = v + fields[name] = v } } else { p.debugEmptyQuery("field selection", selected, config.FieldSelection) From 6fa29f29668c3b2d05adca9a6aafd43fb388d2c9 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:55:55 -0700 Subject: [PATCH 076/133] fix: markdown: resolve all markdown issues with a-c (#10169) --- plugins/inputs/activemq/README.md | 8 +- plugins/inputs/aerospike/README.md | 84 ++++++++++---------- plugins/inputs/aliyuncms/README.md | 52 ++++++------ plugins/inputs/amd_rocm_smi/README.md | 16 +++- plugins/inputs/amqp_consumer/README.md | 5 +- plugins/inputs/apache/README.md | 14 ++-- plugins/inputs/apcupsd/README.md | 12 ++- plugins/inputs/aurora/README.md | 18 +++-- plugins/inputs/azure_storage_queue/README.md | 15 ++-- plugins/inputs/bcache/README.md | 12 +-- plugins/inputs/beanstalkd/README.md | 11 ++- plugins/inputs/beat/README.md | 30 ++++--- plugins/inputs/bind/README.md | 31 ++++---- plugins/inputs/bond/README.md | 18 ++--- plugins/inputs/burrow/README.md | 55 +++++++------ plugins/inputs/cassandra/README.md | 48 +++++------ plugins/inputs/ceph/README.md | 44 +++++----- plugins/inputs/cgroup/README.md | 19 ++--- plugins/inputs/chrony/README.md | 38 ++++----- plugins/inputs/cisco_telemetry_mdt/README.md | 17 ++-- plugins/inputs/clickhouse/README.md | 47 +++++------ plugins/inputs/cloud_pubsub/README.md | 11 +-- plugins/inputs/cloud_pubsub_push/README.md | 3 +- plugins/inputs/cloudwatch/README.md | 33 +++++--- plugins/inputs/conntrack/README.md | 23 +++--- plugins/inputs/consul/README.md 
| 29 ++++--- plugins/inputs/couchbase/README.md | 15 ++-- plugins/inputs/couchdb/README.md | 18 +++-- plugins/inputs/cpu/README.md | 11 +-- plugins/inputs/csgo/README.md | 5 +- 30 files changed, 384 insertions(+), 358 deletions(-) diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md index aba5a7f83ec27..90a8bfc2b3383 100644 --- a/plugins/inputs/activemq/README.md +++ b/plugins/inputs/activemq/README.md @@ -2,7 +2,7 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API. -### Configuration: +## Configuration ```toml # Description @@ -33,7 +33,7 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A # insecure_skip_verify = false ``` -### Metrics +## Metrics Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API. @@ -47,7 +47,7 @@ Every effort was made to preserve the names based on the XML response from the A - consumer_count - enqueue_count - dequeue_count -+ activemq_topics +- activemq_topics - tags: - name - source @@ -76,7 +76,7 @@ Every effort was made to preserve the names based on the XML response from the A ### Example Output -``` +```shell activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md index 59ff6ed702db7..aed19ed773825 100644 --- a/plugins/inputs/aerospike/README.md +++ b/plugins/inputs/aerospike/README.md @@ -9,7 +9,8 @@ The metric names, to make it less complicated in querying, have replaced all `-` All metrics are attempted to be cast to integers, then booleans, then strings. -### Configuration: +## Configuration + ```toml # Read stats from aerospike server(s) [[inputs.aerospike]] @@ -48,68 +49,66 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow # the ability to squash this to a smaller number of buckets - # To have a balanced histogram, the number of buckets chosen + # To have a balanced histogram, the number of buckets chosen # should divide evenly into 100. # num_histogram_buckets = 100 # default: 10 ``` -### Measurements: +## Measurements The aerospike metrics are under a few measurement names: ***aerospike_node***: These are the aerospike **node** measurements, which are available from the aerospike `statistics` command. - ie, - ``` - telnet localhost 3003 - statistics - ... - ``` +```text + telnet localhost 3003 + statistics + ... +``` ***aerospike_namespace***: These are aerospike namespace measurements, which are available from the aerospike `namespace/` command. - ie, - ``` - telnet localhost 3003 - namespaces - ;;etc. - namespace/ - ... - ``` +```text + telnet localhost 3003 + namespaces + ;;etc. + namespace/ + ... +``` + ***aerospike_set***: These are aerospike set measurements, which are available from the aerospike `sets//` command. - ie, - ``` - telnet localhost 3003 - sets - sets/ - sets// - ... - ``` +```text + telnet localhost 3003 + sets + sets/ + sets// + ... 
+``` + ***aerospike_histogram_ttl***: These are aerospike ttl hisogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=ttl` command. - ie, - ``` - telnet localhost 3003 - histogram:namespace=;type=ttl - histogram:namespace=;[set=;]type=ttl - ... - ``` +```text + telnet localhost 3003 + histogram:namespace=;type=ttl + histogram:namespace=;[set=;]type=ttl + ... +``` + ***aerospike_histogram_object_size_linear***: These are aerospike object size linear histogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=object_size_linear` command. - ie, - ``` - telnet localhost 3003 - histogram:namespace=;type=object_size_linear - histogram:namespace=;[set=;]type=object_size_linear - ... - ``` +```text + telnet localhost 3003 + histogram:namespace=;type=object_size_linear + histogram:namespace=;[set=;]type=object_size_linear + ... +``` -### Tags: +### Tags All measurements have tags: @@ -126,13 +125,14 @@ Set metrics have tags: - set_name Histogram metrics have tags: + - namespace_name - set_name (optional) - type -### Example Output: +## Example Output -``` +```shell % telegraf --input-filter aerospike --test > aerospike_node,aerospike_host=localhost:3000,node_name="BB9020011AC4202" batch_error=0i,batch_index_complete=0i,batch_index_created_buffers=0i,batch_index_destroyed_buffers=0i,batch_index_error=0i,batch_index_huge_buffers=0i,batch_index_initiate=0i,batch_index_queue="0:0,0:0,0:0,0:0",batch_index_timeout=0i,batch_index_unused_buffers=0i,batch_initiate=0i,batch_queue=0i,batch_timeout=0i,client_connections=6i,cluster_integrity=true,cluster_key="8AF422E05281249E",cluster_size=1i,delete_queue=0i,demarshal_error=0i,early_tsvc_batch_sub_error=0i,early_tsvc_client_error=0i,early_tsvc_udf_sub_error=0i,fabric_connections=16i,fabric_msgs_rcvd=0i,fabric_msgs_sent=0i,heartbeat_connections=0i,heartbeat_received_foreign=0i,heartbeat_received_self=0i,info_complete=47i,info_queue=0i,migrate_allowed=true,migrate_partitions_remaining=0i,migrate_progress_recv=0i,migrate_progress_send=0i,objects=0i,paxos_principal="BB9020011AC4202",proxy_in_progress=0i,proxy_retry=0i,query_long_running=0i,query_short_running=0i,reaped_fds=0i,record_refs=0i,rw_in_progress=0i,scans_active=0i,sindex_gc_activity_dur=0i,sindex_gc_garbage_cleaned=0i,sindex_gc_garbage_found=0i,sindex_gc_inactivity_dur=0i,sindex_gc_list_creation_time=0i,sindex_gc_list_deletion_time=0i,sindex_gc_locktimedout=0i,sindex_gc_objects_validated=0i,sindex_ucgarbage_found=0i,sub_objects=0i,system_free_mem_pct=92i,system_swapping=false,tsvc_queue=0i,uptime=1457i 1468923222000000000 > aerospike_namespace,aerospike_host=localhost:3000,namespace=test,node_name="BB9020011AC4202" 
allow_nonxdr_writes=true,allow_xdr_writes=true,available_bin_names=32768i,batch_sub_proxy_complete=0i,batch_sub_proxy_error=0i,batch_sub_proxy_timeout=0i,batch_sub_read_error=0i,batch_sub_read_not_found=0i,batch_sub_read_success=0i,batch_sub_read_timeout=0i,batch_sub_tsvc_error=0i,batch_sub_tsvc_timeout=0i,client_delete_error=0i,client_delete_not_found=0i,client_delete_success=0i,client_delete_timeout=0i,client_lang_delete_success=0i,client_lang_error=0i,client_lang_read_success=0i,client_lang_write_success=0i,client_proxy_complete=0i,client_proxy_error=0i,client_proxy_timeout=0i,client_read_error=0i,client_read_not_found=0i,client_read_success=0i,client_read_timeout=0i,client_tsvc_error=0i,client_tsvc_timeout=0i,client_udf_complete=0i,client_udf_error=0i,client_udf_timeout=0i,client_write_error=0i,client_write_success=0i,client_write_timeout=0i,cold_start_evict_ttl=4294967295i,conflict_resolution_policy="generation",current_time=206619222i,data_in_index=false,default_ttl=432000i,device_available_pct=99i,device_free_pct=100i,device_total_bytes=4294967296i,device_used_bytes=0i,disallow_null_setname=false,enable_benchmarks_batch_sub=false,enable_benchmarks_read=false,enable_benchmarks_storage=false,enable_benchmarks_udf=false,enable_benchmarks_udf_sub=false,enable_benchmarks_write=false,enable_hist_proxy=false,enable_xdr=false,evict_hist_buckets=10000i,evict_tenths_pct=5i,evict_ttl=0i,evicted_objects=0i,expired_objects=0i,fail_generation=0i,fail_key_busy=0i,fail_record_too_big=0i,fail_xdr_forbidden=0i,geo2dsphere_within.earth_radius_meters=6371000i,geo2dsphere_within.level_mod=1i,geo2dsphere_within.max_cells=12i,geo2dsphere_within.max_level=30i,geo2dsphere_within.min_level=1i,geo2dsphere_within.strict=true,geo_region_query_cells=0i,geo_region_query_falsepos=0i,geo_region_query_points=0i,geo_region_query_reqs=0i,high_water_disk_pct=50i,high_water_memory_pct=60i,hwm_breached=false,ldt_enabled=false,ldt_gc_rate=0i,ldt_page_size=8192i,master_objects=0i,master_sub_objects=0i,max_ttl=315360000i,max_void_time=0i,memory_free_pct=100i,memory_size=1073741824i,memory_used_bytes=0i,memory_used_data_bytes=0i,memory_used_index_bytes=0i,memory_used_sindex_bytes=0i,migrate_order=5i,migrate_record_receives=0i,migrate_record_retransmits=0i,migrate_records_skipped=0i,migrate_records_transmitted=0i,migrate_rx_instances=0i,migrate_rx_partitions_active=0i,migrate_rx_partitions_initial=0i,migrate_rx_partitions_remaining=0i,migrate_sleep=1i,migrate_tx_instances=0i,migrate_tx_partitions_active=0i,migrate_tx_partitions_imbalance=0i,migrate_tx_partitions_initial=0i,migrate_tx_partitions_remaining=0i,non_expirable_objects=0i,ns_forward_xdr_writes=false,nsup_cycle_duration=0i,nsup_cycle_sleep_pct=0i,objects=0i,prole_objects=0i,prole_sub_objects=0i,query_agg=0i,query_agg_abort=0i,query_agg_avg_rec_count=0i,query_agg_error=0i,query_agg_success=0i,query_fail=0i,query_long_queue_full=0i,query_long_reqs=0i,query_lookup_abort=0i,query_lookup_avg_rec_count=0i,query_lookup_error=0i,query_lookup_success=0i,query_lookups=0i,query_reqs=0i,query_short_queue_full=0i,query_short_reqs=0i,query_udf_bg_failure=0i,query_udf_bg_success=0i,read_consistency_level_override="off",repl_factor=1i,scan_aggr_abort=0i,scan_aggr_complete=0i,scan_aggr_error=0i,scan_basic_abort=0i,scan_basic_complete=0i,scan_basic_error=0i,scan_udf_bg_abort=0i,scan_udf_bg_complete=0i,scan_udf_bg_error=0i,set_deleted_objects=0i,sets_enable_xdr=true,sindex.data_max_memory="ULONG_MAX",sindex.num_partitions=32i,single_bin=false,stop_writes=false,stop_writes_pct=90i,storag
e_engine="device",storage_engine.cold_start_empty=false,storage_engine.data_in_memory=true,storage_engine.defrag_lwm_pct=50i,storage_engine.defrag_queue_min=0i,storage_engine.defrag_sleep=1000i,storage_engine.defrag_startup_minimum=10i,storage_engine.disable_odirect=false,storage_engine.enable_osync=false,storage_engine.file="/opt/aerospike/data/test.dat",storage_engine.filesize=4294967296i,storage_engine.flush_max_ms=1000i,storage_engine.fsync_max_sec=0i,storage_engine.max_write_cache=67108864i,storage_engine.min_avail_pct=5i,storage_engine.post_write_queue=0i,storage_engine.scheduler_mode="null",storage_engine.write_block_size=1048576i,storage_engine.write_threads=1i,sub_objects=0i,udf_sub_lang_delete_success=0i,udf_sub_lang_error=0i,udf_sub_lang_read_success=0i,udf_sub_lang_write_success=0i,udf_sub_tsvc_error=0i,udf_sub_tsvc_timeout=0i,udf_sub_udf_complete=0i,udf_sub_udf_error=0i,udf_sub_udf_timeout=0i,write_commit_level_override="off",xdr_write_error=0i,xdr_write_success=0i,xdr_write_timeout=0i,{test}_query_hist_track_back=300i,{test}_query_hist_track_slice=10i,{test}_query_hist_track_thresholds="1,8,64",{test}_read_hist_track_back=300i,{test}_read_hist_track_slice=10i,{test}_read_hist_track_thresholds="1,8,64",{test}_udf_hist_track_back=300i,{test}_udf_hist_track_slice=10i,{test}_udf_hist_track_thresholds="1,8,64",{test}_write_hist_track_back=300i,{test}_write_hist_track_slice=10i,{test}_write_hist_track_thresholds="1,8,64" 1468923222000000000 diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md index 4e351ea6d8b37..0f6331d7df57d 100644 --- a/plugins/inputs/aliyuncms/README.md +++ b/plugins/inputs/aliyuncms/README.md @@ -1,12 +1,14 @@ # Alibaba (Aliyun) CloudMonitor Service Statistics Input Plugin + Here and after we use `Aliyun` instead `Alibaba` as it is default naming across web console and docs. This plugin will pull Metric Statistics from Aliyun CMS. -### Aliyun Authentication +## Aliyun Authentication This plugin uses an [AccessKey](https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572) credential for Authentication with the Aliyun OpenAPI endpoint. In the following order the plugin will attempt to authenticate. + 1. Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`, `role_session_name` is specified 2. AccessKey STS token credential if `access_key_id`, `access_key_secret`, `access_key_sts_token` is specified 3. AccessKey credential if `access_key_id`, `access_key_secret` is specified @@ -15,7 +17,7 @@ In the following order the plugin will attempt to authenticate. 6. Environment variables credential 7. Instance metadata credential -### Configuration: +## Configuration ```toml ## Aliyun Credentials @@ -27,7 +29,7 @@ In the following order the plugin will attempt to authenticate. ## 5) RSA keypair credential ## 6) Environment variables credential ## 7) Instance metadata credential - + # access_key_id = "" # access_key_secret = "" # access_key_sts_token = "" @@ -38,7 +40,7 @@ In the following order the plugin will attempt to authenticate. # role_name = "" ## Specify the ali cloud region list to be queried for metrics and objects discovery - ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here + ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here ## is to limit the list as much as possible. 
Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm ## Default supported regions are: ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, @@ -46,14 +48,14 @@ In the following order the plugin will attempt to authenticate. ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 ## ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich - ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then ## it will be reported on the start - for example for 'acs_cdn' project: ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) ## Currently, discovery supported for the following projects: ## - acs_ecs_dashboard ## - acs_rds_dashboard ## - acs_slb_dashboard - ## - acs_vpc_eip + ## - acs_vpc_eip regions = ["cn-hongkong"] # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all @@ -66,41 +68,41 @@ In the following order the plugin will attempt to authenticate. # ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) period = "5m" - + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) delay = "1m" - + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" - + ## Metric Statistic Project (required) project = "acs_slb_dashboard" - + ## Maximum requests per second, default value is 200 ratelimit = 200 - + ## How often the discovery API call executed (default 1m) #discovery_interval = "1m" - + ## Metrics to Pull (Required) [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, + ## Metrics names to be requested, ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq names = ["InstanceActiveConnection", "InstanceNewConnection"] - + ## Dimension filters for Metric (these are optional). ## This allows to get additional metric dimension. If dimension is not specified it can be returned or ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) ## Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: + ## You can specify either single dimension: #dimensions = '{"instanceId": "p-example"}' - + ## Or you can specify several dimensions at once: #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - + ## Enrichment tags, can be added from discovery (if supported) ## Notation is : ## To figure out which fields are available, consult the Describe API per project. @@ -111,14 +113,14 @@ In the following order the plugin will attempt to authenticate. # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" # ] ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. - + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery - ## data would be emitted, otherwise dropped. 
This cane be of help, in case debugging dimension filters, or partial coverage - ## of discovery scope vs monitoring scope + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope #allow_dps_without_discovery = false ``` -#### Requirements and Terminology +### Requirements and Terminology Plugin Configuration utilizes [preset metric items references](https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0) @@ -128,7 +130,7 @@ Plugin Configuration utilizes [preset metric items references](https://www.aliba - `names` must be preset metric names - `dimensions` must be preset dimension values -### Measurements & Fields: +## Measurements & Fields Each Aliyun CMS Project monitored records a measurement with fields for each available Metric Statistic Project and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -139,9 +141,9 @@ Project and Metrics are represented in [snake case](https://en.wikipedia.org/wik - {metric}_maximum (metric Maximum value) - {metric}_value (metric Value value) -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter aliyuncms --test > aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index ac080974dd274..f33df02c1ab3a 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -2,7 +2,7 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. -### Configuration +## Configuration ```toml # Pulls statistics from AMD GPUs attached to the host @@ -14,7 +14,8 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenComput # timeout = "5s" ``` -### Metrics +## Metrics + - measurement: `amd_rocm_smi` - tags - `name` (entry name assigned by rocm-smi executable) @@ -36,21 +37,28 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenComput - `clocks_current_memory` (integer, Mhz) - `power_draw` (float, Watt) -### Troubleshooting +## Troubleshooting + Check the full output by running `rocm-smi` binary manually. Linux: + ```sh rocm-smi rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json ``` + Please include the output of this command if opening a GitHub issue, together with ROCm version. 
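When debugging, the JSON emitted by `rocm-smi --json` can also be inspected with a short standalone program; this is only a sketch for examining the output shape, not the plugin's own parsing code, and it assumes `rocm-smi` is on the `PATH` (the flags are a subset of the troubleshooting command above):

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
	"time"
)

func main() {
	// Bound the call the same way the plugin's `timeout` option does.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := exec.CommandContext(ctx, "rocm-smi", "--showmeminfo", "all", "-t", "-u", "--json").Output()
	if err != nil {
		log.Fatalf("running rocm-smi: %v", err)
	}

	// The schema is not fixed across ROCm versions, so decode generically.
	var devices map[string]interface{}
	if err := json.Unmarshal(out, &devices); err != nil {
		log.Fatalf("decoding rocm-smi JSON: %v", err)
	}
	for name, fields := range devices {
		fmt.Printf("%s: %v\n", name, fields)
	}
}
```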
+ ### Example Output -``` + +```shell amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000 amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000 amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000 ``` + ### Limitations and notices + Please notice that this plugin has been developed and tested on a limited number of versions and small set of GPUs. Currently the latest ROCm version tested is 4.3.0. Notice that depending on the device and driver versions the amount of information provided by `rocm-smi` can vary so that some fields would start/stop appearing in the metrics upon updates. The `rocm-smi` JSON output is not perfectly homogeneous and is possibly changing in the future, hence parsing and unmarshaling can start failing upon updating ROCm. diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index ff417eb26b67c..b383b723d67dc 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -7,8 +7,9 @@ Metrics are read from a topic exchange using the configured queue and binding_ke Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). For an introduction to AMQP see: -- https://www.rabbitmq.com/tutorials/amqp-concepts.html -- https://www.rabbitmq.com/getstarted.html + +- [amqp - concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html) +- [rabbitmq: getting started](https://www.rabbitmq.com/getstarted.html) The following defaults are known to work with RabbitMQ: diff --git a/plugins/inputs/apache/README.md b/plugins/inputs/apache/README.md index b8822edebf314..710d8cbca7d5e 100644 --- a/plugins/inputs/apache/README.md +++ b/plugins/inputs/apache/README.md @@ -4,7 +4,7 @@ The Apache plugin collects server performance information using the [`mod_status Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable). 
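The `?auto` form of the status page is plain text with one `Key: Value` pair per line, which is what makes it straightforward to scrape. As a rough illustration of that format (not the plugin's actual implementation), the page can be fetched and split like this, assuming the default `localhost` URL used in the configuration below:

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Machine-readable status page; adjust the host and port for your server.
	resp, err := http.Get("http://localhost/server-status?auto")
	if err != nil {
		log.Fatalf("fetching server-status: %v", err)
	}
	defer resp.Body.Close()

	// Each line looks like "BusyWorkers: 1" or "Total Accesses: 1".
	fields := map[string]string{}
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ": ", 2)
		if len(parts) == 2 {
			fields[parts[0]] = parts[1]
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("reading response: %v", err)
	}
	fmt.Println(fields["BusyWorkers"], fields["IdleWorkers"])
}
```

With `ExtendedStatus On`, additional counters such as `Total Accesses` and `CPULoad` appear in the same line-per-field form.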
-### Configuration: +## Configuration ```toml # Read Apache status information (mod_status) @@ -29,7 +29,7 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve # insecure_skip_verify = false ``` -### Measurements & Fields: +## Measurements & Fields - apache - BusyWorkers (float) @@ -71,14 +71,14 @@ The following fields are collected from the `Scoreboard`, and represent the numb - scboard_starting (float) - scboard_waiting (float) -### Tags: +## Tags - All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output -``` +```shell apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000 ``` diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md index 97526d7ec3847..eb100a462fd4c 100644 --- a/plugins/inputs/apcupsd/README.md +++ b/plugins/inputs/apcupsd/README.md @@ -2,11 +2,11 @@ This plugin reads data from an apcupsd daemon over its NIS network protocol. -### Requirements +## Requirements apcupsd should be installed and it's daemon should be running. -### Configuration +## Configuration ```toml [[inputs.apcupsd]] @@ -18,7 +18,7 @@ apcupsd should be installed and it's daemon should be running. timeout = "5s" ``` -### Metrics +## Metrics - apcupsd - tags: @@ -43,11 +43,9 @@ apcupsd should be installed and it's daemon should be running. - nominal_power - firmware +## Example output - -### Example output - -``` +```shell apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000 ``` diff --git a/plugins/inputs/aurora/README.md b/plugins/inputs/aurora/README.md index cef7ac6c7e045..90910101f624e 100644 --- a/plugins/inputs/aurora/README.md +++ b/plugins/inputs/aurora/README.md @@ -4,7 +4,7 @@ The Aurora Input Plugin gathers metrics from [Apache Aurora](https://aurora.apac For monitoring recommendations reference [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/) -### Configuration: +## Configuration ```toml [[inputs.aurora]] @@ -32,7 +32,7 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: # insecure_skip_verify = false ``` -### Metrics: +## Metrics - aurora - tags: @@ -42,22 +42,24 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: - Numeric metrics are collected from the `/vars` endpoint; string fields are not gathered. - -### Troubleshooting: +## Troubleshooting Check the Scheduler role, the leader will return a 200 status: -``` + +```shell curl -v http://127.0.0.1:8081/leaderhealth ``` Get available metrics: -``` + +```shell curl http://127.0.0.1:8081/vars ``` -### Example Output: +## Example Output The example output below has been trimmed. 
-``` + +```shell > aurora,role=leader,scheduler=http://debian-stretch-aurora-coordinator-3.virt:8081 CronBatchWorker_batch_locked_events=0i,CronBatchWorker_batch_locked_events_per_sec=0,CronBatchWorker_batch_locked_nanos_per_event=0,CronBatchWorker_batch_locked_nanos_total=0i,CronBatchWorker_batch_locked_nanos_total_per_sec=0,CronBatchWorker_batch_unlocked_events=0i,CronBatchWorker_batch_unlocked_events_per_sec=0,CronBatchWorker_batch_unlocked_nanos_per_event=0,CronBatchWorker_batch_unlocked_nanos_total=0i,CronBatchWorker_batch_unlocked_nanos_total_per_sec=0,CronBatchWorker_batches_processed=0i,CronBatchWorker_items_processed=0i,CronBatchWorker_last_processed_batch_size=0i,CronBatchWorker_queue_size=0i,TaskEventBatchWorker_batch_locked_events=0i,TaskEventBatchWorker_batch_locked_events_per_sec=0,TaskEventBatchWorker_batch_locked_nanos_per_event=0,TaskEventBatchWorker_batch_locked_nanos_total=0i,TaskEventBatchWorker_batch_locked_nanos_total_per_sec=0,TaskEventBatchWorker_batch_unlocked_events=0i,TaskEventBatchWorker_batch_unlocked_events_per_sec=0,TaskEventBatchWorker_batch_unlocked_nanos_per_event=0,TaskEventBatchWorker_batch_unlocked_nanos_total=0i,TaskEventBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskEventBatchWorker_batches_processed=0i,TaskEventBatchWorker_items_processed=0i,TaskEventBatchWorker_last_processed_batch_size=0i,TaskEventBatchWorker_queue_size=0i,TaskGroupBatchWorker_batch_locked_events=0i,TaskGroupBatchWorker_batch_locked_events_per_sec=0,TaskGroupBatchWorker_batch_locked_nanos_per_event=0,TaskGroupBatchWorker_batch_locked_nanos_total=0i,TaskGroupBatchWorker_batch_locked_nanos_total_per_sec=0,TaskGroupBatchWorker_batch_unlocked_events=0i,TaskGroupBatchWorker_batch_unlocked_events_per_sec=0,TaskGroupBatchWorker_batch_unlocked_nanos_per_event=0,TaskGroupBatchWorker_batch_unlocked_nanos_total=0i,TaskGroupBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskGroupBatchWorker_batches_processed=0i,TaskGroupBatchWorker_items_processed=0i,TaskGroupBatchWorker_last_processed_batch_size=0i,TaskGroupBatchWorker_queue_size=0i,assigner_launch_failures=0i,async_executor_uncaught_exceptions=0i,async_tasks_completed=1i,cron_job_collisions=0i,cron_job_concurrent_runs=0i,cron_job_launch_failures=0i,cron_job_misfires=0i,cron_job_parse_failures=0i,cron_job_triggers=0i,cron_jobs_loaded=1i,empty_slots_dedicated_large=0i,empty_slots_dedicated_medium=0i,empty_slots_dedicated_revocable_large=0i,empty_slots_dedicated_revocable_medium=0i,empty_slots_dedicated_revocable_small=0i,empty_slots_dedicated_revocable_xlarge=0i,empty_slots_dedicated_small=0i,empty_slots_dedicated_xlarge=0i,empty_slots_large=0i,empty_slots_medium=0i,empty_slots_revocable_large=0i,empty_slots_revocable_medium=0i,empty_slots_revocable_small=0i,empty_slots_revocable_xlarge=0i,empty_slots_small=0i,empty_slots_xlarge=0i,event_bus_dead_events=0i,event_bus_exceptions=1i,framework_registered=1i,globally_banned_offers_size=0i,http_200_responses_events=55i,http_200_responses_events_per_sec=0,http_200_responses_nanos_per_event=0,http_200_responses_nanos_total=310416694i,http_200_responses_nanos_total_per_sec=0,job_update_delete_errors=0i,job_update_recovery_errors=0i,job_update_state_change_errors=0i,job_update_store_delete_all_events=1i,job_update_store_delete_all_events_per_sec=0,job_update_store_delete_all_nanos_per_event=0,job_update_store_delete_all_nanos_total=1227254i,job_update_store_delete_all_nanos_total_per_sec=0,job_update_store_fetch_details_query_events=74i,job_update_store_fetch_details_query_events_per_sec=0,job_up
date_store_fetch_details_query_nanos_per_event=0,job_update_store_fetch_details_query_nanos_total=24643149i,job_update_store_fetch_details_query_nanos_total_per_sec=0,job_update_store_prune_history_events=59i,job_update_store_prune_history_events_per_sec=0,job_update_store_prune_history_nanos_per_event=0,job_update_store_prune_history_nanos_total=262868218i,job_update_store_prune_history_nanos_total_per_sec=0,job_updates_pruned=0i,jvm_available_processors=2i,jvm_class_loaded_count=6707i,jvm_class_total_loaded_count=6732i,jvm_class_unloaded_count=25i,jvm_gc_PS_MarkSweep_collection_count=2i,jvm_gc_PS_MarkSweep_collection_time_ms=223i,jvm_gc_PS_Scavenge_collection_count=27i,jvm_gc_PS_Scavenge_collection_time_ms=1691i,jvm_gc_collection_count=29i,jvm_gc_collection_time_ms=1914i,jvm_memory_free_mb=65i,jvm_memory_heap_mb_committed=157i,jvm_memory_heap_mb_max=446i,jvm_memory_heap_mb_used=91i,jvm_memory_max_mb=446i,jvm_memory_mb_total=157i,jvm_memory_non_heap_mb_committed=50i,jvm_memory_non_heap_mb_max=0i,jvm_memory_non_heap_mb_used=49i,jvm_threads_active=47i,jvm_threads_daemon=28i,jvm_threads_peak=48i,jvm_threads_started=62i,jvm_time_ms=1526530686927i,jvm_uptime_secs=79947i,log_entry_serialize_events=16i,log_entry_serialize_events_per_sec=0,log_entry_serialize_nanos_per_event=0,log_entry_serialize_nanos_total=4815321i,log_entry_serialize_nanos_total_per_sec=0,log_manager_append_events=16i,log_manager_append_events_per_sec=0,log_manager_append_nanos_per_event=0,log_manager_append_nanos_total=506453428i,log_manager_append_nanos_total_per_sec=0,log_manager_deflate_events=14i,log_manager_deflate_events_per_sec=0,log_manager_deflate_nanos_per_event=0,log_manager_deflate_nanos_total=21010565i,log_manager_deflate_nanos_total_per_sec=0 1526530687000000000 ``` diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md index 905e85e4cdea6..c080be4561605 100644 --- a/plugins/inputs/azure_storage_queue/README.md +++ b/plugins/inputs/azure_storage_queue/README.md @@ -2,7 +2,7 @@ This plugin gathers sizes of Azure Storage Queues. -### Configuration: +## Configuration ```toml # Description @@ -12,12 +12,13 @@ This plugin gathers sizes of Azure Storage Queues. ## Required Azure Storage Account access key account_key = "storageaccountaccesskey" - + ## Set to false to disable peeking age of oldest message (executes faster) # peek_oldest_message_age = true ``` -### Metrics +## Metrics + - azure_storage_queues - tags: - queue @@ -26,10 +27,10 @@ This plugin gathers sizes of Azure Storage Queues. - size (integer, count) - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue. Requires `peek_oldest_message_age` to be configured to `true`. - -### Example Output -``` +## Example Output + +```shell azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000 azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md index 88c9f14f9236a..0937adcfc5d07 100644 --- a/plugins/inputs/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -2,7 +2,7 @@ Get bcache stat from stats_total directory and dirty_data file. -# Measurements +## Measurements Meta: @@ -20,9 +20,9 @@ Measurement names: - cache_misses - cache_readaheads -### Description +## Description -``` +```text dirty_data Amount of dirty data for this backing device in the cache. 
Continuously updated unlike the cache set's version, but may be slightly off. @@ -51,7 +51,7 @@ cache_readaheads Count of times readahead occurred. ``` -# Example output +## Example Using this configuration: @@ -69,13 +69,13 @@ Using this configuration: When run with: -``` +```shell ./telegraf --config telegraf.conf --input-filter bcache --test ``` It produces: -``` +```shell * Plugin: bcache, Collection 1 > [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194 > [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832 diff --git a/plugins/inputs/beanstalkd/README.md b/plugins/inputs/beanstalkd/README.md index e4fe2203d8d9b..3b371989446f7 100644 --- a/plugins/inputs/beanstalkd/README.md +++ b/plugins/inputs/beanstalkd/README.md @@ -2,7 +2,7 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively). -### Configuration: +## Configuration ```toml [[inputs.beanstalkd]] @@ -14,11 +14,12 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by tubes = ["notifications"] ``` -### Metrics: +## Metrics Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for detailed explanation of `stats` and `stats-tube` commands output. `beanstalkd_overview` – statistical information about the system as a whole + - fields - cmd_delete - cmd_pause_tube @@ -38,6 +39,7 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) `beanstalkd_tube` – statistical information about the specified tube + - fields - binlog_current_index - binlog_max_size @@ -90,8 +92,9 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) - version -### Example Output: -``` +## Example + +```shell beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000 beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000 diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md index d819b5ab950b8..5f51271dad73d 100644 --- a/plugins/inputs/beat/README.md +++ b/plugins/inputs/beat/README.md @@ -1,7 +1,10 @@ # Beat Input Plugin + The Beat plugin will collect metrics from the given Beat instances. 
It is known to work with Filebeat and Kafkabeat. -### Configuration: + +## Configuration + ```toml ## An URL from which to read Beat-formatted JSON ## Default is "http://127.0.0.1:5066". @@ -35,9 +38,11 @@ known to work with Filebeat and Kafkabeat. ## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` -### Measurements & Fields + +## Measurements & Fields + - **beat** - * Fields: + - Fields: - cpu_system_ticks - cpu_system_time_ms - cpu_total_ticks @@ -50,7 +55,7 @@ known to work with Filebeat and Kafkabeat. - memstats_memory_alloc - memstats_memory_total - memstats_rss - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -58,7 +63,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_filebeat** - * Fields: + - Fields: - events_active - events_added - events_done @@ -69,7 +74,7 @@ known to work with Filebeat and Kafkabeat. - harvester_started - input_log_files_renamed - input_log_files_truncated - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -77,7 +82,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_libbeat** - * Fields: + - Fields: - config_module_running - config_module_starts - config_module_stops @@ -105,7 +110,7 @@ known to work with Filebeat and Kafkabeat. - pipeline_events_retry - pipeline_events_total - pipeline_queue_acked - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -113,7 +118,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_system** - * Field: + - Field: - cpu_cores - load_1 - load_15 @@ -121,15 +126,16 @@ known to work with Filebeat and Kafkabeat. - load_norm_1 - load_norm_15 - load_norm_5 - * Tags: + - Tags: - beat_beat - beat_host - beat_id - beat_name - beat_version -### Example Output: -``` +## Example + +```shell $ telegraf --input-filter beat --test > beat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md index d67a02020f527..2ebda282c4b82 100644 --- a/plugins/inputs/bind/README.md +++ b/plugins/inputs/bind/README.md @@ -2,19 +2,19 @@ This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. -### XML Statistics Channel +## XML Statistics Channel Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile flag, and it must be specifically requested via the correct URL. Version 3 statistics are the default (and only) XML format in BIND 9.10+. -### JSON Statistics Channel +## JSON Statistics Channel JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do not enable support for JSON statistics in their BIND packages. -### Configuration: +## Configuration - **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a trailing slash in the URL. Default is "http://localhost:8053/xml/v3". @@ -27,15 +27,16 @@ version and configured statistics channel. 
| BIND Version | Statistics Format | Example URL | | ------------ | ----------------- | ----------------------------- | -| 9.6 - 9.8 | XML v2 | http://localhost:8053 | -| 9.9 | XML v2 | http://localhost:8053/xml/v2 | -| 9.9+ | XML v3 | http://localhost:8053/xml/v3 | -| 9.10+ | JSON v1 | http://localhost:8053/json/v1 | +| 9.6 - 9.8 | XML v2 | `http://localhost:8053` | +| 9.9 | XML v2 | `http://localhost:8053/xml/v2` | +| 9.9+ | XML v3 | `http://localhost:8053/xml/v3` | +| 9.10+ | JSON v1 | `http://localhost:8053/json/v1` | -#### Configuration of BIND Daemon +### Configuration of BIND Daemon Add the following to your named.conf if running Telegraf on the same host as the BIND daemon: -``` + +```json statistics-channels { inet 127.0.0.1 port 8053; }; @@ -46,7 +47,7 @@ configure the BIND daemon to listen on that address. Note that you should secure channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual for more information. -### Measurements & Fields: +## Measurements & Fields - bind_counter - name=value (multiple) @@ -60,7 +61,7 @@ for more information. - total - in_use -### Tags: +## Tags - All measurements - url @@ -73,7 +74,7 @@ for more information. - id - name -### Sample Queries: +## Sample Queries These are some useful queries (to generate dashboards or other) to run against data from this plugin: @@ -84,7 +85,7 @@ WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \ GROUP BY time(5m), "type" ``` -``` +```text name: bind_counter tags: type=qtype time non_negative_derivative_A non_negative_derivative_PTR @@ -104,11 +105,11 @@ time non_negative_derivative_A non_negative_derivative_PTR 1553865600000000000 280.6666666667443 1807.9071428570896 ``` -### Example Output +## Example Output Here is example output of this plugin: -``` +```shell bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000 diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index d905038a9d533..9227df2bac61c 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -4,7 +4,7 @@ The Bond input plugin collects network bond interface status for both the network bond interface as well as slave interfaces. The plugin collects these metrics from `/proc/net/bonding/*` files. -### Configuration: +## Configuration ```toml [[inputs.bond]] @@ -18,7 +18,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. # bond_interfaces = ["bond0"] ``` -### Measurements & Fields: +## Measurements & Fields - bond - active_slave (for active-backup mode) @@ -29,9 +29,9 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. - status - count -### Description: +## Description -``` +```shell active_slave Currently active slave interface for active-backup mode. 
@@ -45,7 +45,7 @@ count Number of slaves attached to bond ``` -### Tags: +## Tags - bond - bond @@ -54,11 +54,11 @@ count - bond - interface -### Example output: +## Example output Configuration: -``` +```toml [[inputs.bond]] ## Sets 'proc' directory path ## If not specified, then default is /proc @@ -72,13 +72,13 @@ Configuration: Run: -``` +```shell telegraf --config telegraf.conf --input-filter bond --test ``` Output: -``` +```shell * Plugin: inputs.bond, Collection 1 > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000 diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md index 1d763a430455f..2bdddf28cfd57 100644 --- a/plugins/inputs/burrow/README.md +++ b/plugins/inputs/burrow/README.md @@ -5,7 +5,7 @@ via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/l Supported Burrow version: `1.x` -### Configuration +## Configuration ```toml [[inputs.burrow]] @@ -50,7 +50,7 @@ Supported Burrow version: `1.x` # insecure_skip_verify = false ``` -### Group/Partition Status mappings +## Group/Partition Status mappings * `OK` = 1 * `NOT_FOUND` = 2 @@ -61,42 +61,41 @@ Supported Burrow version: `1.x` > unknown value will be mapped to 0 -### Fields +## Fields * `burrow_group` (one event per each consumer group) - - status (string, see Partition Status mappings) - - status_code (int, `1..6`, see Partition status mappings) - - partition_count (int, `number of partitions`) - - offset (int64, `total offset of all partitions`) - - total_lag (int64, `totallag`) - - lag (int64, `maxlag.current_lag || 0`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * partition_count (int, `number of partitions`) + * offset (int64, `total offset of all partitions`) + * total_lag (int64, `totallag`) + * lag (int64, `maxlag.current_lag || 0`) + * timestamp (int64, `end.timestamp`) * `burrow_partition` (one event per each topic partition) - - status (string, see Partition Status mappings) - - status_code (int, `1..6`, see Partition status mappings) - - lag (int64, `current_lag || 0`) - - offset (int64, `end.timestamp`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * lag (int64, `current_lag || 0`) + * offset (int64, `end.timestamp`) + * timestamp (int64, `end.timestamp`) * `burrow_topic` (one event per topic offset) - - offset (int64) + * offset (int64) - -### Tags +## Tags * `burrow_group` - - cluster (string) - - group (string) + * cluster (string) + * group (string) * `burrow_partition` - - cluster (string) - - group (string) - - topic (string) - - partition (int) - - owner (string) + * cluster (string) + * group (string) + * topic (string) + * partition (int) + * owner (string) * `burrow_topic` - - cluster (string) - - topic (string) - - partition (int) + * cluster (string) + * topic (string) + * partition (int) diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index 56c36bfe93d21..a68f07f2280e2 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -1,19 +1,21 @@ # Cassandra Input Plugin -### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the 
[cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration. +**Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration. + +## Plugin arguments -#### Plugin arguments: - **context** string: Context root used for jolokia url -- **servers** []string: List of servers with the format ":port" +- **servers** []string: List of servers with the format `:port`" - **metrics** []string: List of Jmx paths that identify mbeans attributes -#### Description +## Description The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. -See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) +See: [https://jolokia.org/](https://jolokia.org/) and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) + +## Measurements -# Measurements: Cassandra plugin produces one or more measurements for each metric configured, adding Server's name as `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name. Given a configuration like: @@ -43,30 +45,30 @@ Given a configuration like: The collected metrics will be: -``` +```shell javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084 ``` -# Useful Metrics: +## Useful Metrics Here is a list of metrics that might be useful to monitor your cassandra cluster. This was put together from multiple sources on the web. 
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics) - [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) -#### measurement = javaGarbageCollector +### measurement = javaGarbageCollector - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount - /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount -#### measurement = javaMemory +### measurement = javaMemory - /java.lang:type=Memory/HeapMemoryUsage - /java.lang:type=Memory/NonHeapMemoryUsage -#### measurement = cassandraCache +### measurement = cassandraCache - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests @@ -79,11 +81,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity -#### measurement = cassandraClient +### measurement = cassandraClient - /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients -#### measurement = cassandraClientRequest +### measurement = cassandraClientRequest - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency @@ -96,24 +98,25 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures -#### measurement = cassandraCommitLog +### measurement = cassandraCommitLog - /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize -#### measurement = cassandraCompaction +### measurement = cassandraCompaction - /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks - /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted -#### measurement = cassandraStorage +### measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load - /org.apache.cassandra.metrics:type=Storage,name=Exceptions -#### measurement = cassandraTable +### measurement = cassandraTable + Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them. 
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed @@ -124,20 +127,17 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency - -#### measurement = cassandraThreadPools +### measurement = cassandraThreadPools - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks - - diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 5d5afadc19fad..3d1745884b171 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -4,7 +4,7 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluste Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](https://docs.ceph.com/en/latest/mgr/telegraf/) -*Admin Socket Stats* +## Admin Socket Stats This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** @@ -26,23 +26,22 @@ used as collection tags, and all sub-keys are flattened. For example: Would be parsed into the following metrics, all of which would be tagged with collection=paxos: - - refresh = 9363435 - - refresh_latency.avgcount: 9363435 - - refresh_latency.sum: 5378.794002000 +- refresh = 9363435 +- refresh_latency.avgcount: 9363435 +- refresh_latency.sum: 5378.794002000 - -*Cluster Stats* +## Cluster Stats This gatherer works by invoking ceph commands against the cluster thus only requires the ceph client, valid ceph configuration and an access key to function (the ceph_config and ceph_user configuration variables work in conjunction to specify these prerequisites). 
It may be run on any server you wish which has access to the cluster. The currently supported commands are: -* ceph status -* ceph df -* ceph osd pool stats +- ceph status +- ceph df +- ceph osd pool stats -### Configuration: +## Configuration ```toml # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. @@ -89,9 +88,9 @@ the cluster. The currently supported commands are: gather_cluster_stats = false ``` -### Metrics: +## Metrics -*Admin Socket Stats* +### Admin Socket All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. @@ -167,9 +166,9 @@ All admin measurements will have the following tags: - throttle-objecter_ops - throttle-rgw_async_rados_ops -*Cluster Stats* +## Cluster -+ ceph_health +- ceph_health - fields: - status - overall_status @@ -184,7 +183,7 @@ All admin measurements will have the following tags: - nearfull (bool) - num_remapped_pgs (float) -+ ceph_pgmap +- ceph_pgmap - fields: - version (float) - num_pgs (float) @@ -204,7 +203,7 @@ All admin measurements will have the following tags: - fields: - count (float) -+ ceph_usage +- ceph_usage - fields: - total_bytes (float) - total_used_bytes (float) @@ -223,7 +222,7 @@ All admin measurements will have the following tags: - percent_used (float) - max_avail (float) -+ ceph_pool_stats +- ceph_pool_stats - tags: - name - fields: @@ -236,12 +235,11 @@ All admin measurements will have the following tags: - recovering_bytes_per_sec (float) - recovering_keys_per_sec (float) +## Example -### Example Output: - -*Cluster Stats* +Below is an example of a custer stats: -``` +```shell ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000 ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000 ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000 @@ -251,9 +249,9 @@ ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avai ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000 ``` -*Admin Socket Stats* +Below is an example of admin socket stats: -``` +```shell > ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000 > ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor 
bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000 > ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 7d0eede0f7f10..4fbb696dbd80e 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -10,38 +10,35 @@ Following file formats are supported: * Single value -``` +```text VAL\n ``` * New line separated values -``` +```text VAL0\n VAL1\n ``` * Space separated values -``` +```text VAL0 VAL1 ...\n ``` * Space separated keys and value, separated by new line -``` +```text KEY0 ... VAL0\n KEY1 ... VAL1\n ``` +## Tags -### Tags: - -All measurements have the following tags: - - path - +All measurements have the `path` tag. -### Configuration: +## Configuration ```toml # Read specific statistics per cgroup @@ -60,7 +57,7 @@ All measurements have the following tags: # files = ["memory.*usage*", "memory.limit_in_bytes"] ``` -### usage examples: +## Example ```toml # [[inputs.cgroup]] diff --git a/plugins/inputs/chrony/README.md b/plugins/inputs/chrony/README.md index aa4f848065297..ebb7ba65c6d49 100644 --- a/plugins/inputs/chrony/README.md +++ b/plugins/inputs/chrony/README.md @@ -51,7 +51,7 @@ Dispersion is due to system clock resolution, statistical measurement variations - Leap status - This is the leap status, which can be Normal, Insert second, Delete second or Not synchronised. -### Configuration: +## Configuration ```toml # Get standard chrony metrics, requires chronyc executable. @@ -60,34 +60,30 @@ Delete second or Not synchronised. 
# dns_lookup = false ``` -### Measurements & Fields: +## Measurements & Fields - chrony - - system_time (float, seconds) - - last_offset (float, seconds) - - rms_offset (float, seconds) - - frequency (float, ppm) - - residual_freq (float, ppm) - - skew (float, ppm) - - root_delay (float, seconds) - - root_dispersion (float, seconds) - - update_interval (float, seconds) + - system_time (float, seconds) + - last_offset (float, seconds) + - rms_offset (float, seconds) + - frequency (float, ppm) + - residual_freq (float, ppm) + - skew (float, ppm) + - root_delay (float, seconds) + - root_dispersion (float, seconds) + - update_interval (float, seconds) -### Tags: +### Tags - All measurements have the following tags: - - reference_id - - stratum - - leap_status + - reference_id + - stratum + - leap_status -### Example Output: +### Example Output -``` +```shell $ telegraf --config telegraf.conf --input-filter chrony --test * Plugin: chrony, Collection 1 > chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161 ``` - - - - diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index f4ca7243b8cde..b3e641cc3c502 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -9,8 +9,7 @@ The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and lat The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later. - -### Configuration: +## Configuration ```toml [[inputs.cisco_telemetry_mdt]] @@ -53,14 +52,16 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' ``` -### Example Output: -``` +## Example Output + +```shell ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 ``` -### NX-OS Configuration Example: -``` +### NX-OS Configuration Example + +```text Requirement DATA-SOURCE Configuration ----------------------------------------- Environment DME path sys/ch query-condition query-target=subtree&target-subtree-class=eqptPsuSlot,eqptFtSlot,eqptSupCSlot,eqptPsu,eqptFt,eqptSensor,eqptLCSlot @@ -92,13 +93,11 @@ multicast igmp NXAPI show ip igmp snooping groups multicast igmp NXAPI show ip igmp snooping groups detail multicast igmp NXAPI show ip igmp snooping groups summary multicast igmp NXAPI show ip igmp snooping mrouter -multicast igmp NXAPI show ip igmp snooping statistics +multicast 
igmp NXAPI show ip igmp snooping statistics multicast pim NXAPI show ip pim interface vrf all multicast pim NXAPI show ip pim neighbor vrf all multicast pim NXAPI show ip pim route vrf all multicast pim NXAPI show ip pim rp vrf all multicast pim NXAPI show ip pim statistics vrf all multicast pim NXAPI show ip pim vrf all - - ``` diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index 9b9e6caa904f7..b7bbe85c0de5c 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -2,7 +2,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. -### Configuration +## Configuration + ```toml # Read metrics from one or many ClickHouse servers [[inputs.clickhouse]] @@ -71,7 +72,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic # insecure_skip_verify = false ``` -### Metrics +## Metrics - clickhouse_events - tags: @@ -81,7 +82,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.events][] -+ clickhouse_metrics +- clickhouse_metrics - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -97,7 +98,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.asynchronous_metrics][] -+ clickhouse_tables +- clickhouse_tables - tags: - source (ClickHouse server hostname) - table @@ -115,9 +116,9 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - root_nodes (count of node from [system.zookeeper][] where path=/) + - root_nodes (count of node from [system.zookeeper][] where path=/) -+ clickhouse_replication_queue +- clickhouse_replication_queue - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -132,8 +133,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - shard_num (Shard number in the cluster [optional]) - fields: - detached_parts (total detached parts for all tables and databases from [system.detached_parts][]) - -+ clickhouse_dictionaries + +- clickhouse_dictionaries - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -153,7 +154,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - failed - counter which show total failed mutations from first clickhouse-server run - completed - counter which show total successful finished mutations from first clickhouse-server run -+ clickhouse_disks +- clickhouse_disks - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -161,8 +162,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - name (disk name in storage configuration) - path (path to disk) - fields: - - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes - - keep_free_space_percent - 0-100, gauge which show current percent of required keep free disk bytes relative to total disk space bytes + - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes + - keep_free_space_percent - 0-100, gauge which show current percent of required keep free disk bytes relative to total disk space 
bytes - clickhouse_processes - tags: @@ -170,8 +171,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details - - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details + - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details + - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details - longest_running - float gauge which show maximum value for `elapsed` field of running processes, see [system.processes][] for details - clickhouse_text_log @@ -179,13 +180,13 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][] + - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][] - fields: - messages_last_10_min - gauge which show how many messages collected - -### Example Output -``` +### Examples + +```text clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 
jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 @@ -196,10 +197,10 @@ clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,hos [system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/ [system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/ [system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/ -[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ +[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ [system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/ -[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ -[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ -[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ -[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ -[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ +[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ +[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ +[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ +[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ +[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ diff --git 
a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index a4244b881cb62..d05fea611ebdd 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -3,8 +3,7 @@ The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub] and creates metrics using one of the supported [input data formats][]. - -### Configuration +## Configuration ```toml [[inputs.cloud_pubsub]] @@ -26,8 +25,8 @@ and creates metrics using one of the supported [input data formats][]. ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. ## If the streaming pull for a PubSub Subscription fails (receiver), ## the agent attempts to restart receiving messages after this many seconds. # retry_delay_seconds = 5 @@ -76,7 +75,7 @@ and creates metrics using one of the supported [input data formats][]. ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 - ## Optional. If true, Telegraf will attempt to base64 decode the + ## Optional. If true, Telegraf will attempt to base64 decode the ## PubSub message data before parsing. Many GCP services that ## output JSON to Google PubSub base64-encode the JSON payload. # base64_data = false @@ -91,8 +90,6 @@ Each plugin agent can listen to one subscription at a time, so you will need to run multiple instances of the plugin to pull messages from multiple subscriptions/topics. - - [pubsub]: https://cloud.google.com/pubsub [pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md index 3173b43361fb6..3163d5bb4ba1b 100644 --- a/plugins/inputs/cloud_pubsub_push/README.md +++ b/plugins/inputs/cloud_pubsub_push/README.md @@ -9,8 +9,7 @@ Enable TLS by specifying the file names of a service TLS certificate and key. Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`. - -### Configuration: +## Configuration This is a sample configuration for the plugin. diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 97592f5197ab7..a904eb1ea7b0e 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -2,10 +2,11 @@ This plugin will pull Metric Statistics from Amazon CloudWatch. -### Amazon Authentication +## Amazon Authentication This plugin uses a credential chain for Authentication with the CloudWatch API endpoint. In the following order the plugin will attempt to authenticate. + 1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) 2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes 3. Shared profile from `profile` attribute @@ -13,7 +14,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. 5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file) 6. 
[EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) -### Configuration: +## Configuration ```toml # Pull Metric Statistics from Amazon CloudWatch @@ -112,7 +113,8 @@ API endpoint. In the following order the plugin will attempt to authenticate. # name = "LoadBalancerName" # value = "p-example" ``` -#### Requirements and Terminology + +## Requirements and Terminology Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access pattern to allow monitoring of any CloudWatch Metric. @@ -127,7 +129,8 @@ to be retrieved. If specifying >1 dimension, then the metric must contain *all* wildcard dimension is ignored. Example: -``` + +```toml [[inputs.cloudwatch]] period = "1m" interval = "5m" @@ -146,13 +149,14 @@ Example: ``` If the following ELBs are available: + - name: `p-example`, availabilityZone: `us-east-1a` - name: `p-example`, availabilityZone: `us-east-1b` - name: `q-example`, availabilityZone: `us-east-1a` - name: `q-example`, availabilityZone: `us-east-1b` - Then 2 metrics will be output: + - name: `p-example`, availabilityZone: `us-east-1a` - name: `p-example`, availabilityZone: `us-east-1b` @@ -161,11 +165,12 @@ would be exported containing the aggregate values of the ELB across availability To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart. -#### Restrictions and Limitations +## Restrictions and Limitations + - CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) - CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/) -### Measurements & Fields: +## Measurements & Fields Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic. 
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -177,8 +182,8 @@ Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/w - {metric}_maximum (metric Maximum value) - {metric}_sample_count (metric SampleCount value) +## Tags -### Tags: Each measurement is tagged with the following identifiers to uniquely identify the associated metric Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -186,17 +191,19 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik - region (CloudWatch Region) - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) -### Troubleshooting: +## Troubleshooting You can use the aws cli to get a list of available metrics and dimensions: -``` + +```shell aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name CPUCreditBalance ``` If the expected metrics are not returned, you can try getting them manually for a short period of time: -``` + +```shell aws cloudwatch get-metric-data \ --start-time 2018-07-01T00:00:00Z \ --end-time 2018-07-01T00:15:00Z \ @@ -222,9 +229,9 @@ aws cloudwatch get-metric-data \ ]' ``` -### Example Output: +## Example -``` +```shell $ ./telegraf --config telegraf.conf --input-filter cloudwatch --test > cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 ``` diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md index 2e5fb8861dec1..74f4bd9ed567f 100644 --- a/plugins/inputs/conntrack/README.md +++ b/plugins/inputs/conntrack/README.md @@ -3,23 +3,22 @@ Collects stats from Netfilter's conntrack-tools. The conntrack-tools provide a mechanism for tracking various aspects of -network connections as they are processed by netfilter. At runtime, +network connections as they are processed by netfilter. At runtime, conntrack exposes many of those connection statistics within /proc/sys/net. Depending on your kernel version, these files can be found in either /proc/sys/net/ipv4/netfilter or /proc/sys/net/netfilter and will be -prefixed with either ip_ or nf_. This plugin reads the files specified +prefixed with either ip or nf. This plugin reads the files specified in its configuration and publishes each one as a field, with the prefix -normalized to ip_. +normalized to ip_. In order to simplify configuration in a heterogeneous environment, a superset of directory and filenames can be specified. Any locations that don't exist will be ignored. -For more information on conntrack-tools, see the +For more information on conntrack-tools, see the [Netfilter Documentation](http://conntrack-tools.netfilter.org/). - -### Configuration: +## Configuration ```toml # Collects conntrack stats from the configured directories and files. 
@@ -38,19 +37,19 @@ For more information on conntrack-tools, see the dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ``` -### Measurements & Fields: +## Measurements & Fields - conntrack - - ip_conntrack_count (int, count): the number of entries in the conntrack table - - ip_conntrack_max (int, size): the max capacity of the conntrack table + - ip_conntrack_count (int, count): the number of entries in the conntrack table + - ip_conntrack_max (int, size): the max capacity of the conntrack table -### Tags: +## Tags This input does not use tags. -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter conntrack --test conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735 ``` diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index 71d7d26a8f5eb..609a8dfb8840f 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -6,7 +6,7 @@ to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed. -### Configuration: +## Configuration ```toml # Gather health check statuses from services registered in Consul @@ -48,13 +48,15 @@ report those stats already using StatsD protocol if needed. # tag_delimiter = ":" ``` -### Metrics: -##### metric_version = 1: +## Metrics + +### metric_version = 1 + - consul_health_checks - tags: - - node (node that check/service is registered on) - - service_name - - check_id + - node (node that check/service is registered on) + - service_name + - check_id - fields: - check_name - service_id @@ -63,27 +65,28 @@ report those stats already using StatsD protocol if needed. - critical (integer) - warning (integer) -##### metric_version = 2: +### metric_version = 2 + - consul_health_checks - tags: - - node (node that check/service is registered on) - - service_name - - check_id - - check_name + - node (node that check/service is registered on) + - service_name + - check_id + - check_name - service_id - status - fields: - passing (integer) - critical (integer) - warning (integer) - + `passing`, `critical`, and `warning` are integer representations of the health check state. A value of `1` represents that the status was the state of the the health check at this sample. `status` is string representation of the same state. ## Example output -``` +```shell consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902 consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036 ``` diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 1acdaea4ac76e..be39100c7d203 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -1,8 +1,9 @@ # Couchbase Input Plugin + Couchbase is a distributed NoSQL database. This plugin gets metrics for each Couchbase node, as well as detailed metrics for each bucket, for a given couchbase server. 
-## Configuration: +## Configuration ```toml # Read per-node and per-bucket metrics from Couchbase @@ -30,25 +31,29 @@ This plugin gets metrics for each Couchbase node, as well as detailed metrics fo # insecure_skip_verify = false ``` -## Measurements: +## Measurements ### couchbase_node Tags: + - cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint` - hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091` Fields: + - memory_free (unit: bytes, example: 23181365248.0) - memory_total (unit: bytes, example: 64424656896.0) ### couchbase_bucket Tags: + - cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) - bucket: the name of the couchbase bucket, e.g., `blastro-df` Default bucket fields: + - quota_percent_used (unit: percent, example: 68.85424936294555) - ops_per_sec (unit: count, example: 5686.789686789687) - disk_fetches (unit: count, example: 0.0) @@ -58,7 +63,8 @@ Default bucket fields: - mem_used (unit: bytes, example: 202156957464.0) Additional fields that can be configured with the `bucket_stats_included` option: -- couch_total_disk_size + +- couch_total_disk_size - couch_docs_fragmentation - couch_views_fragmentation - hit_ratio @@ -274,10 +280,9 @@ Additional fields that can be configured with the `bucket_stats_included` option - swap_total - swap_used - ## Example output -``` +```shell couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000 couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000 ``` diff --git a/plugins/inputs/couchdb/README.md b/plugins/inputs/couchdb/README.md index 3a7f127dbc3db..a7a6a42f9bcae 100644 --- a/plugins/inputs/couchdb/README.md +++ b/plugins/inputs/couchdb/README.md @@ -2,7 +2,7 @@ The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. -### Configuration +## Configuration ```toml [[inputs.couchdb]] @@ -15,7 +15,7 @@ The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. 
# basic_password = "p@ssw0rd" ``` -### Measurements & Fields: +## Measurements & Fields Statistics specific to the internals of CouchDB: @@ -60,19 +60,21 @@ httpd statistics: - httpd_bulk_requests - httpd_view_reads -### Tags: +## Tags - server (url of the couchdb _stats endpoint) -### Example output: +## Example -**Post Couchdb 2.0** -``` +### Post Couchdb 2.0 + +```shell couchdb,server=http://couchdb22:5984/_node/_local/_stats couchdb_auth_cache_hits_value=0,httpd_request_methods_delete_value=0,couchdb_auth_cache_misses_value=0,httpd_request_methods_get_value=42,httpd_status_codes_304_value=0,httpd_status_codes_400_value=0,httpd_request_methods_head_value=0,httpd_status_codes_201_value=0,couchdb_database_reads_value=0,httpd_request_methods_copy_value=0,couchdb_request_time_max=0,httpd_status_codes_200_value=42,httpd_status_codes_301_value=0,couchdb_open_os_files_value=2,httpd_request_methods_put_value=0,httpd_request_methods_post_value=0,httpd_status_codes_202_value=0,httpd_status_codes_403_value=0,httpd_status_codes_409_value=0,couchdb_database_writes_value=0,couchdb_request_time_min=0,httpd_status_codes_412_value=0,httpd_status_codes_500_value=0,httpd_status_codes_401_value=0,httpd_status_codes_404_value=0,httpd_status_codes_405_value=0,couchdb_open_databases_value=0 1536707179000000000 ``` -**Pre Couchdb 2.0** -``` +### Pre Couchdb 2.0 + +```shell couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000 ``` diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md index 8e2ef66f92451..5b82b038d768a 100644 --- a/plugins/inputs/cpu/README.md +++ b/plugins/inputs/cpu/README.md @@ -2,7 +2,8 @@ The `cpu` plugin gather metrics on the system CPUs. -#### Configuration +## Configuration + ```toml # Read metrics about cpu usage [[inputs.cpu]] @@ -16,7 +17,7 @@ The `cpu` plugin gather metrics on the system CPUs. report_active = false ``` -### Metrics +## Metrics On Linux, consult `man proc` for details on the meanings of these values. @@ -47,14 +48,14 @@ On Linux, consult `man proc` for details on the meanings of these values. - usage_guest (float, percent) - usage_guest_nice (float, percent) -### Troubleshooting +## Troubleshooting On Linux systems the `/proc/stat` file is used to gather CPU times. Percentages are based on the last 2 samples. 
-### Example Output +## Example Output -``` +```shell cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000 cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000 cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000 diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md index b335509400426..e6fded0fb27ec 100644 --- a/plugins/inputs/csgo/README.md +++ b/plugins/inputs/csgo/README.md @@ -2,7 +2,8 @@ The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. -#### Configuration +## Configuration + ```toml # Fetch metrics from a CSGO SRCDS [[inputs.csgo]] @@ -16,7 +17,7 @@ The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. servers = [] ``` -### Metrics +## Metrics The plugin retrieves the output of the `stats` command that is executed via rcon. From 0c02f245d60b92f2f4b1acc32d1e0d92d36025e5 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:56:26 -0700 Subject: [PATCH 077/133] fix: markdown: resolve all markdown issues with d-f (#10171) --- plugins/inputs/dcos/README.md | 24 ++++---- plugins/inputs/directory_monitor/README.md | 10 ++-- plugins/inputs/disk/README.md | 22 +++---- plugins/inputs/diskio/README.md | 40 +++++++------ plugins/inputs/disque/README.md | 8 +-- plugins/inputs/dmcache/README.md | 44 +++++++------- plugins/inputs/dns_query/README.md | 12 ++-- plugins/inputs/docker/README.md | 69 +++++++++++----------- plugins/inputs/docker_log/README.md | 12 ++-- plugins/inputs/dovecot/README.md | 67 +++++++++++---------- plugins/inputs/dpdk/README.md | 46 +++++++++++---- plugins/inputs/ecs/README.md | 19 +++--- plugins/inputs/elasticsearch/README.md | 15 ++--- plugins/inputs/ethtool/README.md | 8 +-- plugins/inputs/eventhub_consumer/README.md | 8 +-- plugins/inputs/example/README.md | 19 +++--- plugins/inputs/exec/README.md | 15 +++-- plugins/inputs/execd/README.md | 14 ++--- plugins/inputs/fail2ban/README.md | 15 ++--- plugins/inputs/fibaro/README.md | 9 ++- plugins/inputs/file/README.md | 8 +-- plugins/inputs/filecount/README.md | 8 +-- plugins/inputs/filestat/README.md | 20 +++---- plugins/inputs/fireboard/README.md | 14 ++--- plugins/inputs/fluentd/README.md | 27 +++++---- 25 files changed, 293 insertions(+), 260 deletions(-) diff --git a/plugins/inputs/dcos/README.md b/plugins/inputs/dcos/README.md index 4c9d46a921a6b..cd3a0c73929b9 100644 --- a/plugins/inputs/dcos/README.md +++ b/plugins/inputs/dcos/README.md @@ -2,7 +2,7 @@ This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/). -**Series Cardinality Warning** +## Series Cardinality Warning Depending on the work load of your DC/OS cluster, this plugin can quickly create a high number of series which, when unchecked, can cause high load on @@ -18,7 +18,8 @@ your database. 
- Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration: +## Configuration + ```toml [[inputs.dcos]] ## The DC/OS cluster URL. @@ -63,13 +64,14 @@ your database. # path = ["/var/lib/mesos/slave/slaves/*"] ``` -#### Enterprise Authentication +### Enterprise Authentication When using Enterprise DC/OS, it is recommended to use a service account to authenticate with the cluster. The plugin requires the following permissions: -``` + +```text dcos:adminrouter:ops:system-metrics full dcos:adminrouter:ops:mesos full ``` @@ -77,14 +79,15 @@ dcos:adminrouter:ops:mesos full Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/). Quick configuration using the Enterprise CLI: -``` + +```text dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full dcos security org users grant telegraf dcos:adminrouter:ops:mesos full ``` -#### Open Source Authentication +### Open Source Authentication The Open Source DC/OS does not provide service accounts. Instead you can use of the following options: @@ -95,7 +98,8 @@ of the following options: Then `token_file` can be set by using the [dcos cli] to login periodically. The cli can login for at most XXX days, you will need to ensure the cli performs a new login before this time expires. -``` + +```shell dcos auth login --username foo --password bar dcos config show core.dcos_acs_token > ~/.dcos/token ``` @@ -107,7 +111,7 @@ token is compromised it cannot be revoked and may require a full reinstall of the cluster. For more information on this technique reference [this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add). -### Metrics: +## Metrics Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/) for details about field interpretation. @@ -185,9 +189,9 @@ for details about field interpretation. 
- fields: - fields are application specific -### Example Output: +## Example -``` +```shell dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md index 4e260f44256ed..c4ad4c20e3eed 100644 --- a/plugins/inputs/directory_monitor/README.md +++ b/plugins/inputs/directory_monitor/README.md @@ -5,7 +5,7 @@ The plugin will gather all files in the directory at a configurable interval (`m This plugin is intended to read files that are moved or copied to the monitored directory, and thus files should also not be used by another process or else they may fail to be gathered. Please be advised that this plugin pulls files directly after they've been in the directory for the length of the configurable `directory_duration_threshold`, and thus files should not be written 'live' to the monitored directory. If you absolutely must write files directly, they must be guaranteed to finish writing before the `directory_duration_threshold`. -### Configuration: +## Configuration ```toml [[inputs.directory_monitor]] @@ -22,7 +22,7 @@ This plugin is intended to read files that are moved or copied to the monitored ## The amount of time a file is allowed to sit in the directory before it is picked up. ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, ## set this higher so that the plugin will wait until the file is fully copied to the directory. - # directory_duration_threshold = "50ms" + # directory_duration_threshold = "50ms" # ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. # files_to_monitor = ["^.*\.csv"] @@ -37,11 +37,11 @@ This plugin is intended to read files that are moved or copied to the monitored # ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. - # file_queue_size = 100000 + # file_queue_size = 100000 # ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. 
Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" # diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md index b0a8ac05a6c19..a055a61c98299 100644 --- a/plugins/inputs/disk/README.md +++ b/plugins/inputs/disk/README.md @@ -4,9 +4,9 @@ The disk input plugin gathers metrics about disk usage. Note that `used_percent` is calculated by doing `used / (used + free)`, _not_ `used / total`, which is how the unix `df` command does it. See -https://en.wikipedia.org/wiki/Df_(Unix) for more details. +[wikipedia - df](https://en.wikipedia.org/wiki/Df_(Unix)) for more details. -### Configuration: +## Configuration ```toml [[inputs.disk]] @@ -18,7 +18,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details. ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ``` -#### Docker container +### Docker container To monitor the Docker engine host from within a container you will need to mount the host's filesystem into the container and set the `HOST_PROC` @@ -27,11 +27,11 @@ also set the `HOST_MOUNT_PREFIX` environment variable to the prefix containing the `/proc` directory, when present this variable is stripped from the reported `path` tag. -``` +```shell docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/proc telegraf ``` -### Metrics: +## Metrics - disk - tags: @@ -48,25 +48,27 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro - inodes_total (integer, files) - inodes_used (integer, files) -### Troubleshooting +## Troubleshooting On Linux, the list of disks is taken from the `/proc/self/mounts` file and a [statfs] call is made on the second column. If any expected filesystems are missing ensure that the `telegraf` user can read these files: -``` + +```shell $ sudo -u telegraf cat /proc/self/mounts | grep sda2 /dev/sda2 /home ext4 rw,relatime,data=ordered 0 0 $ sudo -u telegraf stat /home ``` It may be desired to use POSIX ACLs to provide additional access: -``` + +```shell sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ ``` -### Example Output: +## Example -``` +```shell disk,fstype=hfs,mode=ro,path=/ free=398407520256i,inodes_free=97267461i,inodes_total=121847806i,inodes_used=24580345i,total=499088621568i,used=100418957312i,used_percent=20.131039916242397 1453832006274071563 disk,fstype=devfs,mode=rw,path=/dev free=0i,inodes_free=0i,inodes_total=628i,inodes_used=628i,total=185856i,used=185856i,used_percent=100 1453832006274137913 disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274157077 diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 11e68d6961ee0..1e99e81fef8c5 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -2,7 +2,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. -### Configuration: +## Configuration ```toml # Read metrics about disk IO by device @@ -34,7 +34,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ``` -#### Docker container +### Docker container To monitor the Docker engine host from within a container you will need to mount the host's filesystem into the container and set the `HOST_PROC` @@ -44,11 +44,11 @@ it is required to use privileged mode to provide access to `/dev`. 
If you are using the `device_tags` or `name_templates` options, you will need to bind mount `/run/udev` into the container. -``` +```shell docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/hostfs/proc telegraf ``` -### Metrics: +## Metrics - diskio - tags: @@ -72,16 +72,16 @@ On linux these values correspond to the values in and [`/sys/block//stat`](https://www.kernel.org/doc/Documentation/block/stat.txt). -#### `reads` & `writes`: +### `reads` & `writes` These values increment when an I/O request completes. -#### `read_bytes` & `write_bytes`: +### `read_bytes` & `write_bytes` These values count the number of bytes read from or written to this block device. -#### `read_time` & `write_time`: +### `read_time` & `write_time` These values count the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, @@ -89,49 +89,51 @@ these values will increase at a rate greater than 1000/second; for example, if 60 read requests wait for an average of 30 ms, the read_time field will increase by 60*30 = 1800. -#### `io_time`: +### `io_time` This value counts the number of milliseconds during which the device has had I/O requests queued. -#### `weighted_io_time`: +### `weighted_io_time` This value counts the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, this value will increase as the product of the number of milliseconds times the number of requests waiting (see `read_time` above for an example). -#### `iops_in_progress`: +### `iops_in_progress` This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include I/O requests that are in the queue but not yet issued to the device driver. -#### `merged_reads` & `merged_writes`: +### `merged_reads` & `merged_writes` Reads and writes which are adjacent to each other may be merged for efficiency. Thus two 4K reads may become one 8K read before it is ultimately handed to the disk, and so it will be counted (and queued) as only one I/O. These fields lets you know how often this was done. -### Sample Queries: +## Sample Queries -#### Calculate percent IO utilization per disk and host: -``` +### Calculate percent IO utilization per disk and host + +```sql SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -#### Calculate average queue depth: +### Calculate average queue depth + `iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. 
-``` + +```sql SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -### Example Output: +## Example -``` +```shell diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000 diskio,name=centos/var_log reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000 diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000 - ``` diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md index ad05658cc2b14..2312bd2c889e3 100644 --- a/plugins/inputs/disque/README.md +++ b/plugins/inputs/disque/README.md @@ -2,11 +2,10 @@ [Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory, message broker. - -### Configuration: +## Configuration ```toml -[[inputs.disque]] +[[inputs.disque]] ## An array of URI to gather stats about. Specify an ip or hostname ## with optional port and password. ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. @@ -14,8 +13,7 @@ servers = ["localhost"] ``` -### Metrics - +## Metrics - disque - disque_host diff --git a/plugins/inputs/dmcache/README.md b/plugins/inputs/dmcache/README.md index 536d3f518bcaa..bfc30f678bdc2 100644 --- a/plugins/inputs/dmcache/README.md +++ b/plugins/inputs/dmcache/README.md @@ -6,7 +6,7 @@ This plugin requires sudo, that is why you should setup and be sure that the tel `sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run for debugging purposes. 
-### Configuration +## Configuration ```toml [[inputs.dmcache]] @@ -14,33 +14,33 @@ This plugin requires sudo, that is why you should setup and be sure that the tel per_device = true ``` -### Measurements & Fields: +## Measurements & Fields - dmcache - - length - - target - - metadata_blocksize - - metadata_used - - metadata_total - - cache_blocksize - - cache_used - - cache_total - - read_hits - - read_misses - - write_hits - - write_misses - - demotions - - promotions - - dirty - -### Tags: + - length + - target + - metadata_blocksize + - metadata_used + - metadata_total + - cache_blocksize + - cache_used + - cache_total + - read_hits + - read_misses + - write_hits + - write_misses + - demotions + - promotions + - dirty + +## Tags - All measurements have the following tags: - - device + - device -### Example Output: +## Example Output -``` +```shell $ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache * Plugin: inputs.dmcache, Collection 1 > dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776ii,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000 diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index dc8ddd90373e9..287addc20d8d9 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -2,7 +2,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) -### Configuration: +## Configuration + ```toml # Query given DNS server and gives statistics [[inputs.dns_query]] @@ -26,7 +27,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # timeout = 2 ``` -### Metrics: +## Metrics - dns_query - tags: @@ -40,8 +41,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi - result_code (int, success = 0, timeout = 1, error = 2) - rcode_value (int) +## Rcode Descriptions -### Rcode Descriptions |rcode_value|rcode|Description| |---|-----------|-----------------------------------| |0 | NoError | No Error | @@ -65,9 +66,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi |22 | BADTRUNC | Bad Truncation | |23 | BADCOOKIE | Bad/missing Server Cookie | +### Example -### Example Output: - -``` +```shell dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 ``` diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 8d75e641a1fb4..5a0585b414dca 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -6,7 +6,7 @@ docker containers. The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/). -### Configuration: +## Configuration ```toml # Read metrics about docker containers @@ -46,23 +46,23 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Whether to report for each container per-device blkio (8:0, 8:1...), ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. 
- ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting ## is honored. perdevice = true - + ## Specifies for which classes a per-device metric should be issued ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) ## Please note that this setting has no effect if 'perdevice' is set to 'true' # perdevice_include = ["cpu"] - + ## Whether to report for each container total blkio and network stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting ## is honored. total = false - + ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. - ## Possible values are 'cpu', 'blkio' and 'network' + ## Possible values are 'cpu', 'blkio' and 'network' ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. ## Please note that this setting has no effect if 'total' is set to 'false' # total_include = ["cpu", "blkio", "network"] @@ -83,23 +83,23 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) # insecure_skip_verify = false ``` -#### Environment Configuration +### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the [cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient). -#### Security +### Security Giving telegraf access to the Docker daemon expands the [attack surface](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface) that could result in an attacker gaining root access to a machine. This is especially relevant if the telegraf configuration can be changed by untrusted users. -#### Docker Daemon Permissions +### Docker Daemon Permissions Typically, telegraf must be given permission to access the docker daemon unix socket when using the default endpoint. This can be done by adding the `telegraf` unix user (created when installing a Telegraf package) to the `docker` unix group with the following command: -``` +```shell sudo usermod -aG docker telegraf ``` @@ -108,12 +108,12 @@ within the telegraf container. This can be done in the docker CLI by add the option `-v /var/run/docker.sock:/var/run/docker.sock` or adding the following lines to the telegraf container definition in a docker compose file: -``` +```yaml volumes: - /var/run/docker.sock:/var/run/docker.sock ``` -#### source tag +### source tag Selecting the containers measurements can be tricky if you have many containers with the same name. To alleviate this issue you can set the below value to `true` @@ -124,20 +124,20 @@ source_tag = true This will cause all measurements to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. 
-#### Kubernetes Labels +### Kubernetes Labels Kubernetes may add many labels to your containers, if they are not needed you may prefer to exclude them: -``` + +```json docker_label_exclude = ["annotation.kubernetes*"] ``` +### Docker-compose Labels -#### Docker-compose Labels +Docker-compose will add labels to your containers. You can limit restrict labels to selected ones, e.g. -Docker-compose will add labels to your containers. You can limit restrict labels to selected ones, e.g. - -``` +```json docker_label_include = [ "com.docker.compose.config-hash", "com.docker.compose.container-number", @@ -147,15 +147,14 @@ Docker-compose will add labels to your containers. You can limit restrict labels ] ``` - -### Metrics: +### Metrics - docker - tags: - unit - engine_host - server_version - + fields: + - fields: - n_used_file_descriptors - n_cpus - n_containers @@ -171,12 +170,12 @@ Docker-compose will add labels to your containers. You can limit restrict labels The `docker_data` and `docker_metadata` measurements are available only for some storage drivers such as devicemapper. -+ docker_data (deprecated see: `docker_devicemapper`) +- docker_data (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - + fields: + - fields: - available - total - used @@ -186,7 +185,7 @@ some storage drivers such as devicemapper. - unit - engine_host - server_version - + fields: + - fields: - available - total - used @@ -198,7 +197,7 @@ The above measurements for the devicemapper storage driver can now be found in t - engine_host - server_version - pool_name - + fields: + - fields: - pool_blocksize_bytes - data_space_used_bytes - data_space_total_bytes @@ -208,7 +207,7 @@ The above measurements for the devicemapper storage driver can now be found in t - metadata_space_available_bytes - thin_pool_minimum_free_space_bytes -+ docker_container_mem +- docker_container_mem - tags: - engine_host - server_version @@ -216,7 +215,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_name - container_status - container_version - + fields: + - fields: - total_pgmajfault - cache - mapped_file @@ -261,7 +260,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - cpu - + fields: + - fields: - throttling_periods - throttling_throttled_periods - throttling_throttled_time @@ -272,7 +271,7 @@ The above measurements for the devicemapper storage driver can now be found in t - usage_percent - container_id -+ docker_container_net +- docker_container_net - tags: - engine_host - server_version @@ -281,7 +280,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - network - + fields: + - fields: - rx_dropped - rx_bytes - rx_errors @@ -327,8 +326,8 @@ status if configured. - container_status - container_version - fields: - - health_status (string) - - failing_streak (integer) + - health_status (string) + - failing_streak (integer) - docker_container_status - tags: @@ -356,9 +355,9 @@ status if configured. 
- tasks_desired - tasks_running -### Example Output: +## Example -``` +```shell docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000 docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000 docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000 diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md index d2f0dc6144ff9..99fbcee0512fe 100644 --- a/plugins/inputs/docker_log/README.md +++ b/plugins/inputs/docker_log/README.md @@ -12,7 +12,7 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the [Official Docker Client]: https://github.com/moby/moby/tree/master/client [Engine API]: https://docs.docker.com/engine/api/v1.24/ -### Configuration +## Configuration ```toml [[inputs.docker_log]] @@ -54,14 +54,14 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the # insecure_skip_verify = false ``` -#### Environment Configuration +### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the [CLI Docker environment variables][env] [env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient -### source tag +## source tag Selecting the containers can be tricky if you have many containers with the same name. To alleviate this issue you can set the below value to `true` @@ -72,7 +72,7 @@ source_tag = true This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. -### Metrics +## Metrics - docker_log - tags: @@ -85,9 +85,9 @@ This will cause all data points to have the `source` tag be set to the first 12 - container_id - message -### Example Output +## Example Output -``` +```shell docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! 
Tags enabled: host=371ee5d3e587" 1560913872000000000 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000 diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index 9e44d99edbc07..573cbd1f79d04 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -6,7 +6,7 @@ metrics on configured domains. When using Dovecot v2.3 you are still able to use this protocol by following the [upgrading steps][upgrading]. -### Configuration: +## Configuration ```toml # Read metrics about dovecot servers @@ -23,50 +23,49 @@ the [upgrading steps][upgrading]. ## Type is one of "user", "domain", "ip", or "global" type = "global" - + ## Wildcard matches like "*.com". An empty string "" is same as "*" ## If type = "ip" filters should be filters = [""] ``` -### Metrics: +## Metrics - dovecot - tags: - - server (hostname) - - type (query type) - - ip (ip addr) - - user (username) - - domain (domain name) + - server (hostname) + - type (query type) + - ip (ip addr) + - user (username) + - domain (domain name) - fields: - - reset_timestamp (string) - - last_update (string) - - num_logins (integer) - - num_cmds (integer) - - num_connected_sessions (integer) - - user_cpu (float) - - sys_cpu (float) - - clock_time (float) - - min_faults (integer) - - maj_faults (integer) - - vol_cs (integer) - - invol_cs (integer) - - disk_input (integer) - - disk_output (integer) - - read_count (integer) - - read_bytes (integer) - - write_count (integer) - - write_bytes (integer) - - mail_lookup_path (integer) - - mail_lookup_attr (integer) - - mail_read_count (integer) - - mail_read_bytes (integer) - - mail_cache_hits (integer) - + - reset_timestamp (string) + - last_update (string) + - num_logins (integer) + - num_cmds (integer) + - num_connected_sessions (integer) + - user_cpu (float) + - sys_cpu (float) + - clock_time (float) + - min_faults (integer) + - maj_faults (integer) + - vol_cs (integer) + - invol_cs (integer) + - disk_input (integer) + - disk_output (integer) + - read_count (integer) + - read_bytes (integer) + - write_count (integer) + - write_bytes (integer) + - mail_lookup_path (integer) + - mail_lookup_attr (integer) + - mail_read_count (integer) + - mail_read_bytes (integer) + - mail_cache_hits (integer) -### Example Output: +### Example Output -``` +```shell dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907 ``` diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md index 00398760d2e9d..1570227ac1778 100644 --- a/plugins/inputs/dpdk/README.md +++ b/plugins/inputs/dpdk/README.md @@ -1,4 +1,5 @@ # Data Plane Development Kit (DPDK) Input Plugin + The `dpdk` plugin collects metrics exposed by applications built with [Data Plane Development 
Kit](https://www.dpdk.org/) which is an extensive set of open source libraries designed for accelerating packet processing workloads. @@ -23,13 +24,15 @@ to discover and test the capabilities of DPDK libraries and to explore the expos > `DPDK version >= 20.05`. The default configuration include reading common statistics from `/ethdev/stats` that is > available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable > querying `/ethdev/stats` by setting corresponding `exclude_commands` configuration option. - +> > **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK > will also require root access. This means that either access permissions have to be adjusted for socket telemetry > interface to allow Telegraf to access it, or Telegraf should run with root privileges. ## Configuration + This plugin offers multiple configuration options, please review examples below for additional usage information. + ```toml # Reads metrics from DPDK applications using v2 telemetry interface. [[inputs.dpdk]] @@ -50,7 +53,7 @@ This plugin offers multiple configuration options, please review examples below ## List of custom, application-specific telemetry commands to query ## The list of available commands depend on the application deployed. Applications can register their own commands ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands - ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: ## additional_commands = ["/l3fwd-power/stats"] # additional_commands = [] @@ -60,28 +63,34 @@ This plugin offers multiple configuration options, please review examples below exclude_commands = ["/ethdev/link_status"] ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify - ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. ## [inputs.dpdk.tags] ## dpdk_instance = "my-fwd-app" ``` ### Example: Minimal Configuration for NIC metrics + This configuration allows getting metrics for all devices reported via `/ethdev/list` command: + * `/ethdev/stats` - basic device statistics (since `DPDK 20.11`) * `/ethdev/xstats` - extended device statistics * `/ethdev/link_status` - up/down link status + ```toml [[inputs.dpdk]] device_types = ["ethdev"] ``` + Since this configuration will query `/ethdev/link_status` it's recommended to increase timeout to `socket_access_timeout = "10s"`. The [plugin collecting interval](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) should be adjusted accordingly (e.g. `interval = "30s"`). ### Example: Excluding NIC link status from being collected + Checking link status depending on underlying implementation may take more time to complete. This configuration can be used to exclude this telemetry command to allow faster response for metrics. 
+ ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -89,13 +98,16 @@ This configuration can be used to exclude this telemetry command to allow faster [inputs.dpdk.ethdev] exclude_commands = ["/ethdev/link_status"] ``` + A separate plugin instance with higher timeout settings can be used to get `/ethdev/link_status` independently. Consult [Independent NIC link status configuration](#example-independent-nic-link-status-configuration) and [Getting metrics from multiple DPDK instances running on same host](#example-getting-metrics-from-multiple-dpdk-instances-running-on-same-host) examples for further details. ### Example: Independent NIC link status configuration + This configuration allows getting `/ethdev/link_status` using separate configuration, with higher timeout. + ```toml [[inputs.dpdk]] interval = "30s" @@ -107,8 +119,10 @@ This configuration allows getting `/ethdev/link_status` using separate configura ``` ### Example: Getting application-specific metrics -This configuration allows reading custom metrics exposed by applications. Example telemetry command obtained from + +This configuration allows reading custom metrics exposed by applications. Example telemetry command obtained from [L3 Forwarding with Power Management Sample Application](https://doc.dpdk.org/guides/sample_app_ug/l3_forward_power_man.html). + ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -117,18 +131,22 @@ This configuration allows reading custom metrics exposed by applications. Exampl [inputs.dpdk.ethdev] exclude_commands = ["/ethdev/link_status"] ``` + Command entries specified in `additional_commands` should match DPDK command format: + * Command entry format: either `command` or `command,params` for commands that expect parameters, where comma (`,`) separates command from params. * Command entry length (command with params) should be `< 1024` characters. * Command length (without params) should be `< 56` characters. * Commands have to start with `/`. Providing invalid commands will prevent the plugin from starting. Additional commands allow duplicates, but they -will be removed during execution so each command will be executed only once during each metric gathering interval. +will be removed during execution so each command will be executed only once during each metric gathering interval. ### Example: Getting metrics from multiple DPDK instances running on same host + This configuration allows getting metrics from two separate applications exposing their telemetry interfaces -via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` allows distinguishing between them. +via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` allows distinguishing between them. + ```toml # Instance #1 - L3 Forwarding with Power Management Application [[inputs.dpdk]] @@ -153,22 +171,26 @@ via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` [inputs.dpdk.tags] dpdk_instance = "l2fwd-cat" ``` + This utilizes Telegraf's standard capability of [adding custom tags](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) to input plugin's measurements. ## Metrics + The DPDK socket accepts `command,params` requests and returns metric data in JSON format. All metrics from DPDK socket -become flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. +become flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. 
If DPDK response contains no information (is empty or is null) then such response will be discarded. -> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework the JSON response from DPDK +> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework the JSON response from DPDK > may contain various sets of metrics. While metrics from `/ethdev/stats` should be most stable, the `/ethdev/xstats` > may contain driver-specific metrics (depending on DPDK application configuration). The application-specific commands > like `/l3fwd-power/stats` can return their own specific set of metrics. ## Example output + The output consists of plugin name (`dpdk`), and a set of tags that identify querying hierarchy: -``` + +```shell dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fields] [timestamp] ``` @@ -177,9 +199,10 @@ dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fi | `host` | hostname of the machine (consult [Telegraf Agent configuration](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent) for additional details) | | `dpdk_instance` | custom tag from `[inputs.dpdk.tags]` (optional) | | `command` | executed command (without params) | -| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of NIC as exposed by `/ethdev/list`
For DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. | +| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of NIC as exposed by `/ethdev/list`. For DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. | When running plugin configuration below... + ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -189,7 +212,8 @@ When running plugin configuration below... ``` ...expected output for `dpdk` plugin instance running on host named `host=dpdk-host`: -``` + +```shell dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 
out_octets_encrypted=0,rx_fcoe_mbuf_allocation_errors=0,tx_q1packets=0,rx_priority0_xoff_packets=0,rx_priority7_xoff_packets=0,rx_errors=0,mac_remote_errors=0,in_pkts_invalid=0,tx_priority3_xoff_packets=0,tx_errors=0,rx_fcoe_bytes=0,rx_flow_control_xon_packets=0,rx_priority4_xoff_packets=0,tx_priority2_xoff_packets=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_management_packets=0,rx_priority7_dropped=0,rx_priority4_dropped=0,in_pkts_unchecked=0,rx_error_bytes=0,rx_size_256_to_511_packets=0,tx_priority4_xoff_packets=0,rx_priority6_xon_packets=0,tx_priority4_xon_to_xoff_packets=0,in_pkts_delayed=0,rx_priority0_mbuf_allocation_errors=0,out_octets_protected=0,tx_priority7_xon_to_xoff_packets=0,tx_priority1_xon_to_xoff_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_priority6_xon_to_xoff_packets=0,flow_director_filter_add_errors=0,rx_total_packets=99,rx_crc_errors=0,flow_director_filter_remove_errors=0,rx_missed_errors=0,tx_size_64_packets=0,rx_priority3_dropped=0,flow_director_matched_filters=0,tx_priority2_xon_to_xoff_packets=0,rx_priority1_xon_packets=0,rx_size_65_to_127_packets=99,rx_fragment_errors=0,in_pkts_notusingsa=0,rx_q0bytes=7162,rx_fcoe_dropped=0,rx_priority1_dropped=0,rx_fcoe_packets=0,rx_priority5_xoff_packets=0,out_pkts_protected=0,tx_total_packets=0,rx_priority2_dropped=0,in_pkts_late=0,tx_q1bytes=0,in_pkts_badtag=0,rx_multicast_packets=99,rx_priority6_xoff_packets=0,tx_flow_control_xoff_packets=0,rx_flow_control_xoff_packets=0,rx_priority0_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,rx_priority7_mbuf_allocation_errors=0,tx_priority0_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,tx_q0packets=0,tx_xoff_packets=0,rx_size_512_to_1023_packets=0,rx_priority3_xon_packets=0,rx_q0errors=0,rx_oversize_errors=0,tx_priority4_xon_packets=0,tx_priority5_xoff_packets=0,rx_priority5_xon_packets=0,rx_total_missed_packets=0,rx_priority4_mbuf_allocation_errors=0,tx_priority1_xon_packets=0,tx_management_packets=0,rx_priority5_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,rx_undersize_errors=0,tx_priority1_xoff_packets=0,rx_q0packets=99,tx_q2packets=0,tx_priority6_xon_packets=0,rx_good_packets=99,tx_priority5_xon_packets=0,tx_size_256_to_511_packets=0,rx_priority6_dropped=0,rx_broadcast_packets=0,tx_size_512_to_1023_packets=0,tx_priority3_xon_to_xoff_packets=0,in_pkts_unknownsci=0,in_octets_validated=0,tx_priority6_xoff_packets=0,tx_priority7_xoff_packets=0,rx_jabber_errors=0,tx_priority7_xon_packets=0,tx_priority0_xon_packets=0,in_pkts_unusedsa=0,tx_priority0_xoff_packets=0,mac_local_errors=33,rx_total_bytes=7162,in_pkts_notvalid=0,rx_length_errors=0,in_octets_decrypted=0,rx_size_128_to_255_packets=0,rx_good_bytes=7162,tx_size_65_to_127_packets=0,rx_mac_short_packet_dropped=0,tx_size_1024_to_max_packets=0,rx_priority2_mbuf_allocation_errors=0,flow_director_added_filters=0,tx_multicast_packets=0,rx_fcoe_crc_errors=0,rx_priority1_xoff_packets=0,flow_director_missed_filters=0,rx_xon_packets=0,tx_size_128_to_255_packets=0,out_pkts_encrypted=0,rx_priority4_xon_packets=0,rx_priority0_dropped=0,rx_size_1024_to_max_packets=0,tx_good_bytes=0,rx_management_dropped=0,rx_mbuf_allocation_errors=0,tx_xon_packets=0,rx_priority3_xoff_packets=0,tx_good_packets=0,tx_fcoe_bytes=0,rx_priority6_mbuf_allocation_errors=0,rx_priority2_xon_packets=0,tx_broadcast_packets=0,tx_q2bytes=0,rx_priority7_xon_packets=0,out_pkts_untagged=0,rx_priority2_xoff_packets=0,rx_priority1_mbuf_allocation_errors=0,tx_q0bytes=0,rx_size_64_packets=0,rx_priority5_dropped
=0,tx_priority2_xon_packets=0,in_pkts_nosci=0,flow_director_removed_filters=0,in_pkts_ok=0,rx_l3_l4_xsum_error=0,rx_priority3_mbuf_allocation_errors=0,tx_priority3_xon_packets=0 1606310780000000000 diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 0bf8b983cd219..b5152a3ebfab8 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -14,7 +14,7 @@ formats. The amazon-ecs-agent (though it _is_ a container running on the host) is not present in the metadata/stats endpoints. -### Configuration +## Configuration ```toml # Read metrics about ECS containers @@ -45,7 +45,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Configuration (enforce v2 metadata) +## Configuration (enforce v2 metadata) ```toml # Read metrics about ECS containers @@ -76,7 +76,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Metrics +## Metrics - ecs_task - tags: @@ -92,7 +92,7 @@ present in the metadata/stats endpoints. - limit_cpu (float) - limit_mem (float) -+ ecs_container_mem +- ecs_container_mem - tags: - cluster - task_arn @@ -158,7 +158,7 @@ present in the metadata/stats endpoints. - usage_percent - usage_total -+ ecs_container_net +- ecs_container_net - tags: - cluster - task_arn @@ -200,7 +200,7 @@ present in the metadata/stats endpoints. - io_serviced_recursive_total - io_serviced_recursive_write -+ ecs_container_meta +- ecs_container_meta - tags: - cluster - task_arn @@ -221,10 +221,9 @@ present in the metadata/stats endpoints. - started_at - type +## Example -### Example Output - -``` +```shell ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a 
usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 @@ -242,4 +241,4 @@ ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs [docker-input]: /plugins/inputs/docker/README.md [task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html -[task-metadata-endpoint-v3] https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html +[task-metadata-endpoint-v3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 0afb0e325dbdd..e39bc025edb88 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -12,6 +12,7 @@ In addition, the following optional queries are only made by the master node: [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) Specific Elasticsearch endpoints that are queried: + - Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting - Cluster Heath: /_cluster/health?level=indices - Cluster Stats: /_cluster/stats @@ -20,7 +21,7 @@ Specific Elasticsearch endpoints that are queried: Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. -### Configuration +## Configuration ```toml [[inputs.elasticsearch]] @@ -81,7 +82,7 @@ Note that specific statistics information can change between Elasticsearch versi # num_most_recent_indices = 0 ``` -### Metrics +## Metrics Emitted when `cluster_health = true`: @@ -169,7 +170,7 @@ Emitted when `cluster_stats = true`: - shards_total (float) - store_size_in_bytes (float) -+ elasticsearch_clusterstats_nodes +- elasticsearch_clusterstats_nodes - tags: - cluster_name - node_name @@ -230,7 +231,7 @@ Emitted when the appropriate `node_stats` options are set. - tx_count (float) - tx_size_in_bytes (float) -+ elasticsearch_breakers +- elasticsearch_breakers - tags: - cluster_name - node_attribute_ml.enabled @@ -291,7 +292,7 @@ Emitted when the appropriate `node_stats` options are set. - total_free_in_bytes (float) - total_total_in_bytes (float) -+ elasticsearch_http +- elasticsearch_http - tags: - cluster_name - node_attribute_ml.enabled @@ -402,7 +403,7 @@ Emitted when the appropriate `node_stats` options are set. - warmer_total (float) - warmer_total_time_in_millis (float) -+ elasticsearch_jvm +- elasticsearch_jvm - tags: - cluster_name - node_attribute_ml.enabled @@ -480,7 +481,7 @@ Emitted when the appropriate `node_stats` options are set. - swap_used_in_bytes (float) - timestamp (float) -+ elasticsearch_process +- elasticsearch_process - tags: - cluster_name - node_attribute_ml.enabled diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 333630c958703..9a805f3ca5789 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -2,7 +2,7 @@ The ethtool input plugin pulls ethernet device stats. 
Fields pulled will depend on the network device and driver. -### Configuration: +## Configuration ```toml # Returns ethtool statistics for given interfaces @@ -30,13 +30,13 @@ Interfaces can be included or ignored using: Note that loopback interfaces will be automatically ignored. -### Metrics: +## Metrics Metrics are dependent on the network device and driver. -### Example Output: +## Example Output -``` +```shell ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,interface_up=1i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,interface_up=0i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 ``` diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index c0533b513b8bf..dc99bd281f6e3 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -2,15 +2,15 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. -### IoT Hub Setup +## IoT Hub Setup The main focus for development of this plugin is Azure IoT hub: -1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ +1. Create an Azure IoT Hub by following any of the guides provided here: [Azure IoT Hub](https://docs.microsoft.com/en-us/azure/iot-hub/) 2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) 3. 
The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work -### Configuration +## Configuration ```toml [[inputs.eventhub_consumer]] @@ -98,7 +98,7 @@ The main focus for development of this plugin is Azure IoT hub: data_format = "influx" ``` -#### Environment Variables +### Environment Variables [Full documentation of the available environment variables][envvar]. diff --git a/plugins/inputs/example/README.md b/plugins/inputs/example/README.md index 6b86615b0e6a8..5778494f0af1e 100644 --- a/plugins/inputs/example/README.md +++ b/plugins/inputs/example/README.md @@ -7,7 +7,7 @@ additional information can be found. Telegraf minimum version: Telegraf x.x Plugin minimum tested version: x.x -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage `. @@ -17,12 +17,12 @@ generate it using `telegraf --usage `. example_option = "example_value" ``` -#### example_option +### example_option A more in depth description of an option can be provided here, but only do so if the option cannot be fully described in the sample config. -### Metrics +## Metrics Here you should add an optional description and links to where the user can get more information about the measurements. @@ -39,7 +39,7 @@ mapped to the output. - field1 (type, unit) - field2 (float, percent) -+ measurement2 +- measurement2 - tags: - tag3 - fields: @@ -49,29 +49,30 @@ mapped to the output. - field6 (float) - field7 (boolean) -### Sample Queries +## Sample Queries This section can contain some useful InfluxDB queries that can be used to get started with the plugin or to generate dashboards. For each query listed, describe at a high level what data is returned. Get the max, mean, and min for the measurement in the last hour: -``` + +```sql SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag ``` -### Troubleshooting +## Troubleshooting This optional section can provide basic troubleshooting steps that a user can perform. -### Example Output +## Example This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get this information. -``` +```shell measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455 measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455 ``` diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 4e3d7245422d2..e682ef4abbe0c 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -5,7 +5,7 @@ their output in any one of the accepted [Input Data Formats](https://github.com/ This plugin can be used to poll for custom metrics from any source. -### Configuration: +## Configuration ```toml [[inputs.exec]] @@ -32,15 +32,17 @@ This plugin can be used to poll for custom metrics from any source. Glob patterns in the `command` option are matched on every run, so adding new scripts that match the pattern will cause them to be picked up immediately. -### Example: +## Example This script produces static values, since no timestamp is specified the values are at the current time. + ```sh #!/bin/sh echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i' ``` It can be paired with the following configuration and will be run at the `interval` of the agent. 
+ ```toml [[inputs.exec]] commands = ["sh /tmp/test.sh"] @@ -48,18 +50,19 @@ It can be paired with the following configuration and will be run at the `interv data_format = "influx" ``` -### Common Issues: +## Common Issues -#### My script works when I run it by hand, but not when Telegraf is running as a service. +### My script works when I run it by hand, but not when Telegraf is running as a service This may be related to the Telegraf service running as a different user. The official packages run Telegraf as the `telegraf` user and group on Linux systems. -#### With a PowerShell on Windows, the output of the script appears to be truncated. +### With a PowerShell on Windows, the output of the script appears to be truncated You may need to set a variable in your script to increase the number of columns available for output: -``` + +```shell $host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) ``` diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index aa37e7cd7696a..c5299713cece8 100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -1,7 +1,7 @@ # Execd Input Plugin -The `execd` plugin runs an external program as a long-running daemon. -The programs must output metrics in any one of the accepted +The `execd` plugin runs an external program as a long-running daemon. +The programs must output metrics in any one of the accepted [Input Data Formats][] on the process's STDOUT, and is expected to stay running. If you'd instead like the process to collect metrics and then exit, check out the [inputs.exec][] plugin. @@ -13,7 +13,7 @@ new line to the process's STDIN. STDERR from the process will be relayed to Telegraf as errors in the logs. -### Configuration: +## Configuration ```toml [[inputs.execd]] @@ -41,9 +41,9 @@ STDERR from the process will be relayed to Telegraf as errors in the logs. data_format = "influx" ``` -### Example +## Example -##### Daemon written in bash using STDIN signaling +### Daemon written in bash using STDIN signaling ```bash #!/bin/bash @@ -62,7 +62,7 @@ done signal = "STDIN" ``` -##### Go daemon using SIGHUP +### Go daemon using SIGHUP ```go package main @@ -96,7 +96,7 @@ func main() { signal = "SIGHUP" ``` -##### Ruby daemon running standalone +### Ruby daemon running standalone ```ruby #!/usr/bin/env ruby diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md index 1762bbaf209cb..221f9d5b44c68 100644 --- a/plugins/inputs/fail2ban/README.md +++ b/plugins/inputs/fail2ban/README.md @@ -9,7 +9,7 @@ Acquiring the required permissions can be done using several methods: - [Use sudo](#using-sudo) run fail2ban-client. - Run telegraf as root. (not recommended) -### Configuration +## Configuration ```toml # Read metrics from fail2ban. @@ -18,7 +18,7 @@ Acquiring the required permissions can be done using several methods: use_sudo = false ``` -### Using sudo +## Using sudo Make sure to set `use_sudo = true` in your configuration file. @@ -26,20 +26,21 @@ You will also need to update your sudoers file. It is recommended to modify a file in the `/etc/sudoers.d` directory using `visudo`: ```bash -$ sudo visudo -f /etc/sudoers.d/telegraf +sudo visudo -f /etc/sudoers.d/telegraf ``` Add the following lines to the file, these commands allow the `telegraf` user to call `fail2ban-client` without needing to provide a password and disables logging of the call in the auth.log. Consult `man 8 visudo` and `man 5 sudoers` for details. 
-``` + +```text Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN Defaults!FAIL2BAN !logfile, !syslog, !pam_session ``` -### Metrics +## Metrics - fail2ban - tags: @@ -50,7 +51,7 @@ Defaults!FAIL2BAN !logfile, !syslog, !pam_session ### Example Output -``` +```shell # fail2ban-client status sshd Status for the jail: sshd |- Filter @@ -63,6 +64,6 @@ Status for the jail: sshd `- Banned IP list: 192.168.0.1 192.168.0.2 ``` -``` +```shell fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000 ``` diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md index 54c20310224b3..d02af0d5b8f74 100644 --- a/plugins/inputs/fibaro/README.md +++ b/plugins/inputs/fibaro/README.md @@ -3,7 +3,7 @@ The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values of hooked devices. Those values could be true (1) or false (0) for switches, percentage for dimmers, temperature, etc. -### Configuration: +## Configuration ```toml # Read devices value(s) from a Fibaro controller @@ -20,7 +20,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers # timeout = "5s" ``` -### Metrics: +## Metrics - fibaro - tags: @@ -36,10 +36,9 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers - value (float) - value2 (float, when available from device) +## Example Output -### Example Output: - -``` +```shell fibaro,deviceId=9,host=vm1,name=Fenêtre\ haute,room=Cuisine,section=Cuisine,type=com.fibaro.FGRM222 energy=2.04,power=0.7,value=99,value2=99 1529996807000000000 fibaro,deviceId=10,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1529996807000000000 fibaro,deviceId=13,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 energy=4.33,power=0.7,value=99,value2=99 1529996807000000000 diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 8ec406da7be3d..91ed7a8e1bc56 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -6,7 +6,7 @@ the selected [input data format][]. **Note:** If you wish to parse only newly appended lines use the [tail][] input plugin instead. -### Configuration: +## Configuration ```toml [[inputs.file]] @@ -20,10 +20,10 @@ plugin instead. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" ``` diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index 81fc75908e798..9c0d4d79dd3b9 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -2,7 +2,7 @@ Reports the number and total size of files in specified directories. -### Configuration: +## Configuration ```toml [[inputs.filecount]] @@ -42,7 +42,7 @@ Reports the number and total size of files in specified directories. 
mtime = "0s" ``` -### Metrics +## Metrics - filecount - tags: @@ -51,9 +51,9 @@ Reports the number and total size of files in specified directories. - count (integer) - size_bytes (integer) -### Example Output: +## Example Output -``` +```shell filecount,directory=/var/cache/apt count=7i,size_bytes=7438336i 1530034445000000000 filecount,directory=/tmp count=17i,size_bytes=28934786i 1530034445000000000 ``` diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 840cafb53c06a..c8670471a9870 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -2,7 +2,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. -### Configuration: +## Configuration ```toml # Read stats about given file(s) @@ -16,22 +16,22 @@ The filestat plugin gathers metrics about file existence, size, and other stats. md5 = false ``` -### Measurements & Fields: +## Measurements & Fields - filestat - - exists (int, 0 | 1) - - size_bytes (int, bytes) - - modification_time (int, unix time nanoseconds) - - md5 (optional, string) + - exists (int, 0 | 1) + - size_bytes (int, bytes) + - modification_time (int, unix time nanoseconds) + - md5 (optional, string) -### Tags: +## Tags - All measurements have the following tags: - - file (the path the to file, as specified in the config) + - file (the path the to file, as specified in the config) -### Example Output: +### Example -``` +```shell $ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test * Plugin: filestat, Collection 1 > filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351 diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md index 7e1f351fa0b7f..9b118c0814a78 100644 --- a/plugins/inputs/fireboard/README.md +++ b/plugins/inputs/fireboard/README.md @@ -4,7 +4,7 @@ The fireboard plugin gathers the real time temperature data from fireboard thermometers. In order to use this input plugin, you'll need to sign up to use the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). -### Configuration +## Configuration ```toml [[inputs.fireboard]] @@ -16,23 +16,23 @@ the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). # http_timeout = 4 ``` -#### auth_token +### auth_token In lieu of requiring a username and password, this plugin requires an authentication token that you can generate using the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html#Authentication). -#### url +### url While there should be no reason to override the URL, the option is available in case Fireboard changes their site, etc. -#### http_timeout +### http_timeout If you need to increase the HTTP timeout, you can do so here. You can set this value in seconds. The default value is four (4) seconds. -### Metrics +## Metrics The Fireboard REST API docs have good examples of the data that is available, currently this input only returns the real time temperatures. Temperature @@ -47,12 +47,12 @@ values are included if they are less than a minute old. - fields: - temperature (float, unit) -### Example Output +## Example This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get this information. 
-``` +```shell fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 ``` diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md index 3fabbddb75012..a7947ab2a7397 100644 --- a/plugins/inputs/fluentd/README.md +++ b/plugins/inputs/fluentd/README.md @@ -7,7 +7,8 @@ You might need to adjust your fluentd configuration, in order to reduce series c According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. example configuration with `@id` parameter for http plugin: -``` + +```text @type http @id http @@ -15,7 +16,7 @@ example configuration with `@id` parameter for http plugin: ``` -### Configuration: +## Configuration ```toml # Read metrics exposed by fluentd in_monitor plugin @@ -29,30 +30,30 @@ example configuration with `@id` parameter for http plugin: ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) exclude = [ - "monitor_agent", - "dummy", + "monitor_agent", + "dummy", ] ``` -### Measurements & Fields: +## Measurements & Fields Fields may vary depending on the plugin type - fluentd - - retry_count (float, unit) - - buffer_queue_length (float, unit) - - buffer_total_queued_size (float, unit) + - retry_count (float, unit) + - buffer_queue_length (float, unit) + - buffer_total_queued_size (float, unit) -### Tags: +## Tags - All measurements have the following tags: - - plugin_id (unique plugin id) - - plugin_type (type of the plugin e.g. s3) + - plugin_id (unique plugin id) + - plugin_type (type of the plugin e.g. s3) - plugin_category (plugin category e.g. output) -### Example Output: +## Example Output -``` +```shell $ telegraf --config fluentd.conf --input-filter fluentd --test * Plugin: inputs.fluentd, Collection 1 > fluentd,host=T440s,plugin_id=object:9f748c,plugin_category=input,plugin_type=dummy buffer_total_queued_size=0,buffer_queue_length=0,retry_count=0 1492006105000000000 From 79e479c6918db672d41ab5d6b44cf000dd27dd98 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 11:56:55 -0700 Subject: [PATCH 078/133] fix: markdown: resolve all markdown issues with g-h (#10172) --- plugins/inputs/github/README.md | 20 ++++++++--------- plugins/inputs/gnmi/README.md | 11 ++++----- plugins/inputs/graylog/README.md | 9 ++++---- plugins/inputs/haproxy/README.md | 18 +++++++-------- plugins/inputs/hddtemp/README.md | 9 ++++---- plugins/inputs/http/README.md | 7 +++--- plugins/inputs/http_listener_v2/README.md | 27 +++++++++++++---------- plugins/inputs/http_response/README.md | 11 +++++---- plugins/inputs/httpjson/README.md | 17 +++++++------- 9 files changed, 65 insertions(+), 64 deletions(-) diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index a920a48f54e1d..ed47cdfc4766c 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -5,14 +5,14 @@ Gather repository information from [GitHub][] hosted repositories. **Note:** Telegraf also contains the [webhook][] input which can be used as an alternative method for collecting repository information. -### Configuration +## Configuration ```toml [[inputs.github]] ## List of repositories to monitor repositories = [ - "influxdata/telegraf", - "influxdata/influxdb" + "influxdata/telegraf", + "influxdata/influxdb" ] ## Github API access token. 
Unauthenticated requests are limited to 60 per hour. @@ -25,11 +25,11 @@ alternative method for collecting repository information. # http_timeout = "5s" ## List of additional fields to query. - ## NOTE: Getting those fields might involve issuing additional API-calls, so please - ## make sure you do not exceed the rate-limit of GitHub. - ## - ## Available fields are: - ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. + ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) # additional_fields = [] ``` @@ -52,7 +52,7 @@ alternative method for collecting repository information. When the [internal][] input is enabled: -+ internal_github +- internal_github - tags: - access_token - An obfuscated reference to the configured access token or "Unauthenticated" - fields: @@ -72,7 +72,7 @@ In the following we list the available options with the required API-calls and t ### Example Output -``` +```shell github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000 ``` diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index aa940f76d4e14..e7bbee0ea71dd 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -2,11 +2,11 @@ This plugin consumes telemetry data based on the [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. This input plugin is vendor-agnostic and is supported on any platform that supports the gNMI spec. -For Cisco devices: -It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. +For Cisco devices: +It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. 
-### Configuration +## Configuration ```toml [[inputs.gnmi]] @@ -66,8 +66,9 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64- # heartbeat_interval = "60s" ``` -### Example Output -``` +## Example Output + +```shell ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 ``` diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 6a835f1d60a4f..8f07147c6b5eb 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -4,15 +4,14 @@ The Graylog plugin can collect data from remote Graylog service URLs. Plugin currently support two type of end points:- -- multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) -- namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) +- multiple (e.g. `http://[graylog-server-ip]:12900/system/metrics/multiple`) +- namespace (e.g. `http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`) End Point can be a mix of one multiple end point and several namespaces end points - Note: if namespace end point specified metrics array will be ignored for that call. -### Configuration: +## Configuration ```toml # Read flattened metrics from one or more GrayLog HTTP endpoints @@ -52,4 +51,4 @@ Note: if namespace end point specified metrics array will be ignored for that ca # insecure_skip_verify = false ``` -Please refer to GrayLog metrics api browser for full metric end points http://host:12900/api-browser +Please refer to GrayLog metrics api browser for full metric end points `http://host:12900/api-browser` diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 86fbb986b696a..4ce0070d91820 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -5,7 +5,7 @@ The [HAProxy](http://www.haproxy.org/) input plugin gathers using the [stats socket](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9.3) or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9) of a HAProxy server. -### Configuration: +## Configuration ```toml # Read metrics of HAProxy, via socket or HTTP stats page @@ -40,7 +40,7 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management. # insecure_skip_verify = false ``` -#### HAProxy Configuration +### HAProxy Configuration The following information may be useful when getting started, but please consult the HAProxy documentation for complete and up to date instructions. @@ -51,8 +51,7 @@ settings. To enable the unix socket begin by reading about the [`stats socket`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#3.1-stats%20socket) option. 
- -#### servers +### servers Server addresses must explicitly start with 'http' if you wish to use HAProxy status page. Otherwise, addresses will be assumed to be an UNIX socket and @@ -65,14 +64,14 @@ To use HTTP Basic Auth add the username and password in the userinfo section of the URL: `http://user:password@1.2.3.4/haproxy?stats`. The credentials are sent via the `Authorization` header and not using the request URL. - -#### keep_field_names +### keep_field_names By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names. The following renames are made: + - `pxname` -> `proxy` - `svname` -> `sv` - `act` -> `active_servers` @@ -86,7 +85,7 @@ The following renames are made: - `hrsp_5xx` -> `http_response.5xx` - `hrsp_other` -> `http_response.other` -### Metrics: +## Metrics For more details about collected metrics reference the [HAProxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). @@ -110,7 +109,8 @@ documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). - `lastsess` (int) - **all other stats** (int) -### Example Output: -``` +## Example Output + +```shell haproxy,server=/run/haproxy/admin.sock,proxy=public,sv=FRONTEND,type=frontend http_response.other=0i,req_rate_max=1i,comp_byp=0i,status="OPEN",rate_lim=0i,dses=0i,req_rate=0i,comp_rsp=0i,bout=9287i,comp_in=0i,mode="http",smax=1i,slim=2000i,http_response.1xx=0i,conn_rate=0i,dreq=0i,ereq=0i,iid=2i,rate_max=1i,http_response.2xx=1i,comp_out=0i,intercepted=1i,stot=2i,pid=1i,http_response.5xx=1i,http_response.3xx=0i,http_response.4xx=0i,conn_rate_max=1i,conn_tot=2i,dcon=0i,bin=294i,rate=0i,sid=0i,req_tot=2i,scur=0i,dresp=0i 1513293519000000000 ``` diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index d2d3e4f13ec89..71801a4eb1447 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -4,7 +4,7 @@ This plugin reads data from hddtemp daemon. Hddtemp should be installed and its daemon running. -### Configuration +## Configuration ```toml [[inputs.hddtemp]] @@ -19,7 +19,7 @@ Hddtemp should be installed and its daemon running. # devices = ["sda", "*"] ``` -### Metrics +## Metrics - hddtemp - tags: @@ -31,10 +31,9 @@ Hddtemp should be installed and its daemon running. - fields: - temperature +## Example output -### Example output - -``` +```shell hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000 hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000 diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 95591b9f0ad22..11385806dd8ea 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -2,8 +2,7 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The endpoint should have metrics formatted in one of the supported [input data formats](../../../docs/DATA_FORMATS_INPUT.md). Each data format has its own unique set of configuration options which can be added to the input configuration. - -### Configuration: +## Configuration ```toml # Read formatted metrics from one or more HTTP endpoints @@ -73,7 +72,7 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. 
The ``` -### Metrics: +## Metrics The metrics collected by this input plugin will depend on the configured `data_format` and the payload returned by the HTTP endpoint(s). @@ -83,6 +82,6 @@ The default values below are added if the input format does not specify a value: - tags: - url -### Optional Cookie Authentication Settings: +## Optional Cookie Authentication Settings The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index a87ec3f833890..9eebb3cd9a2aa 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,15 +1,15 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in -[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] -or [`influxdb_v2_listener`][influxdb_v2_listener] instead. +HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in +[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] +or [`influxdb_v2_listener`][influxdb_v2_listener] instead. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener] or [`influxdb_v2_listener`][influxdb_v2_listener]. -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -69,24 +69,27 @@ This is a sample configuration for the plugin. data_format = "influx" ``` -### Metrics: +## Metrics Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`. -### Troubleshooting: +## Troubleshooting -**Send Line Protocol** -``` +Send Line Protocol: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -**Send JSON** -``` +Send JSON: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' ``` -**Send query params** -``` +Send query params: + +```shell curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' ``` diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 81b512e80743f..bd800457faf8a 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -2,7 +2,7 @@ This input plugin checks HTTP/HTTPS connections. -### Configuration: +## Configuration ```toml # HTTP/HTTPS request given an address a method and a timeout @@ -79,7 +79,7 @@ This input plugin checks HTTP/HTTPS connections. # interface = "eth0" ``` -### Metrics: +## Metrics - http_response - tags: @@ -96,7 +96,7 @@ This input plugin checks HTTP/HTTPS connections. 
- result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) -#### `result` / `result_code` +### `result` / `result_code` Upon finishing polling the target server, the plugin registers the result of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding with that tag value. @@ -112,9 +112,8 @@ This tag is used to expose network and plugin errors. HTTP errors are considered |dns_error | 5 |There was a DNS error while attempting to connect to the host| |response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| +## Example Output -### Example Output: - -``` +```shell http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 ``` diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 3f7efb10a4098..e001e5b07b266 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,10 +1,10 @@ # HTTP JSON Input Plugin -### DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement. +**DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement** The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. -### Configuration: +## Configuration ```toml [[inputs.httpjson]] @@ -54,28 +54,28 @@ The httpjson plugin collects data from HTTP URLs which respond with JSON. It fl # apiVersion = "v1" ``` -### Measurements & Fields: +## Measurements & Fields - httpjson - - response_time (float): Response time in seconds + - response_time (float): Response time in seconds Additional fields are dependant on the response of the remote service being polled. -### Tags: +## Tags - All measurements have the following tags: - - server: HTTP origin as defined in configuration as `servers`. + - server: HTTP origin as defined in configuration as `servers`. Any top level keys listed under `tag_keys` in the configuration are added as tags. Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects. - -### Examples Output: +## Examples Output This plugin understands responses containing a single JSON object, or a JSON Array of Objects. 
**Object Output:** Given the following response body: + ```json { "a": 0.5, @@ -87,6 +87,7 @@ Given the following response body: "service": "service01" } ``` + The following metric is produced: `httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` From de6c2f74d68b0588311809a367e3982e8268bcfe Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 12:03:55 -0700 Subject: [PATCH 079/133] feat: add retry to 413 errors with InfluxDB output (#10130) --- plugins/outputs/influxdb_v2/http.go | 33 +++++++- plugins/outputs/influxdb_v2/http_test.go | 98 ++++++++++++++++++++++++ 2 files changed, 129 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index c076580255740..098ebd9dd5c4d 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -177,6 +177,12 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error if c.BucketTag == "" { err := c.writeBatch(ctx, c.Bucket, metrics) if err != nil { + if err, ok := err.(*APIError); ok { + if err.StatusCode == http.StatusRequestEntityTooLarge { + return c.splitAndWriteBatch(ctx, c.Bucket, metrics) + } + } + return err } } else { @@ -203,6 +209,12 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error for bucket, batch := range batches { err := c.writeBatch(ctx, bucket, batch) if err != nil { + if err, ok := err.(*APIError); ok { + if err.StatusCode == http.StatusRequestEntityTooLarge { + return c.splitAndWriteBatch(ctx, c.Bucket, metrics) + } + } + return err } } @@ -210,6 +222,17 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error return nil } +func (c *httpClient) splitAndWriteBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { + log.Printf("W! [outputs.influxdb_v2] Retrying write after splitting metric payload in half to reduce batch size") + midpoint := len(metrics) / 2 + + if err := c.writeBatch(ctx, bucket, metrics[:midpoint]); err != nil { + return err + } + + return c.writeBatch(ctx, bucket, metrics[midpoint:]) +} + func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { loc, err := makeWriteURL(*c.url, c.Organization, bucket) if err != nil { @@ -257,11 +280,17 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te } switch resp.StatusCode { + // request was too large, send back to try again + case http.StatusRequestEntityTooLarge: + log.Printf("E! [outputs.influxdb_v2] Failed to write metric, request was too large (413)") + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: desc, + } case // request was malformed: http.StatusBadRequest, - // request was too large: - http.StatusRequestEntityTooLarge, // request was received but server refused to process it due to a semantic problem with the request. // for example, submitting metrics outside the retention period. // Clients should *not* repeat the request and the metrics should be dropped. 
diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 0637cd8060bd0..e44729eec1b7a 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -111,3 +111,101 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { err = client.Write(ctx, metrics) require.NoError(t, err) } + +func TestTooLargeWriteRetry(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v2/write": + err := r.ParseForm() + require.NoError(t, err) + + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + + // Ensure metric body size is small + if len(body) > 16 { + w.WriteHeader(http.StatusRequestEntityTooLarge) + } else { + w.WriteHeader(http.StatusNoContent) + } + + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + addr := &url.URL{ + Scheme: "http", + Host: ts.Listener.Addr().String(), + } + + config := &influxdb.HTTPConfig{ + URL: addr, + Bucket: "telegraf", + BucketTag: "bucket", + ExcludeBucketTag: true, + } + + client, err := influxdb.NewHTTPClient(config) + require.NoError(t, err) + + // Together the metric batch size is too big, split up, we get success + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "bucket": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "bucket": "bar", + }, + map[string]interface{}{ + "value": 99.0, + }, + time.Unix(0, 0), + ), + } + + ctx := context.Background() + err = client.Write(ctx, metrics) + require.NoError(t, err) + + // These metrics are too big, even after splitting in half, expect error + hugeMetrics := []telegraf.Metric{ + testutil.MustMetric( + "reallyLargeMetric", + map[string]string{ + "bucket": "foobar", + }, + map[string]interface{}{ + "value": 123.456, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "evenBiggerMetric", + map[string]string{ + "bucket": "fizzbuzzbang", + }, + map[string]interface{}{ + "value": 999.999, + }, + time.Unix(0, 0), + ), + } + + err = client.Write(ctx, hugeMetrics) + require.Error(t, err) +} From 84562877ccfefdf7f201bf9fec2ec23bb196a2fc Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 24 Nov 2021 12:18:53 -0700 Subject: [PATCH 080/133] chore: clean up all markdown lint errors for input plugins i through m (#10173) --- plugins/inputs/icinga2/README.md | 30 +- plugins/inputs/infiniband/README.md | 10 +- plugins/inputs/influxdb/README.md | 20 +- plugins/inputs/influxdb_listener/README.md | 9 +- plugins/inputs/influxdb_v2_listener/README.md | 9 +- plugins/inputs/intel_powerstat/README.md | 108 ++-- plugins/inputs/intel_rdt/README.md | 77 +-- plugins/inputs/internal/README.md | 68 ++- plugins/inputs/internet_speed/README.md | 3 +- plugins/inputs/interrupts/README.md | 13 +- plugins/inputs/ipmi_sensor/README.md | 38 +- plugins/inputs/ipset/README.md | 24 +- plugins/inputs/iptables/README.md | 36 +- plugins/inputs/ipvs/README.md | 14 +- plugins/inputs/jenkins/README.md | 17 +- plugins/inputs/jolokia/README.md | 9 +- plugins/inputs/jolokia2/README.md | 18 +- .../inputs/jti_openconfig_telemetry/README.md | 4 +- plugins/inputs/kafka_consumer/README.md | 2 +- .../inputs/kafka_consumer_legacy/README.md | 2 +- plugins/inputs/kapacitor/README.md | 276 +++++++---- plugins/inputs/kernel/README.md | 26 +- plugins/inputs/kernel_vmstat/README.md | 203 ++++---- plugins/inputs/kibana/README.md | 14 +- 
plugins/inputs/kinesis_consumer/README.md | 22 +- plugins/inputs/knx_listener/README.md | 19 +- plugins/inputs/kube_inventory/README.md | 26 +- plugins/inputs/kubernetes/README.md | 20 +- plugins/inputs/lanz/README.md | 30 +- plugins/inputs/leofs/README.md | 134 ++--- plugins/inputs/linux_sysctl_fs/README.md | 4 +- plugins/inputs/logparser/README.md | 13 +- plugins/inputs/logstash/README.md | 12 +- plugins/inputs/lustre2/README.md | 11 +- plugins/inputs/lvm/README.md | 10 +- plugins/inputs/mailchimp/README.md | 4 +- plugins/inputs/marklogic/README.md | 8 +- plugins/inputs/mcrouter/README.md | 14 +- plugins/inputs/mdstat/README.md | 17 +- plugins/inputs/mem/README.md | 10 +- plugins/inputs/memcached/README.md | 16 +- plugins/inputs/mesos/README.md | 467 +++++++++--------- plugins/inputs/minecraft/README.md | 23 +- plugins/inputs/modbus/README.md | 25 +- plugins/inputs/monit/README.md | 21 +- plugins/inputs/multifile/README.md | 28 +- plugins/inputs/mysql/README.md | 238 ++++----- 47 files changed, 1172 insertions(+), 1030 deletions(-) diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md index fb36d36f3730f..c6ecadb0c704c 100644 --- a/plugins/inputs/icinga2/README.md +++ b/plugins/inputs/icinga2/README.md @@ -6,7 +6,7 @@ The icinga2 plugin uses the icinga2 remote API to gather status on running services and hosts. You can read Icinga2's documentation for their remote API [here](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api) -### Configuration: +## Configuration ```toml # Description @@ -32,24 +32,24 @@ services and hosts. You can read Icinga2's documentation for their remote API # insecure_skip_verify = true ``` -### Measurements & Fields: +## Measurements & Fields - All measurements have the following fields: - - name (string) - - state_code (int) + - name (string) + - state_code (int) -### Tags: +## Tags - All measurements have the following tags: - - check_command - The short name of the check command - - display_name - The name of the service or host - - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services - - source - The icinga2 host - - port - The icinga2 port - - scheme - The icinga2 protocol (http/https) - - server - The server the check_command is running for + - check_command - The short name of the check command + - display_name - The name of the service or host + - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services + - source - The icinga2 host + - port - The icinga2 port + - scheme - The icinga2 protocol (http/https) + - server - The server the check_command is running for -### Sample Queries: +## Sample Queries ```sql SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status @@ -58,9 +58,9 @@ SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // SELECT * FROM "icinga2_services" WHERE state_code = 3 AND time > now() - 24h // Service with UNKNOWN status ``` -### Example Output: +## Example Output -``` +```text $ ./telegraf -config telegraf.conf -input-filter icinga2 -test icinga2_hosts,display_name=router-fr.eqx.fr,check_command=hostalive-custom,host=test-vm,source=localhost,port=5665,scheme=https,state=ok name="router-fr.eqx.fr",state=0 1492021603000000000 ``` diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md index bc5b03543c375..28eed67c7b376 100644 --- a/plugins/inputs/infiniband/README.md +++ b/plugins/inputs/infiniband/README.md @@ -6,14 +6,14 @@ system. 
These are the counters that can be found in **Supported Platforms**: Linux -### Configuration +## Configuration ```toml [[inputs.infiniband]] # no configuration ``` -### Metrics +## Metrics Actual metrics depend on the InfiniBand devices, the plugin uses a simple mapping from counter -> counter value. @@ -49,10 +49,8 @@ mapping from counter -> counter value. - unicast_xmit_packets (integer) - VL15_dropped (integer) +## Example Output - -### Example Output - -``` +```shell infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 ``` diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 8ba686aab1bd1..3d4ac5a8d40de 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -1,13 +1,13 @@ # InfluxDB Input Plugin -The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our -[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) -for detailed information about `influxdb` metrics. +The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our +[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) +for detailed information about `influxdb` metrics. This plugin can also gather metrics from endpoints that expose InfluxDB-formatted endpoints. See below for more information. -### Configuration: +## Configuration ```toml # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints @@ -37,7 +37,7 @@ InfluxDB-formatted endpoints. See below for more information. timeout = "5s" ``` -### Measurements & Fields +## Measurements & Fields **Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: @@ -80,7 +80,7 @@ InfluxDB-formatted endpoints. See below for more information. - **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard). - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. - **bytesWritten**: Total number of bytes written to the hinted handoff queue. - - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. @@ -125,7 +125,7 @@ InfluxDB-formatted endpoints. See below for more information. - **HeapInuse**: Number of bytes in in-use spans. - **HeapObjects**: Number of allocated heap objects. - **HeapReleased**: Number of bytes of physical memory returned to the OS. 
- - **HeapSys**: Number of bytes of heap memory obtained from the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. - **LastGC**: Time the last garbage collection finished. - **Lookups**: Number of pointer lookups performed by the runtime. - **MCacheInuse**: Number of bytes of allocated mcache structures. @@ -258,9 +258,9 @@ InfluxDB-formatted endpoints. See below for more information. - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node, but did not meet the requested consistency level. - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration. -### Example Output: +## Example Output -``` +```sh telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test * Plugin: influxdb, Collection 1 > influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636 @@ -292,7 +292,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test > influxdb_shard,host=tyrion n_shards=4i 1463590500247354636 ``` -### InfluxDB-formatted endpoints +## InfluxDB-formatted endpoints The influxdb plugin can collect InfluxDB-formatted data from JSON endpoints. Whether associated with an Influx database or not. diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index 0912c36087b75..19cc1069ae658 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -18,7 +18,7 @@ receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. -### Configuration: +## Configuration ```toml [[inputs.influxdb_listener]] @@ -64,14 +64,15 @@ submits data to InfluxDB determines the destination database. # basic_password = "barfoo" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body. -### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md index 71fa6c19bee3a..11c95c6968d17 100644 --- a/plugins/inputs/influxdb_v2_listener/README.md +++ b/plugins/inputs/influxdb_v2_listener/README.md @@ -11,7 +11,7 @@ defer to the output plugins configuration. Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration ```toml [[inputs.influxdb_v2_listener]] @@ -42,14 +42,15 @@ Telegraf minimum version: Telegraf 1.16.0 # token = "some-long-shared-secret-token" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body. -### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index 009c8cafc1cfb..4b0b88ab7fc32 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ b/plugins/inputs/intel_powerstat/README.md @@ -1,11 +1,13 @@ # Intel PowerStat Input Plugin -This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. 
-Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. -Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems -to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics. +This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. + +Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. +Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems +to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics. + +## Configuration -### Configuration: ```toml # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. [[inputs.intel_powerstat]] @@ -17,52 +19,65 @@ to take preventive/corrective actions based on platform busyness, CPU temperatur ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" # cpu_metrics = [] ``` -### Example: Configuration with no per-CPU telemetry + +## Example: Configuration with no per-CPU telemetry + This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: + ```toml [[inputs.intel_powerstat]] cpu_metrics = [] ``` -### Example: Configuration with no per-CPU telemetry - equivalent case +## Example: Configuration with no per-CPU telemetry - equivalent case + This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: + ```toml [[inputs.intel_powerstat]] ``` -### Example: Configuration for CPU Temperature and Frequency only +## Example: Configuration for CPU Temperature and Frequency only + This configuration allows getting global metrics plus subset of per-CPU metrics (CPU Temperature and Current Frequency): + ```toml [[inputs.intel_powerstat]] cpu_metrics = ["cpu_frequency", "cpu_temperature"] ``` -### Example: Configuration with all available metrics +## Example: Configuration with all available metrics + This configuration allows getting global metrics and all per-CPU metrics: + ```toml [[inputs.intel_powerstat]] cpu_metrics = ["cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"] ``` -### SW Dependencies: +## SW Dependencies + Plugin is based on Linux Kernel modules that expose specific metrics over `sysfs` or `devfs` interfaces. The following dependencies are expected by plugin: + - _intel-rapl_ module which exposes Intel Runtime Power Limiting metrics over `sysfs` (`/sys/devices/virtual/powercap/intel-rapl`), - _msr_ kernel module that provides access to processor model specific registers over `devfs` (`/dev/cpu/cpu%d/msr`), -- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`). +- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`). Minimum kernel version required is 3.13 to satisfy all requirements. Please make sure that kernel modules are loaded and running. You might have to manually enable them by using `modprobe`. 
Exact commands to be executed are: -``` + +```sh sudo modprobe cpufreq-stats sudo modprobe msr sudo modprobe intel_rapl ``` -**Telegraf with Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)** +**Telegraf with Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)** to retrieve data for calculation of most critical per-CPU specific metrics: + - `cpu_busy_frequency_mhz` - `cpu_temperature_celsius` - `cpu_c1_state_residency_percent` @@ -71,23 +86,25 @@ to retrieve data for calculation of most critical per-CPU specific metrics: To expose other Intel PowerStat metrics root access may or may not be required (depending on OS type or configuration). -### HW Dependencies: -Specific metrics require certain processor features to be present, otherwise Intel PowerStat plugin won't be able to -read them. When using Linux Kernel based OS, user can detect supported processor features reading `/proc/cpuinfo` file. +## HW Dependencies + +Specific metrics require certain processor features to be present, otherwise Intel PowerStat plugin won't be able to +read them. When using Linux Kernel based OS, user can detect supported processor features reading `/proc/cpuinfo` file. Plugin assumes crucial properties are the same for all CPU cores in the system. The following processor properties are examined in more detail in this section: processor _cpu family_, _model_ and _flags_. The following processor properties are required by the plugin: -- Processor _cpu family_ must be Intel (0x6) - since data used by the plugin assumes Intel specific + +- Processor _cpu family_ must be Intel (0x6) - since data used by the plugin assumes Intel specific model specific registers for all features - The following processor flags shall be present: - - "_msr_" shall be present for plugin to read platform data from processor model specific registers and collect - the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_, + - "_msr_" shall be present for plugin to read platform data from processor model specific registers and collect + the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_, _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_, _powerstat_core._cpu_c6_state_residency_ - - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_, + - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_, _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_ - - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_ -- Processor _Model number_ must be one of the following values for plugin to read _powerstat_core.cpu_c1_state_residency_ + - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_ +- Processor _Model number_ must be one of the following values for plugin to read _powerstat_core.cpu_c1_state_residency_ and _powerstat_core.cpu_c6_state_residency_ metrics: | Model number | Processor name | @@ -95,12 +112,12 @@ and _powerstat_core.cpu_c6_state_residency_ metrics: | 0x37 | Intel Atom® Bay Trail | | 0x4D | Intel Atom® Avaton | | 0x5C | Intel Atom® Apollo Lake | -| 0x5F | Intel Atom® Denverton | +| 0x5F | Intel Atom® Denverton | | 0x7A | Intel Atom® Goldmont | | 0x4C | Intel Atom® Airmont | | 0x86 | Intel Atom® Jacobsville | -| 0x96 | Intel Atom® Elkhart Lake | -| 0x9C | Intel Atom® Jasper Lake | +| 0x96 | Intel 
Atom® Elkhart Lake | +| 0x9C | Intel Atom® Jasper Lake | | 0x1A | Intel Nehalem-EP | | 0x1E | Intel Nehalem | | 0x1F | Intel Nehalem-G | @@ -138,27 +155,32 @@ and _powerstat_core.cpu_c6_state_residency_ metrics: | 0x8F | Intel Sapphire Rapids X | | 0x8C | Intel TigerLake-L | | 0x8D | Intel TigerLake | - -### Metrics + +## Metrics + All metrics collected by Intel PowerStat plugin are collected in fixed intervals. Metrics that reports processor C-state residency or power are calculated over elapsed intervals. When starting to measure metrics, plugin skips first iteration of metrics if they are based on deltas with previous value. - + **The following measurements are supported by Intel PowerStat plugin:** + - powerstat_core - - The following Tags are returned by plugin with powerstat_core measurements: + - The following Tags are returned by plugin with powerstat_core measurements: + ```text | Tag | Description | |-----|-------------| | `package_id` | ID of platform package/socket | - | `core_id` | ID of physical processor core | + | `core_id` | ID of physical processor core | | `cpu_id` | ID of logical processor core | - Measurement powerstat_core metrics are collected per-CPU (cpu_id is the key) + Measurement powerstat_core metrics are collected per-CPU (cpu_id is the key) while core_id and package_id tags are additional topology information. + ``` - - Available metrics for powerstat_core measurement - + - Available metrics for powerstat_core measurement + + ```text | Metric name (field) | Description | Units | |-----|-------------|-----| | `cpu_frequency_mhz` | Current operational frequency of CPU Core | MHz | @@ -167,31 +189,33 @@ When starting to measure metrics, plugin skips first iteration of metrics if the | `cpu_c1_state_residency_percent` | Percentage of time that CPU Core spent in C1 Core residency state | % | | `cpu_c6_state_residency_percent` | Percentage of time that CPU Core spent in C6 Core residency state | % | | `cpu_busy_cycles_percent` | CPU Core Busy cycles as a ratio of Cycles spent in C0 state residency to all cycles executed by CPU Core | % | - - + ``` - powerstat_package - - The following Tags are returned by plugin with powerstat_package measurements: + - The following Tags are returned by plugin with powerstat_package measurements: + ```text | Tag | Description | |-----|-------------| | `package_id` | ID of platform package/socket | - Measurement powerstat_package metrics are collected per processor package - _package_id_ tag indicates which + Measurement powerstat_package metrics are collected per processor package -_package_id_ tag indicates which package metric refers to. 
+ ``` - - Available metrics for powerstat_package measurement + - Available metrics for powerstat_package measurement + ```text | Metric name (field) | Description | Units | |-----|-------------|-----| - | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts | + | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts | | `current_power_consumption_watts` | Current power consumption of processor package | Watts | | `current_dram_power_consumption_watts` | Current power consumption of processor package DRAM subsystem | Watts | + ``` +### Example Output -### Example Output: - -``` +```shell powerstat_package,host=ubuntu,package_id=0 thermal_design_power_watts=160 1606494744000000000 powerstat_package,host=ubuntu,package_id=0 current_power_consumption_watts=35 1606494744000000000 powerstat_package,host=ubuntu,package_id=0 current_dram_power_consumption_watts=13.94 1606494744000000000 diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index cc98c13b6c0e0..5c49d08be502d 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -1,22 +1,26 @@ # Intel RDT Input Plugin -The `intel_rdt` plugin collects information provided by monitoring features of -the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor -and control the utilization of shared resources (ex: last level cache, memory bandwidth). -### About Intel RDT +The `intel_rdt` plugin collects information provided by monitoring features of +the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor +and control the utilization of shared resources (ex: last level cache, memory bandwidth). + +## About Intel RDT + Intel’s Resource Director Technology (RDT) framework consists of: -- Cache Monitoring Technology (CMT) + +- Cache Monitoring Technology (CMT) - Memory Bandwidth Monitoring (MBM) -- Cache Allocation Technology (CAT) -- Code and Data Prioritization (CDP) +- Cache Allocation Technology (CAT) +- Code and Data Prioritization (CDP) -As multithreaded and multicore platform architectures emerge, the last level cache and -memory bandwidth are key resources to manage for running workloads in single-threaded, -multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT -and CDP to manage these workloads across shared resources. +As multithreaded and multicore platform architectures emerge, the last level cache and +memory bandwidth are key resources to manage for running workloads in single-threaded, +multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT +and CDP to manage these workloads across shared resources. -### Prerequsities - PQoS Tool -To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a +## Prerequsities - PQoS Tool + +To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). Before using this plugin please be sure _pqos_ is properly installed and configured regarding that the plugin run _pqos_ to work with `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. @@ -24,7 +28,7 @@ Note: pqos tool needs root privileges to work properly. 
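As a purely illustrative aside (not part of the plugin), one quick way to confirm that a `pqos` binary is discoverable at all is a `PATH` lookup, which is the same idea as the auto discovery the plugin falls back to when `pqos_path` is not set:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Illustrative only: check whether pqos is discoverable on PATH.
	// The version (>= 4.0.0) and root-privilege requirements noted above
	// still apply and are not checked here.
	path, err := exec.LookPath("pqos")
	if err != nil {
		fmt.Println("pqos not found on PATH:", err)
		return
	}
	fmt.Println("pqos found at", path)
}
```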
Metrics will be constantly reported from the following `pqos` commands within the given interval: -#### If telegraf does not run as the root user +### If telegraf does not run as the root user The `pqos` binary needs to run as root. If telegraf is running as a non-root user, you may enable sudo to allow `pqos` to run correctly. @@ -40,40 +44,46 @@ Alternately, you may enable sudo to allow `pqos` to run correctly, as follows: Add the following to your sudoers file (assumes telegraf runs as a user named `telegraf`): -``` +```sh telegraf ALL=(ALL) NOPASSWD:/usr/sbin/pqos -r --iface-os --mon-file-type=csv --mon-interval=* ``` If you wish to use sudo, you must also add `use_sudo = true` to the Telegraf configuration (see below). -#### In case of cores monitoring: -``` +### In case of cores monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] ``` + where `CORES` is equal to group of cores provided in config. User can provide many groups. -#### In case of process monitoring: -``` +### In case of process monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-pid=all:[PIDS]\;mbt:[PIDS] ``` + where `PIDS` is group of processes IDs which name are equal to provided process name in a config. User can provide many process names which lead to create many processes groups. In both cases `INTERVAL` is equal to sampling_interval from config. -Because PIDs association within system could change in every moment, Intel RDT plugin provides a +Because PIDs association within system could change in every moment, Intel RDT plugin provides a functionality to check on every interval if desired processes change their PIDs association. If some change is reported, plugin will restart _pqos_ tool with new arguments. If provided by user process name is not equal to any of available processes, will be omitted and plugin will constantly check for process availability. -### Useful links -Pqos installation process: https://github.com/intel/intel-cmt-cat/blob/master/INSTALL -Enabling OS interface: https://github.com/intel/intel-cmt-cat/wiki, https://github.com/intel/intel-cmt-cat/wiki/resctrl -More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html +## Useful links + +Pqos installation process: +Enabling OS interface: , +More about Intel RDT: + +## Configuration -### Configuration ```toml # Read Intel RDT metrics [[inputs.intel_rdt]] @@ -81,7 +91,7 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ## This value is propagated to pqos tool. Interval format is defined by pqos itself. ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # sampling_interval = "10" - + ## Optionally specify the path to pqos executable. ## If not provided, auto discovery will be performed. 
# pqos_path = "/usr/local/bin/pqos" @@ -105,7 +115,8 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t # use_sudo = false ``` -### Exposed metrics +## Exposed metrics + | Name | Full name | Description | |---------------|-----------------------------------------------|-------------| | MBL | Memory Bandwidth on Local NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the local NUMA memory channel | @@ -117,7 +128,8 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t *optional -### Troubleshooting +## Troubleshooting + Pointing to non-existing cores will lead to throwing an error by _pqos_ and the plugin will not work properly. Be sure to check provided core number exists within desired system. @@ -126,13 +138,16 @@ Do not use any other _pqos_ instance that is monitoring the same cores or PIDs w It is not possible to monitor same cores or PIDs on different groups. PIDs associated for the given process could be manually checked by `pidof` command. E.g: -``` + +```sh pidof PROCESS ``` + where `PROCESS` is process name. -### Example Output -``` +## Example Output + +```shell > rdt_metric,cores=12\,19,host=r2-compute-20,name=IPC,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC_Misses,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC,process=top value=0 1598962030000000000 diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md index 35e14c77d0fbb..6c2468cd76068 100644 --- a/plugins/inputs/internal/README.md +++ b/plugins/inputs/internal/README.md @@ -5,7 +5,7 @@ The `internal` plugin collects metrics about the telegraf agent itself. Note that some metrics are aggregates across all instances of one type of plugin. -### Configuration: +## Configuration ```toml # Collect statistics about itself @@ -14,71 +14,69 @@ plugin. # collect_memstats = true ``` -### Measurements & Fields: +## Measurements & Fields -memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats +memstats are taken from the Go runtime: - internal_memstats - - alloc_bytes - - frees - - heap_alloc_bytes - - heap_idle_bytes - - heap_in_use_bytes - - heap_objects_bytes - - heap_released_bytes - - heap_sys_bytes - - mallocs - - num_gc - - pointer_lookups - - sys_bytes - - total_alloc_bytes + - alloc_bytes + - frees + - heap_alloc_bytes + - heap_idle_bytes + - heap_in_use_bytes + - heap_objects_bytes + - heap_released_bytes + - heap_sys_bytes + - mallocs + - num_gc + - pointer_lookups + - sys_bytes + - total_alloc_bytes agent stats collect aggregate stats on all telegraf plugins. - internal_agent - - gather_errors - - metrics_dropped - - metrics_gathered - - metrics_written + - gather_errors + - metrics_dropped + - metrics_gathered + - metrics_written internal_gather stats collect aggregate stats on all input plugins that are of the same input type. They are tagged with `input=` `version=` and `go_version=`. - internal_gather - - gather_time_ns - - metrics_gathered + - gather_time_ns + - metrics_gathered internal_write stats collect aggregate stats on all output plugins that are of the same input type. They are tagged with `output=` and `version=`. 
- - internal_write - - buffer_limit - - buffer_size - - metrics_added - - metrics_written - - metrics_dropped - - metrics_filtered - - write_time_ns + - buffer_limit + - buffer_size + - metrics_added + - metrics_written + - metrics_dropped + - metrics_filtered + - write_time_ns internal_ are metrics which are defined on a per-plugin basis, and usually contain tags which differentiate each instance of a particular type of plugin and `version=`. - internal_ - - individual plugin-specific fields, such as requests counts. + - individual plugin-specific fields, such as requests counts. -### Tags: +## Tags All measurements for specific plugins are tagged with information relevant to each particular plugin and with `version=`. +## Example Output -### Example Output: - -``` +```shell internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md index f9a71446f4979..0d10cc7d22655 100644 --- a/plugins/inputs/internet_speed/README.md +++ b/plugins/inputs/internet_speed/README.md @@ -16,7 +16,6 @@ The `Internet Speed Monitor` collects data about the internet speed on the syste It collects latency, download speed and upload speed - | Name | filed name | type | Unit | | -------------- | ---------- | ------- | ---- | | Download Speed | download | float64 | Mbps | @@ -27,4 +26,4 @@ It collects latency, download speed and upload speed ```sh internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index 5da647f47793f..5bd586fa15f61 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -2,7 +2,8 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. -### Configuration +## Configuration + ```toml [[inputs.interrupts]] ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is @@ -18,7 +19,7 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p # irq = [ "NET_RX", "TASKLET" ] ``` -### Metrics +## Metrics There are two styles depending on the value of `cpu_as_tag`. 
@@ -64,10 +65,11 @@ With `cpu_as_tag = true`: - fields: - count (int, number of interrupts) -### Example Output +## Example Output With `cpu_as_tag = false`: -``` + +```shell interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,cpu=cpu0 count=23i 1489346531000000000 interrupts,irq=1,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000 interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,cpu=cpu1 count=1i 1489346531000000000 @@ -75,7 +77,8 @@ soft_interrupts,irq=NET_RX,cpu=cpu0 count=280879i 1489346531000000000 ``` With `cpu_as_tag = true`: -``` + +```shell interrupts,cpu=cpu6,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 interrupts,cpu=cpu7,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 soft_interrupts,cpu=cpu0,irq=HI count=246441i 1543539773000000000 diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 609409985cb35..b704188df68a8 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -5,26 +5,29 @@ Get bare metal metrics using the command line utility If no servers are specified, the plugin will query the local machine sensor stats via the following command: -``` +```sh ipmitool sdr ``` + or with the version 2 schema: -``` + +```sh ipmitool sdr elist ``` When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: -``` +```sh ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` Any of the following parameters will be added to the aformentioned query if they're configured: -``` + +```sh -y hex_key -L privilege ``` -### Configuration +## Configuration ```toml # Read metrics from the bare metal servers via IPMI @@ -72,9 +75,10 @@ Any of the following parameters will be added to the aformentioned query if they # cache_path = "" ``` -### Measurements +## Measurements Version 1 schema: + - ipmi_sensor: - tags: - name @@ -86,6 +90,7 @@ Version 1 schema: - value (float) Version 2 schema: + - ipmi_sensor: - tags: - name @@ -98,17 +103,19 @@ Version 2 schema: - fields: - value (float) -#### Permissions +### Permissions When gathering from the local system, Telegraf will need permission to the ipmi device node. When using udev you can create the device node giving `rw` permissions to the `telegraf` user by adding the following rule to `/etc/udev/rules.d/52-telegraf-ipmi.rules`: -``` +```sh KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ``` + Alternatively, it is possible to use sudo. 
You will need the following in your telegraf config: + ```toml [[inputs.ipmi_sensor]] use_sudo = true @@ -124,11 +131,13 @@ telegraf ALL=(root) NOPASSWD: IPMITOOL Defaults!IPMITOOL !logfile, !syslog, !pam_session ``` -### Example Output +## Example Output + +### Version 1 Schema -#### Version 1 Schema When retrieving stats from a remote server: -``` + +```shell ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000 ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000 ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 @@ -137,9 +146,9 @@ ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 15171255130 ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 ``` - When retrieving stats from the local machine (no server specified): -``` + +```shell ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000 ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000 ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 @@ -151,7 +160,8 @@ ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 #### Version 2 Schema When retrieving stats from the local machine (no server specified): -``` + +```shell ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000 ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000 ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000 diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md index f4477254f117d..945ed43847dba 100644 --- a/plugins/inputs/ipset/README.md +++ b/plugins/inputs/ipset/README.md @@ -5,33 +5,37 @@ It uses the output of the command "ipset save". Ipsets created without the "counters" option are ignored. Results are tagged with: + - ipset name - ipset entry There are 3 ways to grant telegraf the right to run ipset: -* Run as root (strongly discouraged) -* Use sudo -* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. -### Using systemd capabilities +- Run as root (strongly discouraged) +- Use sudo +- Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. 
+ +## Using systemd capabilities You may run `systemctl edit telegraf.service` and add the following: -``` +```text [Service] CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN ``` -### Using sudo +## Using sudo You will need the following in your telegraf config: + ```toml [[inputs.ipset]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -40,7 +44,7 @@ telegraf ALL=(root) NOPASSWD: IPSETSAVE Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Configuration +## Configuration ```toml [[inputs.ipset]] @@ -56,15 +60,15 @@ Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Example Output +## Example Output -``` +```sh $ sudo ipset save create myset hash:net family inet hashsize 1024 maxelem 65536 counters comment add myset 10.69.152.1 packets 8 bytes 672 comment "machine A" ``` -``` +```sh $ telegraf --config telegraf.conf --input-filter ipset --test --debug * Plugin: inputs.ipset, Collection 1 > ipset,rule=10.69.152.1,host=trashme,set=myset bytes_total=8i,packets_total=672i 1507615028000000000 diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index db730c88178ff..c6d14dd2d41fb 100644 --- a/plugins/inputs/iptables/README.md +++ b/plugins/inputs/iptables/README.md @@ -14,11 +14,11 @@ The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You ha * Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option. * Configure sudo to grant telegraf to run iptables. This is the most restrictive option, but require sudo setup. -### Using systemd capabilities +## Using systemd capabilities You may run `systemctl edit telegraf.service` and add the following: -``` +```shell [Service] CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN @@ -26,9 +26,10 @@ AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process. -### Using sudo +## Using sudo You will need the following in your telegraf config: + ```toml [[inputs.iptables]] use_sudo = true @@ -44,11 +45,11 @@ telegraf ALL=(root) NOPASSWD: IPTABLESSHOW Defaults!IPTABLESSHOW !logfile, !syslog, !pam_session ``` -### Using IPtables lock feature +## Using IPtables lock feature Defining multiple instances of this plugin in telegraf.conf can lead to concurrent IPtables access resulting in "ERROR in input [inputs.iptables]: exit status 4" messages in telegraf.log and missing metrics. Setting 'use_lock = true' in the plugin configuration will run IPtables with the '-w' switch, allowing a lock usage to prevent this error. -### Configuration: +## Configuration ```toml # use sudo to run iptables @@ -63,25 +64,24 @@ Defining multiple instances of this plugin in telegraf.conf can lead to concurre chains = [ "INPUT" ] ``` -### Measurements & Fields: - +## Measurements & Fields -- iptables - - pkts (integer, count) - - bytes (integer, bytes) +* iptables + * pkts (integer, count) + * bytes (integer, bytes) -### Tags: +## Tags -- All measurements have the following tags: - - table - - chain - - ruleid +* All measurements have the following tags: + * table + * chain + * ruleid The `ruleid` is the comment associated to the rule. 
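For illustration only, the sketch below shows one way a rule comment such as `/* httpd */` could be extracted from `iptables -nvL` output to serve as the `ruleid` tag; it is a simplified Go example, not the plugin's actual parser:

```go
package main

import (
	"fmt"
	"regexp"
)

// commentRe matches an iptables rule comment such as /* httpd */.
var commentRe = regexp.MustCompile(`/\* *(.*?) *\*/`)

// ruleID returns the comment of a rule line, or "" when no comment is present.
func ruleID(line string) string {
	if m := commentRe.FindStringSubmatch(line); m != nil {
		return m[1]
	}
	return ""
}

func main() {
	line := `42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */`
	fmt.Println(ruleID(line)) // httpd
}
```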
-### Example Output: +## Example Output -``` +```text $ iptables -nvL INPUT Chain INPUT (policy DROP 0 packets, 0 bytes) pkts bytes target prot opt in out source destination @@ -89,7 +89,7 @@ pkts bytes target prot opt in out source destination 42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */ ``` -``` +```shell $ ./telegraf --config telegraf.conf --input-filter iptables --test iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455 iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455 diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md index 75e5b51037085..2a44c9d15e47f 100644 --- a/plugins/inputs/ipvs/README.md +++ b/plugins/inputs/ipvs/README.md @@ -5,14 +5,14 @@ metrics about ipvs virtual and real servers. **Supported Platforms:** Linux -### Configuration +## Configuration ```toml [[inputs.ipvs]] # no configuration ``` -#### Permissions +### Permissions Assuming you installed the telegraf package via one of the published packages, the process will be running as the `telegraf` user. However, in order for this @@ -20,7 +20,7 @@ plugin to communicate over netlink sockets it needs the telegraf process to be running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure to ensure these permissions before running telegraf with this plugin included. -### Metrics +## Metrics Server will contain tags identifying how it was configured, using one of `address` + `port` + `protocol` *OR* `fwmark`. This is how one would normally @@ -66,17 +66,19 @@ configure a virtual server using `ipvsadm`. - pps_out - cps -### Example Output +## Example Output Virtual server is configured using `fwmark` and backed by 2 real servers: -``` + +```shell ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000 ``` Virtual server is configured using `proto+addr+port` and backed by 2 real servers: -``` + +```shell ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47 inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000 diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index e12326031b9ef..5726af2cbd5a2 100644 --- a/plugins/inputs/jenkins/README.md +++ 
b/plugins/inputs/jenkins/README.md @@ -4,7 +4,7 @@ The jenkins plugin gathers information about the nodes and jobs running in a jen This plugin does not require a plugin on jenkins and it makes use of Jenkins API to retrieve all the information needed. -### Configuration: +## Configuration ```toml [[inputs.jenkins]] @@ -55,7 +55,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API # max_connections = 5 ``` -### Metrics: +## Metrics - jenkins - tags: @@ -65,7 +65,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - busy_executors - total_executors -+ jenkins_node +- jenkins_node - tags: - arch - disk_path @@ -96,23 +96,22 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - number - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) -### Sample Queries: +## Sample Queries -``` +```sql SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null) ``` -``` +```sql SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null) ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter jenkins --test jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000 jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 ``` - diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 9f2a658f16247..0fdd25a94d86a 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,8 +1,8 @@ # Jolokia Input Plugin -### Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. +## Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin -#### Configuration +### Configuration ```toml # Read JMX metrics through Jolokia @@ -66,8 +66,9 @@ The Jolokia plugin collects JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. -See: https://jolokia.org/ +See: + +## Measurements -# Measurements: Jolokia plugin produces one measure for each metric configured, adding Server's `jolokia_name`, `jolokia_host` and `jolokia_port` as tags. 
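As a rough, non-authoritative illustration of the JSON-over-HTTP protocol that both the deprecated `jolokia` plugin and `jolokia2` rely on, the sketch below issues a single Jolokia `read` request; the endpoint URL, MBean and attribute names are placeholders, and the response is decoded generically rather than into the plugins' own types:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// One Jolokia "read" request. MBean, attribute and URL are
	// illustrative placeholders, not values taken from the plugins.
	reqBody, err := json.Marshal(map[string]string{
		"type":      "read",
		"mbean":     "java.lang:type=Memory",
		"attribute": "HeapMemoryUsage",
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:8080/jolokia/", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode generically; the plugins turn the "value" payload into
	// metric fields and attach their own tags.
	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["value"], out["status"])
}
```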
diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index a944949dbab7e..ae4b6a5015042 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -2,9 +2,9 @@ The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). -### Configuration: +## Configuration -#### Jolokia Agent Configuration +### Jolokia Agent Configuration The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints. @@ -34,7 +34,7 @@ Optionally, specify TLS options for communicating with agents: paths = ["Uptime"] ``` -#### Jolokia Proxy Configuration +### Jolokia Proxy Configuration The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint. @@ -79,7 +79,7 @@ Optionally, specify TLS options for communicating with proxies: paths = ["Uptime"] ``` -#### Jolokia Metric Configuration +### Jolokia Metric Configuration Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean. @@ -103,7 +103,7 @@ Use `paths` to refine which fields to collect. The preceeding `jvm_memory` `metric` declaration produces the following output: -``` +```text jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000 ``` @@ -119,7 +119,7 @@ Use `*` wildcards against `mbean` property-key values to create distinct series Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and `name` is used as a tag, the preceeding `jvm_garbage_collector` `metric` declaration produces two metrics. -``` +```shell jvm_garbage_collector,name=G1\ Old\ Generation CollectionCount=0,CollectionTime=0 1503762520000000000 jvm_garbage_collector,name=G1\ Young\ Generation CollectionTime=32,CollectionCount=2 1503762520000000000 ``` @@ -137,7 +137,7 @@ Use `tag_prefix` along with `tag_keys` to add detail to tag names. The preceeding `jvm_memory_pool` `metric` declaration produces six metrics, each with a distinct `pool_name` tag. -``` +```text jvm_memory_pool,pool_name=Compressed\ Class\ Space PeakUsage.max=1073741824,PeakUsage.committed=3145728,PeakUsage.init=0,Usage.committed=3145728,Usage.init=0,PeakUsage.used=3017976,Usage.max=1073741824,Usage.used=3017976 1503764025000000000 jvm_memory_pool,pool_name=Code\ Cache PeakUsage.init=2555904,PeakUsage.committed=6291456,Usage.committed=6291456,PeakUsage.used=6202752,PeakUsage.max=251658240,Usage.used=6210368,Usage.max=251658240,Usage.init=2555904 1503764025000000000 jvm_memory_pool,pool_name=G1\ Eden\ Space CollectionUsage.max=-1,PeakUsage.committed=56623104,PeakUsage.init=56623104,PeakUsage.used=53477376,Usage.max=-1,Usage.committed=49283072,Usage.used=19922944,CollectionUsage.committed=49283072,CollectionUsage.init=56623104,CollectionUsage.used=0,PeakUsage.max=-1,Usage.init=56623104 1503764025000000000 @@ -158,7 +158,7 @@ Use substitutions to create fields and field prefixes with MBean property-keys c The preceeding `kafka_topic` `metric` declaration produces a metric per Kafka topic. 
The `name` Mbean property-key is used as a field prefix to aid in gathering fields together into the single metric. -``` +```text kafka_topic,topic=my-topic BytesOutPerSec.MeanRate=0,FailedProduceRequestsPerSec.MeanRate=0,BytesOutPerSec.EventType="bytes",BytesRejectedPerSec.Count=0,FailedProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.EventType="requests",MessagesInPerSec.RateUnit="SECONDS",BytesInPerSec.EventType="bytes",BytesOutPerSec.RateUnit="SECONDS",BytesInPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.EventType="requests",TotalFetchRequestsPerSec.MeanRate=146.301533938701,BytesOutPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.MeanRate=0,BytesRejectedPerSec.FifteenMinuteRate=0,MessagesInPerSec.FiveMinuteRate=0,BytesInPerSec.Count=0,BytesRejectedPerSec.MeanRate=0,FailedFetchRequestsPerSec.MeanRate=0,FailedFetchRequestsPerSec.FiveMinuteRate=0,FailedFetchRequestsPerSec.FifteenMinuteRate=0,FailedProduceRequestsPerSec.Count=0,TotalFetchRequestsPerSec.FifteenMinuteRate=128.59314292334466,TotalFetchRequestsPerSec.OneMinuteRate=126.71551273850747,TotalFetchRequestsPerSec.Count=1353483,TotalProduceRequestsPerSec.FifteenMinuteRate=0,FailedFetchRequestsPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.Count=0,FailedProduceRequestsPerSec.FifteenMinuteRate=0,TotalFetchRequestsPerSec.FiveMinuteRate=130.8516148751592,TotalFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.RateUnit="SECONDS",BytesInPerSec.MeanRate=0,FailedFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.OneMinuteRate=0,BytesOutPerSec.Count=0,BytesOutPerSec.OneMinuteRate=0,MessagesInPerSec.FifteenMinuteRate=0,MessagesInPerSec.MeanRate=0,BytesInPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.OneMinuteRate=0,TotalProduceRequestsPerSec.EventType="requests",BytesRejectedPerSec.FiveMinuteRate=0,BytesRejectedPerSec.EventType="bytes",BytesOutPerSec.FiveMinuteRate=0,FailedProduceRequestsPerSec.FiveMinuteRate=0,MessagesInPerSec.Count=0,TotalProduceRequestsPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.OneMinuteRate=0,MessagesInPerSec.EventType="messages",MessagesInPerSec.OneMinuteRate=0,TotalFetchRequestsPerSec.EventType="requests",BytesInPerSec.RateUnit="SECONDS",BytesInPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.Count=0 1503767532000000000 ``` @@ -170,7 +170,7 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration | `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. | | `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. | -### Example Configurations: +## Example Configurations - [ActiveMQ](/plugins/inputs/jolokia2/examples/activemq.conf) - [BitBucket](/plugins/inputs/jolokia2/examples/bitbucket.conf) diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index 1a28b55aeb8d9..895a4b5cf3de6 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -3,7 +3,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data from listed sensors using Junos Telemetry Interface. Refer to [openconfig.net](http://openconfig.net/) for more details about OpenConfig and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html). 
-### Configuration: +## Configuration ```toml # Subscribe and receive OpenConfig Telemetry data using JTI @@ -57,7 +57,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f str_as_tags = false ``` -### Tags: +## Tags - All measurements are tagged appropriately using the identifier information in incoming data diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index f4629ed4e11e3..0083142fe7f2e 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -6,7 +6,7 @@ and creates metrics using one of the supported [input data formats][]. For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input plugin and use the old zookeeper connection method. -### Configuration +## Configuration ```toml [[inputs.kafka_consumer]] diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 86ccaa4c1dc09..1faf4c2305e3d 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,6 +1,6 @@ # Kafka Consumer Legacy Input Plugin -### Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer). +## Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer) The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 6a70387ee587b..ece2210e62688 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -2,7 +2,7 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. -### Configuration: +## Configuration ```toml [[inputs.kapacitor]] @@ -23,276 +23,334 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. 
# insecure_skip_verify = false ``` -### Measurements and fields +## Measurements and fields - [kapacitor](#kapacitor) - - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ - - [num_subscriptions](#num_subscriptions) _(integer)_ - - [num_tasks](#num_tasks) _(integer)_ + - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ + - [num_subscriptions](#num_subscriptions) _(integer)_ + - [num_tasks](#num_tasks) _(integer)_ - [kapacitor_alert](#kapacitor_alert) - - [notification_dropped](#notification_dropped) _(integer)_ - - [primary-handle-count](#primary-handle-count) _(integer)_ - - [secondary-handle-count](#secondary-handle-count) _(integer)_ + - [notification_dropped](#notification_dropped) _(integer)_ + - [primary-handle-count](#primary-handle-count) _(integer)_ + - [secondary-handle-count](#secondary-handle-count) _(integer)_ - (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ - - [dropped_member_events](#dropped_member_events) _(integer)_ - - [dropped_user_events](#dropped_user_events) _(integer)_ - - [query_handler_errors](#query_handler_errors) _(integer)_ + - [dropped_member_events](#dropped_member_events) _(integer)_ + - [dropped_user_events](#dropped_user_events) _(integer)_ + - [query_handler_errors](#query_handler_errors) _(integer)_ - [kapacitor_edges](#kapacitor_edges) - - [collected](#collected) _(integer)_ - - [emitted](#emitted) _(integer)_ + - [collected](#collected) _(integer)_ + - [emitted](#emitted) _(integer)_ - [kapacitor_ingress](#kapacitor_ingress) - - [points_received](#points_received) _(integer)_ + - [points_received](#points_received) _(integer)_ - [kapacitor_load](#kapacitor_load) - - [errors](#errors) _(integer)_ + - [errors](#errors) _(integer)_ - [kapacitor_memstats](#kapacitor_memstats) - - [alloc_bytes](#alloc_bytes) _(integer)_ - - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ - - [frees](#frees) _(integer)_ - - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ - - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ - - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ - - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ - - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ - - [heap_objects](#heap_objects) _(integer)_ - - [heap_released_bytes](#heap_released_bytes) _(integer)_ - - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ - - [last_gc_ns](#last_gc_ns) _(integer)_ - - [lookups](#lookups) _(integer)_ - - [mallocs](#mallocs) _(integer)_ - - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ - - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ - - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ - - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ - - [next_gc_ns](#next_gc_ns) _(integer)_ - - [num_gc](#num_gc) _(integer)_ - - [other_sys_bytes](#other_sys_bytes) _(integer)_ - - [pause_total_ns](#pause_total_ns) _(integer)_ - - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ - - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ - - [sys_bytes](#sys_bytes) _(integer)_ - - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ + - [alloc_bytes](#alloc_bytes) _(integer)_ + - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ + - [frees](#frees) _(integer)_ + - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ + - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ + - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ + - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ + - [heap_objects](#heap_objects) _(integer)_ + - [heap_released_bytes](#heap_released_bytes) _(integer)_ + - 
[heap_sys_bytes](#heap_sys_bytes) _(integer)_ + - [last_gc_ns](#last_gc_ns) _(integer)_ + - [lookups](#lookups) _(integer)_ + - [mallocs](#mallocs) _(integer)_ + - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ + - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ + - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ + - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ + - [next_gc_ns](#next_gc_ns) _(integer)_ + - [num_gc](#num_gc) _(integer)_ + - [other_sys_bytes](#other_sys_bytes) _(integer)_ + - [pause_total_ns](#pause_total_ns) _(integer)_ + - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ + - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ + - [sys_bytes](#sys_bytes) _(integer)_ + - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ - [kapacitor_nodes](#kapacitor_nodes) - - [alerts_inhibited](#alerts_inhibited) _(integer)_ - - [alerts_triggered](#alerts_triggered) _(integer)_ - - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ - - [crits_triggered](#crits_triggered) _(integer)_ - - [errors](#errors) _(integer)_ - - [infos_triggered](#infos_triggered) _(integer)_ - - [oks_triggered](#oks_triggered) _(integer)_ - - [points_written](#points_written) _(integer)_ - - [warns_triggered](#warns_triggered) _(integer)_ - - [write_errors](#write_errors) _(integer)_ + - [alerts_inhibited](#alerts_inhibited) _(integer)_ + - [alerts_triggered](#alerts_triggered) _(integer)_ + - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ + - [crits_triggered](#crits_triggered) _(integer)_ + - [errors](#errors) _(integer)_ + - [infos_triggered](#infos_triggered) _(integer)_ + - [oks_triggered](#oks_triggered) _(integer)_ + - [points_written](#points_written) _(integer)_ + - [warns_triggered](#warns_triggered) _(integer)_ + - [write_errors](#write_errors) _(integer)_ - [kapacitor_topics](#kapacitor_topics) - - [collected](#collected) _(integer)_ - + - [collected](#collected) _(integer)_ --- -### kapacitor +## kapacitor + The `kapacitor` measurement stores fields with information related to [Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks) and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/). -#### num_enabled_tasks +### num_enabled_tasks + The number of enabled Kapacitor tasks. -#### num_subscriptions +### num_subscriptions + The number of Kapacitor/InfluxDB subscriptions. -#### num_tasks +### num_tasks + The total number of Kapacitor tasks. --- -### kapacitor_alert +## kapacitor_alert + The `kapacitor_alert` measurement stores fields with information related to [Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/). -#### notification-dropped +### notification-dropped + The number of internal notifications dropped because they arrive too late from another Kapacitor node. If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough to keep up with the volume of alerts. -#### primary-handle-count +### primary-handle-count + The number of times this node handled an alert as the primary. This count should increase under normal conditions. -#### secondary-handle-count +### secondary-handle-count + The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner. 
--- -### kapacitor_cluster +## kapacitor_cluster + The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes. -#### dropped_member_events +### dropped_member_events + The number of gossip member events that were dropped. -#### dropped_user_events +### dropped_user_events + The number of gossip user events that were dropped. --- -### kapacitor_edges +## kapacitor_edges + The `kapacitor_edges` measurement stores fields with information related to [edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines) in Kapacitor TICKscripts. -#### collected +### collected + The number of messages collected by TICKscript edges. -#### emitted +### emitted + The number of messages emitted by TICKscript edges. --- -### kapacitor_ingress +## kapacitor_ingress + The `kapacitor_ingress` measurement stores fields with information related to data coming into Kapacitor. -#### points_received +### points_received + The number of points received by Kapacitor. --- -### kapacitor_load +## kapacitor_load + The `kapacitor_load` measurement stores fields with information related to the [Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/). -#### errors +### errors + The number of errors reported from the load directory service. --- -### kapacitor_memstats +## kapacitor_memstats + The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage. -#### alloc_bytes +### alloc_bytes + The number of bytes of memory allocated by Kapacitor that are still in use. -#### buck_hash_sys_bytes +### buck_hash_sys_bytes + The number of bytes of memory used by the profiling bucket hash table. -#### frees +### frees + The number of heap objects freed. -#### gc_sys_bytes +### gc_sys_bytes + The number of bytes of memory used for garbage collection system metadata. -#### gc_cpu_fraction +### gc_cpu_fraction + The fraction of Kapacitor's available CPU time used by garbage collection since Kapacitor started. -#### heap_alloc_bytes +### heap_alloc_bytes + The number of reachable and unreachable heap objects garbage collection has not freed. -#### heap_idle_bytes +### heap_idle_bytes + The number of heap bytes waiting to be used. -#### heap_in_use_bytes +### heap_in_use_bytes + The number of heap bytes in use. -#### heap_objects +### heap_objects + The number of allocated objects. -#### heap_released_bytes +### heap_released_bytes + The number of heap bytes released to the operating system. -#### heap_sys_bytes +### heap_sys_bytes + The number of heap bytes obtained from `system`. -#### last_gc_ns +### last_gc_ns + The nanosecond epoch time of the last garbage collection. -#### lookups +### lookups + The total number of pointer lookups. -#### mallocs +### mallocs + The total number of mallocs. -#### mcache_in_use_bytes +### mcache_in_use_bytes + The number of bytes in use by mcache structures. -#### mcache_sys_bytes +### mcache_sys_bytes + The number of bytes used for mcache structures obtained from `system`. -#### mspan_in_use_bytes +### mspan_in_use_bytes + The number of bytes in use by mspan structures. -#### mspan_sys_bytes +### mspan_sys_bytes + The number of bytes used for mspan structures obtained from `system`. 
-#### next_gc_ns +### next_gc_ns + The nanosecond epoch time of the next garbage collection. -#### num_gc +### num_gc + The number of completed garbage collection cycles. -#### other_sys_bytes +### other_sys_bytes + The number of bytes used for other system allocations. -#### pause_total_ns +### pause_total_ns + The total number of nanoseconds spent in garbage collection "stop-the-world" pauses since Kapacitor started. -#### stack_in_use_bytes +### stack_in_use_bytes + The number of bytes in use by the stack allocator. -#### stack_sys_bytes +### stack_sys_bytes + The number of bytes obtained from `system` for stack allocator. -#### sys_bytes +### sys_bytes + The number of bytes of memory obtained from `system`. -#### total_alloc_bytes +### total_alloc_bytes + The total number of bytes allocated, even if freed. --- -### kapacitor_nodes +## kapacitor_nodes + The `kapacitor_nodes` measurement stores fields related to events that occur in [TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/). -#### alerts_inhibited +### alerts_inhibited + The total number of alerts inhibited by TICKscripts. -#### alerts_triggered +### alerts_triggered + The total number of alerts triggered by TICKscripts. -#### avg_exec_time_ns +### avg_exec_time_ns + The average execution time of TICKscripts in nanoseconds. -#### crits_triggered +### crits_triggered + The number of critical (`crit`) alerts triggered by TICKscripts. -#### errors +### errors (from TICKscripts) + The number of errors caused caused by TICKscripts. -#### infos_triggered +### infos_triggered + The number of info (`info`) alerts triggered by TICKscripts. -#### oks_triggered +### oks_triggered + The number of ok (`ok`) alerts triggered by TICKscripts. #### points_written + The number of points written to InfluxDB or back to Kapacitor. #### warns_triggered + The number of warning (`warn`) alerts triggered by TICKscripts. #### working_cardinality + The total number of unique series processed. #### write_errors + The number of errors that occurred when writing to InfluxDB or other write endpoints. --- ### kapacitor_topics + The `kapacitor_topics` measurement stores fields related to -Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/). +Kapacitor topics](). + +#### collected (kapacitor_topics) -#### collected The number of events collected by Kapacitor topics. --- @@ -303,7 +361,7 @@ these values. 
## Example Output -``` +```shell $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test * Plugin: inputs.kapacitor, Collection 1 > kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 diff --git a/plugins/inputs/kernel/README.md b/plugins/inputs/kernel/README.md index 0f28bf7770370..d3467e826db8f 100644 --- a/plugins/inputs/kernel/README.md +++ b/plugins/inputs/kernel/README.md @@ -9,7 +9,7 @@ not covered by other plugins as well as the value of `/proc/sys/kernel/random/en The metrics are documented in `man proc` under the `/proc/stat` section. The metrics are documented in `man 4 random` under the `/proc/stat` section. -``` +```text /proc/sys/kernel/random/entropy_avail @@ -39,7 +39,7 @@ processes 86031 Number of forks since boot. ``` -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/stat @@ -47,24 +47,24 @@ Number of forks since boot. # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel - - boot_time (integer, seconds since epoch, `btime`) - - context_switches (integer, `ctxt`) - - disk_pages_in (integer, `page (0)`) - - disk_pages_out (integer, `page (1)`) - - interrupts (integer, `intr`) - - processes_forked (integer, `processes`) - - entropy_avail (integer, `entropy_available`) + - boot_time (integer, seconds since epoch, `btime`) + - context_switches (integer, `ctxt`) + - disk_pages_in (integer, `page (0)`) + - disk_pages_out (integer, `page (1)`) + - interrupts (integer, `intr`) + - processes_forked (integer, `processes`) + - entropy_avail (integer, `entropy_available`) -### Tags: +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel --test * Plugin: kernel, Collection 1 > kernel entropy_available=2469i,boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816 diff --git a/plugins/inputs/kernel_vmstat/README.md b/plugins/inputs/kernel_vmstat/README.md index 3ca6a097c1456..a5e54f158c4f7 100644 --- a/plugins/inputs/kernel_vmstat/README.md +++ b/plugins/inputs/kernel_vmstat/README.md @@ -1,13 +1,12 @@ # Kernel VMStat Input Plugin -The kernel_vmstat plugin gathers virtual memory statistics -by reading /proc/vmstat. For a full list of available fields see the +The kernel_vmstat plugin gathers virtual memory statistics +by reading /proc/vmstat. For a full list of available fields see the /proc/vmstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). -For a better idea of what each field represents, see the +For a better idea of what each field represents, see the [vmstat man page](http://linux.die.net/man/8/vmstat). - -``` +```text /proc/vmstat kernel/system statistics. 
Common entries include (from http://www.linuxinsight.com/proc_vmstat.html): @@ -109,7 +108,7 @@ pgrotated 3781 nr_bounce 0 ``` -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/vmstat @@ -117,108 +116,108 @@ nr_bounce 0 # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel_vmstat - - nr_free_pages (integer, `nr_free_pages`) - - nr_inactive_anon (integer, `nr_inactive_anon`) - - nr_active_anon (integer, `nr_active_anon`) - - nr_inactive_file (integer, `nr_inactive_file`) - - nr_active_file (integer, `nr_active_file`) - - nr_unevictable (integer, `nr_unevictable`) - - nr_mlock (integer, `nr_mlock`) - - nr_anon_pages (integer, `nr_anon_pages`) - - nr_mapped (integer, `nr_mapped`) - - nr_file_pages (integer, `nr_file_pages`) - - nr_dirty (integer, `nr_dirty`) - - nr_writeback (integer, `nr_writeback`) - - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) - - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) - - nr_page_table_pages (integer, `nr_page_table_pages`) - - nr_kernel_stack (integer, `nr_kernel_stack`) - - nr_unstable (integer, `nr_unstable`) - - nr_bounce (integer, `nr_bounce`) - - nr_vmscan_write (integer, `nr_vmscan_write`) - - nr_writeback_temp (integer, `nr_writeback_temp`) - - nr_isolated_anon (integer, `nr_isolated_anon`) - - nr_isolated_file (integer, `nr_isolated_file`) - - nr_shmem (integer, `nr_shmem`) - - numa_hit (integer, `numa_hit`) - - numa_miss (integer, `numa_miss`) - - numa_foreign (integer, `numa_foreign`) - - numa_interleave (integer, `numa_interleave`) - - numa_local (integer, `numa_local`) - - numa_other (integer, `numa_other`) - - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) - - pgpgin (integer, `pgpgin`) - - pgpgout (integer, `pgpgout`) - - pswpin (integer, `pswpin`) - - pswpout (integer, `pswpout`) - - pgalloc_dma (integer, `pgalloc_dma`) - - pgalloc_dma32 (integer, `pgalloc_dma32`) - - pgalloc_normal (integer, `pgalloc_normal`) - - pgalloc_movable (integer, `pgalloc_movable`) - - pgfree (integer, `pgfree`) - - pgactivate (integer, `pgactivate`) - - pgdeactivate (integer, `pgdeactivate`) - - pgfault (integer, `pgfault`) - - pgmajfault (integer, `pgmajfault`) - - pgrefill_dma (integer, `pgrefill_dma`) - - pgrefill_dma32 (integer, `pgrefill_dma32`) - - pgrefill_normal (integer, `pgrefill_normal`) - - pgrefill_movable (integer, `pgrefill_movable`) - - pgsteal_dma (integer, `pgsteal_dma`) - - pgsteal_dma32 (integer, `pgsteal_dma32`) - - pgsteal_normal (integer, `pgsteal_normal`) - - pgsteal_movable (integer, `pgsteal_movable`) - - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) - - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) - - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) - - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) - - pgscan_direct_dma (integer, `pgscan_direct_dma`) - - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) - - pgscan_direct_normal (integer, `pgscan_direct_normal`) - - pgscan_direct_movable (integer, `pgscan_direct_movable`) - - zone_reclaim_failed (integer, `zone_reclaim_failed`) - - pginodesteal (integer, `pginodesteal`) - - slabs_scanned (integer, `slabs_scanned`) - - kswapd_steal (integer, `kswapd_steal`) - - kswapd_inodesteal (integer, `kswapd_inodesteal`) - - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) - - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) - - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) - - pageoutrun (integer, 
`pageoutrun`) - - allocstall (integer, `allocstall`) - - pgrotated (integer, `pgrotated`) - - compact_blocks_moved (integer, `compact_blocks_moved`) - - compact_pages_moved (integer, `compact_pages_moved`) - - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) - - compact_stall (integer, `compact_stall`) - - compact_fail (integer, `compact_fail`) - - compact_success (integer, `compact_success`) - - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) - - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) - - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) - - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) - - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) - - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) - - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) - - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) - - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) - - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) - - thp_fault_alloc (integer, `thp_fault_alloc`) - - thp_fault_fallback (integer, `thp_fault_fallback`) - - thp_collapse_alloc (integer, `thp_collapse_alloc`) - - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) - - thp_split (integer, `thp_split`) - -### Tags: + - nr_free_pages (integer, `nr_free_pages`) + - nr_inactive_anon (integer, `nr_inactive_anon`) + - nr_active_anon (integer, `nr_active_anon`) + - nr_inactive_file (integer, `nr_inactive_file`) + - nr_active_file (integer, `nr_active_file`) + - nr_unevictable (integer, `nr_unevictable`) + - nr_mlock (integer, `nr_mlock`) + - nr_anon_pages (integer, `nr_anon_pages`) + - nr_mapped (integer, `nr_mapped`) + - nr_file_pages (integer, `nr_file_pages`) + - nr_dirty (integer, `nr_dirty`) + - nr_writeback (integer, `nr_writeback`) + - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) + - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) + - nr_page_table_pages (integer, `nr_page_table_pages`) + - nr_kernel_stack (integer, `nr_kernel_stack`) + - nr_unstable (integer, `nr_unstable`) + - nr_bounce (integer, `nr_bounce`) + - nr_vmscan_write (integer, `nr_vmscan_write`) + - nr_writeback_temp (integer, `nr_writeback_temp`) + - nr_isolated_anon (integer, `nr_isolated_anon`) + - nr_isolated_file (integer, `nr_isolated_file`) + - nr_shmem (integer, `nr_shmem`) + - numa_hit (integer, `numa_hit`) + - numa_miss (integer, `numa_miss`) + - numa_foreign (integer, `numa_foreign`) + - numa_interleave (integer, `numa_interleave`) + - numa_local (integer, `numa_local`) + - numa_other (integer, `numa_other`) + - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) + - pgpgin (integer, `pgpgin`) + - pgpgout (integer, `pgpgout`) + - pswpin (integer, `pswpin`) + - pswpout (integer, `pswpout`) + - pgalloc_dma (integer, `pgalloc_dma`) + - pgalloc_dma32 (integer, `pgalloc_dma32`) + - pgalloc_normal (integer, `pgalloc_normal`) + - pgalloc_movable (integer, `pgalloc_movable`) + - pgfree (integer, `pgfree`) + - pgactivate (integer, `pgactivate`) + - pgdeactivate (integer, `pgdeactivate`) + - pgfault (integer, `pgfault`) + - pgmajfault (integer, `pgmajfault`) + - pgrefill_dma (integer, `pgrefill_dma`) + - pgrefill_dma32 (integer, `pgrefill_dma32`) + - pgrefill_normal (integer, `pgrefill_normal`) + - pgrefill_movable (integer, `pgrefill_movable`) + - pgsteal_dma (integer, `pgsteal_dma`) + - pgsteal_dma32 (integer, `pgsteal_dma32`) + - pgsteal_normal (integer, 
`pgsteal_normal`) + - pgsteal_movable (integer, `pgsteal_movable`) + - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) + - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) + - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) + - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) + - pgscan_direct_dma (integer, `pgscan_direct_dma`) + - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) + - pgscan_direct_normal (integer, `pgscan_direct_normal`) + - pgscan_direct_movable (integer, `pgscan_direct_movable`) + - zone_reclaim_failed (integer, `zone_reclaim_failed`) + - pginodesteal (integer, `pginodesteal`) + - slabs_scanned (integer, `slabs_scanned`) + - kswapd_steal (integer, `kswapd_steal`) + - kswapd_inodesteal (integer, `kswapd_inodesteal`) + - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) + - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) + - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) + - pageoutrun (integer, `pageoutrun`) + - allocstall (integer, `allocstall`) + - pgrotated (integer, `pgrotated`) + - compact_blocks_moved (integer, `compact_blocks_moved`) + - compact_pages_moved (integer, `compact_pages_moved`) + - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) + - compact_stall (integer, `compact_stall`) + - compact_fail (integer, `compact_fail`) + - compact_success (integer, `compact_success`) + - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) + - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) + - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) + - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) + - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) + - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) + - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) + - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) + - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) + - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) + - thp_fault_alloc (integer, `thp_fault_alloc`) + - thp_fault_fallback (integer, `thp_fault_fallback`) + - thp_collapse_alloc (integer, `thp_collapse_alloc`) + - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) + - thp_split (integer, `thp_split`) + +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel_vmstat --test * Plugin: kernel_vmstat, Collection 1 > kernel_vmstat 
allocstall=81496i,compact_blocks_moved=238196i,compact_fail=135220i,compact_pagemigrate_failed=0i,compact_pages_moved=6370588i,compact_stall=142092i,compact_success=6872i,htlb_buddy_alloc_fail=0i,htlb_buddy_alloc_success=0i,kswapd_high_wmark_hit_quickly=25439i,kswapd_inodesteal=29770874i,kswapd_low_wmark_hit_quickly=8756i,kswapd_skip_congestion_wait=0i,kswapd_steal=291534428i,nr_active_anon=2515657i,nr_active_file=2244914i,nr_anon_pages=1358675i,nr_anon_transparent_hugepages=2034i,nr_bounce=0i,nr_dirty=5690i,nr_file_pages=5153546i,nr_free_pages=78730i,nr_inactive_anon=426259i,nr_inactive_file=2366791i,nr_isolated_anon=0i,nr_isolated_file=0i,nr_kernel_stack=579i,nr_mapped=558821i,nr_mlock=0i,nr_page_table_pages=11115i,nr_shmem=541689i,nr_slab_reclaimable=459806i,nr_slab_unreclaimable=47859i,nr_unevictable=0i,nr_unstable=0i,nr_vmscan_write=6206i,nr_writeback=0i,nr_writeback_temp=0i,numa_foreign=0i,numa_hit=5113399878i,numa_interleave=35793i,numa_local=5113399878i,numa_miss=0i,numa_other=0i,pageoutrun=505006i,pgactivate=375664931i,pgalloc_dma=0i,pgalloc_dma32=122480220i,pgalloc_movable=0i,pgalloc_normal=5233176719i,pgdeactivate=122735906i,pgfault=8699921410i,pgfree=5359765021i,pginodesteal=9188431i,pgmajfault=122210i,pgpgin=219717626i,pgpgout=3495885510i,pgrefill_dma=0i,pgrefill_dma32=1180010i,pgrefill_movable=0i,pgrefill_normal=119866676i,pgrotated=60620i,pgscan_direct_dma=0i,pgscan_direct_dma32=12256i,pgscan_direct_movable=0i,pgscan_direct_normal=31501600i,pgscan_kswapd_dma=0i,pgscan_kswapd_dma32=4480608i,pgscan_kswapd_movable=0i,pgscan_kswapd_normal=287857984i,pgsteal_dma=0i,pgsteal_dma32=4466436i,pgsteal_movable=0i,pgsteal_normal=318463755i,pswpin=2092i,pswpout=6206i,slabs_scanned=93775616i,thp_collapse_alloc=24857i,thp_collapse_alloc_failed=102214i,thp_fault_alloc=346219i,thp_fault_fallback=895453i,thp_split=9817i,unevictable_pgs_cleared=0i,unevictable_pgs_culled=1531i,unevictable_pgs_mlocked=6988i,unevictable_pgs_mlockfreed=0i,unevictable_pgs_munlocked=6988i,unevictable_pgs_rescued=5426i,unevictable_pgs_scanned=0i,unevictable_pgs_stranded=0i,zone_reclaim_failed=0i 1459455200071462843 diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index a5002d5f21204..248f21a47aa58 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -7,7 +7,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. [Kibana]: https://www.elastic.co/ -### Configuration +## Configuration ```toml [[inputs.kibana]] @@ -29,7 +29,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. # insecure_skip_verify = false ``` -### Metrics +## Metrics - kibana - tags: @@ -48,9 +48,9 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. 
- concurrent_connections (integer) - requests_per_sec (float) -### Example Output +## Example Output -``` +```shell kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 ``` @@ -58,8 +58,8 @@ kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5 Requires the following tools: -* [Docker](https://docs.docker.com/get-docker/) -* [Docker Compose](https://docs.docker.com/compose/install/) +- [Docker](https://docs.docker.com/get-docker/) +- [Docker Compose](https://docs.docker.com/compose/install/) From the root of this project execute the following script: `./plugins/inputs/kibana/test_environment/run_test_env.sh` @@ -67,4 +67,4 @@ This will build the latest Telegraf and then start up Kibana and Elasticsearch, Then you can attach to the telegraf container to inspect the file `/tmp/metrics.out` to see if the status is being reported. -The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface to attach to the running container. \ No newline at end of file +The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface to attach to the running container. diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index ba1a7580fd29b..681c77c636ce7 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -3,8 +3,7 @@ The [Kinesis][kinesis] consumer plugin reads from a Kinesis data stream and creates metrics using one of the supported [input data formats][]. - -### Configuration +## Configuration ```toml [[inputs.kinesis_consumer]] @@ -74,29 +73,28 @@ and creates metrics using one of the supported [input data formats][]. table_name = "default" ``` - -#### Required AWS IAM permissions +### Required AWS IAM permissions Kinesis: - - DescribeStream - - GetRecords - - GetShardIterator + +- DescribeStream +- GetRecords +- GetShardIterator DynamoDB: - - GetItem - - PutItem +- GetItem +- PutItem -#### DynamoDB Checkpoint +### DynamoDB Checkpoint The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage this functionality, create a table with the following string type keys: -``` +```shell Partition key: namespace Sort key: shard_id ``` - [kinesis]: https://aws.amazon.com/kinesis/ [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index 518dd5d7f3720..14d77556f0465 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -3,9 +3,9 @@ The KNX input plugin that listens for messages on the KNX home-automation bus. This plugin connects to the KNX bus via a KNX-IP interface. Information about supported KNX message datapoint types can be found at the -underlying "knx-go" project site (https://github.com/vapourismo/knx-go). +underlying "knx-go" project site (). -### Configuration +## Configuration This is a sample config for the plugin. @@ -34,7 +34,7 @@ This is a sample config for the plugin. 
# addresses = ["5/5/3"] ``` -#### Measurement configurations +### Measurement configurations Each measurement contains only one datapoint-type (DPT) and assigns a list of addresses to this measurement. You can, for example group all temperature sensor @@ -43,23 +43,24 @@ messages of one datapoint-type to multiple measurements. **NOTE: You should not assign a group-address (GA) to multiple measurements!** -### Metrics +## Metrics Received KNX data is stored in the named measurement as configured above using the "value" field. Additional to the value, there are the following tags added to the datapoint: - - "groupaddress": KNX group-address corresponding to the value - - "unit": unit of the value - - "source": KNX physical address sending the value + +- "groupaddress": KNX group-address corresponding to the value +- "unit": unit of the value +- "source": KNX physical address sending the value To find out about the datatype of the datapoint please check your KNX project, the KNX-specification or the "knx-go" project for the corresponding DPT. -### Example Output +## Example Output This section shows example output in Line Protocol format. -``` +```shell illumination,groupaddress=5/5/4,host=Hugin,source=1.1.12,unit=lux value=17.889999389648438 1582132674999013274 temperature,groupaddress=5/5/1,host=Hugin,source=1.1.8,unit=°C value=17.799999237060547 1582132663427587361 windowopen,groupaddress=1/0/1,host=Hugin,source=1.1.3 value=true 1582132630425581320 diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 7803d4fc4e9eb..847efd7ffae1d 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -19,7 +19,7 @@ the major cloud providers; this is roughly 4 release / 2 years. **This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -31,7 +31,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration: +## Configuration ```toml [[inputs.kube_inventory]] @@ -81,7 +81,7 @@ avoid cardinality issues: # fielddrop = ["terminated_reason"] ``` -#### Kubernetes Permissions +## Kubernetes Permissions If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group. 
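As a rough sketch of the cluster role described above (the role name and aggregation label below are placeholders, not the repository's actual manifest; adapt them to whatever aggregated ClusterRole you bind to Telegraf's service account):

```yaml
# Hypothetical example only: grants read access to the cluster-scoped
# resources this plugin lists ("persistentvolumes" and "nodes").
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: telegraf-kube-inventory          # placeholder name
  labels:
    rbac.example.com/aggregate-to-telegraf: "true"   # placeholder aggregation label
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes", "nodes"]
    verbs: ["get", "list"]
```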
@@ -150,7 +150,7 @@ tls_cert = "/run/telegraf-kubernetes-cert" tls_key = "/run/telegraf-kubernetes-key" ``` -### Metrics: +## Metrics - kubernetes_daemonset - tags: @@ -167,7 +167,7 @@ tls_key = "/run/telegraf-kubernetes-key" - number_unavailable - updated_number_scheduled -* kubernetes_deployment +- kubernetes_deployment - tags: - deployment_name - namespace @@ -192,7 +192,7 @@ tls_key = "/run/telegraf-kubernetes-key" - ready - port -* kubernetes_ingress +- kubernetes_ingress - tags: - ingress_name - namespace @@ -220,7 +220,7 @@ tls_key = "/run/telegraf-kubernetes-key" - allocatable_memory_bytes - allocatable_pods -* kubernetes_persistentvolume +- kubernetes_persistentvolume - tags: - pv_name - phase @@ -238,7 +238,7 @@ tls_key = "/run/telegraf-kubernetes-key" - fields: - phase_type (int, [see below](#pvc-phase_type)) -* kubernetes_pod_container +- kubernetes_pod_container - tags: - container_name - namespace @@ -274,7 +274,7 @@ tls_key = "/run/telegraf-kubernetes-key" - port - target_port -* kubernetes_statefulset +- kubernetes_statefulset - tags: - statefulset_name - namespace @@ -289,7 +289,7 @@ tls_key = "/run/telegraf-kubernetes-key" - spec_replicas - observed_generation -#### pv `phase_type` +### pv `phase_type` The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. @@ -302,7 +302,7 @@ The persistentvolume "phase" is saved in the `phase` tag with a correlated numer | available | 4 | | unknown | 5 | -#### pvc `phase_type` +### pvc `phase_type` The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. @@ -313,9 +313,9 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated | pending | 2 | | unknown | 3 | -### Example Output: +## Example Output -``` +```shell kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 8ef5ef7b1dfca..07907f7eba18e 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -8,8 +8,8 @@ should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: -``` -$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' +```sh +curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' ``` In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. @@ -20,7 +20,7 @@ the major cloud providers; this is roughly 4 release / 2 years. 
**This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -32,7 +32,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration +## Configuration ```toml [[inputs.kubernetes]] @@ -62,7 +62,7 @@ avoid cardinality issues: # insecure_skip_verify = false ``` -### DaemonSet +## DaemonSet For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes Architecture][k8s-telegraf] or view the Helm charts: @@ -72,7 +72,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - [Chronograf][] - [Kapacitor][] -### Metrics +## Metrics - kubernetes_node - tags: @@ -97,7 +97,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - runtime_image_fs_capacity_bytes - runtime_image_fs_used_bytes -* kubernetes_pod_container +- kubernetes_pod_container - tags: - container_name - namespace @@ -129,7 +129,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - capacity_bytes - used_bytes -* kubernetes_pod_network +- kubernetes_pod_network - tags: - namespace - node_name @@ -140,9 +140,9 @@ Architecture][k8s-telegraf] or view the Helm charts: - tx_bytes - tx_errors -### Example Output +## Example Output -``` +```shell kubernetes_node kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md index c47b22fee1dd1..f308b1218e46a 100644 --- a/plugins/inputs/lanz/README.md +++ b/plugins/inputs/lanz/README.md @@ -5,18 +5,18 @@ This plugin provides a consumer for use with Arista Networks’ Latency Analyzer Metrics are read from a stream of data via TCP through port 50001 on the switches management IP. The data is in Protobuffers format. For more information on Arista LANZ -- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz +- This plugin uses Arista's sdk. -- https://github.com/aristanetworks/goarista +- -### Configuration +## Configuration You will need to configure LANZ and enable streaming LANZ data. -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292 +- +- ```toml [[inputs.lanz]] @@ -26,9 +26,9 @@ You will need to configure LANZ and enable streaming LANZ data. 
] ``` -### Metrics +## Metrics -For more details on the metrics see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto +For more details on the metrics see - lanz_congestion_record: - tags: @@ -47,7 +47,7 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - tx_latency (integer) - q_drop_count (integer) -+ lanz_global_buffer_usage_record +- lanz_global_buffer_usage_record - tags: - entry_type - source @@ -57,31 +57,31 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - buffer_size (integer) - duration (integer) - - -### Sample Queries +## Sample Queries Get the max tx_latency for the last hour for all interfaces on all switches. + ```sql SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max tx_latency for the last hour for all interfaces on all switches. + ```sql SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max buffer_size for over the last hour for all switches. + ```sql SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname" ``` -### Example output -``` +## Example output + +```shell lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815 lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045 lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464 lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302 ``` - - diff --git a/plugins/inputs/leofs/README.md b/plugins/inputs/leofs/README.md index bd028e65ab048..db77e8a527d9f 100644 --- a/plugins/inputs/leofs/README.md +++ b/plugins/inputs/leofs/README.md @@ -2,7 +2,7 @@ The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [LeoFS Documentation / System Administration / System Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/). 
-## Configuration: +## Configuration ```toml # Sample Config: @@ -11,57 +11,60 @@ The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using servers = ["127.0.0.1:4010"] ``` -## Measurements & Fields: +## Measurements & Fields + ### Statistics specific to the internals of LeoManager -#### Erlang VM + +#### Erlang VM of LeoManager - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min ### Statistics specific to the internals of LeoStorage -#### Erlang VM + +### Erlang VM of LeoStorage - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +### Total Number of Requests for LeoStorage - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Total Number of Objects and Total Size of Objects @@ -103,35 +106,36 @@ Note: The following items are available since LeoFS v1.4.0: Note: The all items are available since LeoFS v1.4.0. 
### Statistics specific to the internals of LeoGateway -#### Erlang VM + +#### Erlang VM of LeoGateway - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +#### Total Number of Requests for LeoGateway - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Object Cache @@ -140,15 +144,13 @@ Note: The all items are available since LeoFS v1.4.0. - total_of_files - total_cached_size - -### Tags: +### Tags All measurements have the following tags: - node - -### Example output: +### Example output #### LeoManager @@ -221,7 +223,7 @@ $ ./telegraf --config ./plugins/inputs/leofs/leo_storage.conf --input-filter leo #### LeoGateway -``` +```shell $ ./telegraf --config ./plugins/inputs/leofs/leo_gateway.conf --input-filter leofs --test > leofs, host=gateway_0, node=gateway_0@127.0.0.1 allocated_memory=87941120, diff --git a/plugins/inputs/linux_sysctl_fs/README.md b/plugins/inputs/linux_sysctl_fs/README.md index d6598e16ff30a..30e2f30881fab 100644 --- a/plugins/inputs/linux_sysctl_fs/README.md +++ b/plugins/inputs/linux_sysctl_fs/README.md @@ -1,9 +1,9 @@ # Linux Sysctl FS Input Plugin -The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt. +The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at . Example output: -``` +```shell > linux_sysctl_fs,host=foo dentry-want-pages=0i,file-max=44222i,aio-max-nr=65536i,inode-preshrink-nr=0i,dentry-nr=64340i,dentry-unused-nr=55274i,file-nr=1568i,aio-nr=0i,inode-nr=35952i,inode-free-nr=12957i,dentry-age-limit=45i 1490982022000000000 ``` diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 8cc513e98cb70..29a66828e7455 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,6 +1,6 @@ # Logparser Input Plugin -### Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. +## Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser] The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports @@ -8,12 +8,14 @@ regex patterns. The `tail` plugin now provides all the functionality of the `logparser` plugin. 
Most options can be translated directly to the `tail` plugin: + - For options in the `[inputs.logparser.grok]` section, the equivalent option will have add the `grok_` prefix when using them in the `tail` input. - The grok `measurement` option can be replaced using the standard plugin `name_override` option. Migration Example: + ```diff - [[inputs.logparser]] - files = ["/var/log/apache/access.log"] @@ -38,7 +40,7 @@ Migration Example: + data_format = "grok" ``` -### Configuration +## Configuration ```toml [[inputs.logparser]] @@ -90,15 +92,14 @@ Migration Example: # timezone = "Canada/Eastern" ``` -### Grok Parser +## Grok Parser Reference the [grok parser][] documentation to setup the grok section of the configuration. +## Additional Resources -### Additional Resources - -- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ +- [tail]: /plugins/inputs/tail/README.md [grok parser]: /plugins/parsers/grok/README.md diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 95ec3e6feae66..ee8ff59fe0d2d 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -5,7 +5,7 @@ This plugin reads metrics exposed by Logstash 5 and later is supported. -### Configuration +## Configuration ```toml [[inputs.logstash]] @@ -40,7 +40,7 @@ Logstash 5 and later is supported. # "X-Special-Header" = "Special-Value" ``` -### Metrics +## Metrics Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) @@ -80,7 +80,7 @@ Additional plugin stats may be collected (because logstash doesn't consistently - gc_collectors_young_collection_count - uptime_in_millis -+ logstash_process +- logstash_process - tags: - node_id - node_name @@ -112,7 +112,7 @@ Additional plugin stats may be collected (because logstash doesn't consistently - filtered - out -+ logstash_plugins +- logstash_plugins - tags: - node_id - node_name @@ -148,9 +148,9 @@ Additional plugin stats may be collected (because logstash doesn't consistently - page_capacity_in_bytes - queue_size_in_bytes -### Example Output +## Example Output -``` +```shell logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 1566425244000000000 
logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000 logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000 diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md index dbdf58f73b257..a6d8b08857b74 100644 --- a/plugins/inputs/lustre2/README.md +++ b/plugins/inputs/lustre2/README.md @@ -5,7 +5,7 @@ many requirements of leadership class HPC simulation environments. This plugin monitors the Lustre file system using its entries in the proc filesystem. -### Configuration +## Configuration ```toml # Read metrics from local Lustre service on OST, MDS @@ -24,7 +24,7 @@ This plugin monitors the Lustre file system using its entries in the proc filesy # ] ``` -### Metrics +## Metrics From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`: @@ -113,17 +113,16 @@ From `/proc/fs/lustre/mdt/*/job_stats`: - jobstats_sync - jobstats_unlink - -### Troubleshooting +## Troubleshooting Check for the default or custom procfiles in the proc filesystem, and reference the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not report all information from these files, only a limited set of items corresponding to the above metric fields. -### Example Output +## Example Output -``` +```shell lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000 lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000 diff --git a/plugins/inputs/lvm/README.md b/plugins/inputs/lvm/README.md index c0ce1a2e6008a..40f37500b68ed 100644 --- a/plugins/inputs/lvm/README.md +++ b/plugins/inputs/lvm/README.md @@ -3,7 +3,7 @@ The Logical Volume Management (LVM) input plugin collects information about physical volumes, volume groups, and logical volumes. -### Configuration +## Configuration The `lvm` command requires elevated permissions. If the user has configured sudo with the ability to run these commands, then set the `use_sudo` to true. @@ -15,7 +15,7 @@ sudo with the ability to run these commands, then set the `use_sudo` to true. use_sudo = false ``` -#### Using sudo +### Using sudo If your account does not already have the ability to run commands with passwordless sudo then updates to the sudoers file are required. 
Below @@ -31,7 +31,7 @@ Cmnd_Alias LVM = /usr/sbin/pvs *, /usr/sbin/vgs *, /usr/sbin/lvs * Defaults!LVM !logfile, !syslog, !pam_session ``` -### Metrics +## Metrics Metrics are broken out by physical volume (pv), volume group (vg), and logical volume (lv): @@ -64,14 +64,16 @@ volume (lv): - data_percent - meta_percent -### Example Output +## Example Output The following example shows a system with the root partition on an LVM group as well as with a Docker thin-provisioned LVM group on a second drive: +```shell > lvm_physical_vol,path=/dev/sda2,vol_group=vgroot free=0i,size=249510756352i,used=249510756352i,used_percent=100 1631823026000000000 > lvm_physical_vol,path=/dev/sdb,vol_group=docker free=3858759680i,size=128316342272i,used=124457582592i,used_percent=96.99277612525741 1631823026000000000 > lvm_vol_group,name=vgroot free=0i,logical_volume_count=1i,physical_volume_count=1i,size=249510756352i,snapshot_count=0i,used_percent=100 1631823026000000000 > lvm_vol_group,name=docker free=3858759680i,logical_volume_count=1i,physical_volume_count=1i,size=128316342272i,snapshot_count=0i,used_percent=96.99277612525741 1631823026000000000 > lvm_logical_vol,name=lvroot,vol_group=vgroot data_percent=0,metadata_percent=0,size=249510756352i 1631823026000000000 > lvm_logical_vol,name=thinpool,vol_group=docker data_percent=0.36000001430511475,metadata_percent=1.3300000429153442,size=121899057152i 1631823026000000000 +``` diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md index 46750f6fc5efa..a3a7f599de8bf 100644 --- a/plugins/inputs/mailchimp/README.md +++ b/plugins/inputs/mailchimp/README.md @@ -2,7 +2,7 @@ Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/). -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage mailchimp`. @@ -21,7 +21,7 @@ generate it using `telegraf --usage mailchimp`. # campaign_id = "" ``` -### Metrics +## Metrics - mailchimp - tags: diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md index 7feb4a10d9d04..acd6100df75f8 100644 --- a/plugins/inputs/marklogic/README.md +++ b/plugins/inputs/marklogic/README.md @@ -2,7 +2,7 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more host. 
-### Configuration: +## Configuration ```toml [[inputs.marklogic]] @@ -24,7 +24,7 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos # insecure_skip_verify = false ``` -### Metrics +## Metrics - marklogic - tags: @@ -56,9 +56,9 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos - http_server_receive_bytes - http_server_send_bytes -### Example Output: +## Example Output -``` +```shell $> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000 ``` diff --git a/plugins/inputs/mcrouter/README.md b/plugins/inputs/mcrouter/README.md index 05c2597869e05..a657ef125a6de 100644 --- a/plugins/inputs/mcrouter/README.md +++ b/plugins/inputs/mcrouter/README.md @@ -2,7 +2,7 @@ This plugin gathers statistics data from a Mcrouter server. -### Configuration: +## Configuration ```toml # Read metrics from one or many mcrouter servers. @@ -15,7 +15,7 @@ This plugin gathers statistics data from a Mcrouter server. # timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the *mcrouter* measurement. @@ -88,16 +88,14 @@ Fields: * cmd_delete_out_all * cmd_lease_set_out_all -### Tags: +## Tags * Mcrouter measurements have the following tags: - - server (the host name from which metrics are gathered) + * server (the host name from which metrics are gathered) +## Example Output - -### Example Output: - -``` +```shell $ ./telegraf --config telegraf.conf --input-filter mcrouter --test mcrouter,server=localhost:11211 
uptime=166,num_servers=1,num_servers_new=1,num_servers_up=0,num_servers_down=0,num_servers_closed=0,num_clients=1,num_suspect_servers=0,destination_batches_sum=0,destination_requests_sum=0,outstanding_route_get_reqs_queued=0,outstanding_route_update_reqs_queued=0,outstanding_route_get_avg_queue_size=0,outstanding_route_update_avg_queue_size=0,outstanding_route_get_avg_wait_time_sec=0,outstanding_route_update_avg_wait_time_sec=0,retrans_closed_connections=0,destination_pending_reqs=0,destination_inflight_reqs=0,destination_batch_size=0,asynclog_requests=0,proxy_reqs_processing=1,proxy_reqs_waiting=0,client_queue_notify_period=0,rusage_system=0.040966,rusage_user=0.020483,ps_num_minor_faults=2490,ps_num_major_faults=11,ps_user_time_sec=0.02,ps_system_time_sec=0.04,ps_vsize=697741312,ps_rss=10563584,fibers_allocated=0,fibers_pool_size=0,fibers_stack_high_watermark=0,successful_client_connections=18,duration_us=0,destination_max_pending_reqs=0,destination_max_inflight_reqs=0,retrans_per_kbyte_max=0,cmd_get_count=0,cmd_delete_out=0,cmd_lease_get=0,cmd_set=0,cmd_get_out_all=0,cmd_get_out=0,cmd_lease_set_count=0,cmd_other_out_all=0,cmd_lease_get_out=0,cmd_set_count=0,cmd_lease_set_out=0,cmd_delete_count=0,cmd_other=0,cmd_delete=0,cmd_get=0,cmd_lease_set=0,cmd_set_out=0,cmd_lease_get_count=0,cmd_other_out=0,cmd_lease_get_out_all=0,cmd_set_out_all=0,cmd_other_count=0,cmd_delete_out_all=0,cmd_lease_set_out_all=0 1453831884664956455 ``` diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md index 6180833b69ade..462ac89ca5507 100644 --- a/plugins/inputs/mdstat/README.md +++ b/plugins/inputs/mdstat/README.md @@ -1,15 +1,14 @@ # mdstat Input Plugin The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host -by reading /proc/mdstat. For a full list of available fields see the +by reading /proc/mdstat. For a full list of available fields see the /proc/mdstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). -For a better idea of what each field represents, see the +For a better idea of what each field represents, see the [mdstat man page](https://raid.wiki.kernel.org/index.php/Mdstat). 
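As a rough illustration, the `/proc/mdstat` contents behind the example output shown later in this section might look something like the sketch below; exact formatting varies with the RAID personality and kernel version:

```text
Personalities : [raid1]
md1 : active raid1 sdn1[1] sdm1[0]
      231299072 blocks super 1.2 [2/2] [UU]

unused devices: <none>
```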
-Stat collection based on Prometheus' mdstat collection library at https://github.com/prometheus/procfs/blob/master/mdstat.go +Stat collection based on Prometheus' mdstat collection library at - -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/mdstat @@ -19,7 +18,7 @@ Stat collection based on Prometheus' mdstat collection library at https://github # file_name = "/proc/mdstat" ``` -### Measurements & Fields: +## Measurements & Fields - mdstat - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) @@ -32,16 +31,16 @@ Stat collection based on Prometheus' mdstat collection library at https://github - DisksSpare (the current count of "spare" disks in the array) - DisksTotal (total count of disks in the array) -### Tags: +## Tags - mdstat - ActivityState (`active` or `inactive`) - Devices (comma separated list of devices that make up the array) - Name (name of the array) -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter mdstat --test * Plugin: mdstat, Collection 1 > mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md index 9122b885a09e1..3fff5178de75d 100644 --- a/plugins/inputs/mem/README.md +++ b/plugins/inputs/mem/README.md @@ -5,14 +5,15 @@ The mem plugin collects system memory metrics. For a more complete explanation of the difference between *used* and *actual_used* RAM, see [Linux ate my ram](http://www.linuxatemyram.com/). -### Configuration: +## Configuration + ```toml # Read metrics about memory usage [[inputs.mem]] # no configuration ``` -### Metrics: +## Metrics Available fields are dependent on platform. @@ -55,7 +56,8 @@ Available fields are dependent on platform. - write_back (integer, Linux) - write_back_tmp (integer, Linux) -### Example Output: -``` +## Example Output + +```shell mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000 ``` diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index 721be913054a7..e3f8fafea48d6 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -2,7 +2,7 @@ This plugin gathers statistics data from a Memcached server. -### Configuration: +## Configuration ```toml # Read metrics from one or many memcached servers. @@ -14,7 +14,7 @@ This plugin gathers statistics data from a Memcached server. # unix_sockets = ["/var/run/memcached.sock"] ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the *memcached* measurement. 
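The same counters can be sanity-checked outside of Telegraf with memcached's text-protocol `stats` command; the address below is only an assumption matching the `localhost:11211` server used in the example output:

```shell
# Assumed endpoint: a memcached instance listening on localhost:11211.
# Prints the raw "stats" output that this plugin parses into fields.
printf 'stats\r\nquit\r\n' | nc localhost 11211
```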
@@ -63,22 +63,22 @@ Fields: Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt). -### Tags: +## Tags * Memcached measurements have the following tags: - - server (the host name from which metrics are gathered) + * server (the host name from which metrics are gathered) -### Sample Queries: +## Sample Queries You can use the following query to get the average get hit and miss ratio, as well as the total average size of cached items, number of cached items and average connection counts per server. -``` +```sql SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_get) as get_misses_ratio, mean(bytes), mean(curr_items), mean(curr_connections) FROM memcached WHERE time > now() - 1h GROUP BY server ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter memcached --test memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455 ``` diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index 2845881880d95..0d48164fc8984 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -3,7 +3,7 @@ This input plugin gathers metrics from Mesos. For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. -### Configuration: +## Configuration ```toml # Telegraf plugin for gathering metrics from N Mesos masters @@ -53,280 +53,282 @@ For more information, please check the [Mesos Observability Metrics](http://meso By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default values. User needs to specify master/slave nodes this plugin will gather metrics from. 
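For example, a minimal configuration pointing the plugin at one master and one agent could look like the sketch below; the hostnames are placeholders and only the `masters`/`slaves` lists are shown, with all other options left at their defaults:

```toml
[[inputs.mesos]]
  ## Hypothetical endpoints - replace with your own master and agent addresses
  masters = ["http://mesos-master-01:5050"]
  slaves = ["http://mesos-agent-01:5051"]
```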
-### Measurements & Fields: +## Measurements & Fields Mesos master metric groups - resources - - master/cpus_percent - - master/cpus_used - - master/cpus_total - - master/cpus_revocable_percent - - master/cpus_revocable_total - - master/cpus_revocable_used - - master/disk_percent - - master/disk_used - - master/disk_total - - master/disk_revocable_percent - - master/disk_revocable_total - - master/disk_revocable_used - - master/gpus_percent - - master/gpus_used - - master/gpus_total - - master/gpus_revocable_percent - - master/gpus_revocable_total - - master/gpus_revocable_used - - master/mem_percent - - master/mem_used - - master/mem_total - - master/mem_revocable_percent - - master/mem_revocable_total - - master/mem_revocable_used + - master/cpus_percent + - master/cpus_used + - master/cpus_total + - master/cpus_revocable_percent + - master/cpus_revocable_total + - master/cpus_revocable_used + - master/disk_percent + - master/disk_used + - master/disk_total + - master/disk_revocable_percent + - master/disk_revocable_total + - master/disk_revocable_used + - master/gpus_percent + - master/gpus_used + - master/gpus_total + - master/gpus_revocable_percent + - master/gpus_revocable_total + - master/gpus_revocable_used + - master/mem_percent + - master/mem_used + - master/mem_total + - master/mem_revocable_percent + - master/mem_revocable_total + - master/mem_revocable_used - master - - master/elected - - master/uptime_secs + - master/elected + - master/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - slaves - - master/slave_registrations - - master/slave_removals - - master/slave_reregistrations - - master/slave_shutdowns_scheduled - - master/slave_shutdowns_canceled - - master/slave_shutdowns_completed - - master/slaves_active - - master/slaves_connected - - master/slaves_disconnected - - master/slaves_inactive - - master/slave_unreachable_canceled - - master/slave_unreachable_completed - - master/slave_unreachable_scheduled - - master/slaves_unreachable + - master/slave_registrations + - master/slave_removals + - master/slave_reregistrations + - master/slave_shutdowns_scheduled + - master/slave_shutdowns_canceled + - master/slave_shutdowns_completed + - master/slaves_active + - master/slaves_connected + - master/slaves_disconnected + - master/slaves_inactive + - master/slave_unreachable_canceled + - master/slave_unreachable_completed + - master/slave_unreachable_scheduled + - master/slaves_unreachable - frameworks - - master/frameworks_active - - master/frameworks_connected - - master/frameworks_disconnected - - master/frameworks_inactive - - master/outstanding_offers + - master/frameworks_active + - master/frameworks_connected + - master/frameworks_disconnected + - master/frameworks_inactive + - master/outstanding_offers - framework offers - - master/frameworks/subscribed - - master/frameworks/calls_total - - master/frameworks/calls - - master/frameworks/events_total - - master/frameworks/events - - master/frameworks/operations_total - - master/frameworks/operations - - master/frameworks/tasks/active - - master/frameworks/tasks/terminal - - master/frameworks/offers/sent - - master/frameworks/offers/accepted - - master/frameworks/offers/declined - - master/frameworks/offers/rescinded - - master/frameworks/roles/suppressed + - 
master/frameworks/subscribed + - master/frameworks/calls_total + - master/frameworks/calls + - master/frameworks/events_total + - master/frameworks/events + - master/frameworks/operations_total + - master/frameworks/operations + - master/frameworks/tasks/active + - master/frameworks/tasks/terminal + - master/frameworks/offers/sent + - master/frameworks/offers/accepted + - master/frameworks/offers/declined + - master/frameworks/offers/rescinded + - master/frameworks/roles/suppressed - tasks - - master/tasks_error - - master/tasks_failed - - master/tasks_finished - - master/tasks_killed - - master/tasks_lost - - master/tasks_running - - master/tasks_staging - - master/tasks_starting - - master/tasks_dropped - - master/tasks_gone - - master/tasks_gone_by_operator - - master/tasks_killing - - master/tasks_unreachable + - master/tasks_error + - master/tasks_failed + - master/tasks_finished + - master/tasks_killed + - master/tasks_lost + - master/tasks_running + - master/tasks_staging + - master/tasks_starting + - master/tasks_dropped + - master/tasks_gone + - master/tasks_gone_by_operator + - master/tasks_killing + - master/tasks_unreachable - messages - - master/invalid_executor_to_framework_messages - - master/invalid_framework_to_executor_messages - - master/invalid_status_update_acknowledgements - - master/invalid_status_updates - - master/dropped_messages - - master/messages_authenticate - - master/messages_deactivate_framework - - master/messages_decline_offers - - master/messages_executor_to_framework - - master/messages_exited_executor - - master/messages_framework_to_executor - - master/messages_kill_task - - master/messages_launch_tasks - - master/messages_reconcile_tasks - - master/messages_register_framework - - master/messages_register_slave - - master/messages_reregister_framework - - master/messages_reregister_slave - - master/messages_resource_request - - master/messages_revive_offers - - master/messages_status_update - - master/messages_status_update_acknowledgement - - master/messages_unregister_framework - - master/messages_unregister_slave - - master/messages_update_slave - - master/recovery_slave_removals - - master/slave_removals/reason_registered - - master/slave_removals/reason_unhealthy - - master/slave_removals/reason_unregistered - - master/valid_framework_to_executor_messages - - master/valid_status_update_acknowledgements - - master/valid_status_updates - - master/task_lost/source_master/reason_invalid_offers - - master/task_lost/source_master/reason_slave_removed - - master/task_lost/source_slave/reason_executor_terminated - - master/valid_executor_to_framework_messages - - master/invalid_operation_status_update_acknowledgements - - master/messages_operation_status_update_acknowledgement - - master/messages_reconcile_operations - - master/messages_suppress_offers - - master/valid_operation_status_update_acknowledgements + - master/invalid_executor_to_framework_messages + - master/invalid_framework_to_executor_messages + - master/invalid_status_update_acknowledgements + - master/invalid_status_updates + - master/dropped_messages + - master/messages_authenticate + - master/messages_deactivate_framework + - master/messages_decline_offers + - master/messages_executor_to_framework + - master/messages_exited_executor + - master/messages_framework_to_executor + - master/messages_kill_task + - master/messages_launch_tasks + - master/messages_reconcile_tasks + - master/messages_register_framework + - master/messages_register_slave + - master/messages_reregister_framework + 
- master/messages_reregister_slave + - master/messages_resource_request + - master/messages_revive_offers + - master/messages_status_update + - master/messages_status_update_acknowledgement + - master/messages_unregister_framework + - master/messages_unregister_slave + - master/messages_update_slave + - master/recovery_slave_removals + - master/slave_removals/reason_registered + - master/slave_removals/reason_unhealthy + - master/slave_removals/reason_unregistered + - master/valid_framework_to_executor_messages + - master/valid_status_update_acknowledgements + - master/valid_status_updates + - master/task_lost/source_master/reason_invalid_offers + - master/task_lost/source_master/reason_slave_removed + - master/task_lost/source_slave/reason_executor_terminated + - master/valid_executor_to_framework_messages + - master/invalid_operation_status_update_acknowledgements + - master/messages_operation_status_update_acknowledgement + - master/messages_reconcile_operations + - master/messages_suppress_offers + - master/valid_operation_status_update_acknowledgements - evqueue - - master/event_queue_dispatches - - master/event_queue_http_requests - - master/event_queue_messages - - master/operator_event_stream_subscribers + - master/event_queue_dispatches + - master/event_queue_http_requests + - master/event_queue_messages + - master/operator_event_stream_subscribers - registrar - - registrar/state_fetch_ms - - registrar/state_store_ms - - registrar/state_store_ms/max - - registrar/state_store_ms/min - - registrar/state_store_ms/p50 - - registrar/state_store_ms/p90 - - registrar/state_store_ms/p95 - - registrar/state_store_ms/p99 - - registrar/state_store_ms/p999 - - registrar/state_store_ms/p9999 - - registrar/state_store_ms/count - - registrar/log/ensemble_size - - registrar/log/recovered - - registrar/queued_operations - - registrar/registry_size_bytes + - registrar/state_fetch_ms + - registrar/state_store_ms + - registrar/state_store_ms/max + - registrar/state_store_ms/min + - registrar/state_store_ms/p50 + - registrar/state_store_ms/p90 + - registrar/state_store_ms/p95 + - registrar/state_store_ms/p99 + - registrar/state_store_ms/p999 + - registrar/state_store_ms/p9999 + - registrar/state_store_ms/count + - registrar/log/ensemble_size + - registrar/log/recovered + - registrar/queued_operations + - registrar/registry_size_bytes - allocator - - allocator/allocation_run_ms - - allocator/allocation_run_ms/count - - allocator/allocation_run_ms/max - - allocator/allocation_run_ms/min - - allocator/allocation_run_ms/p50 - - allocator/allocation_run_ms/p90 - - allocator/allocation_run_ms/p95 - - allocator/allocation_run_ms/p99 - - allocator/allocation_run_ms/p999 - - allocator/allocation_run_ms/p9999 - - allocator/allocation_runs - - allocator/allocation_run_latency_ms - - allocator/allocation_run_latency_ms/count - - allocator/allocation_run_latency_ms/max - - allocator/allocation_run_latency_ms/min - - allocator/allocation_run_latency_ms/p50 - - allocator/allocation_run_latency_ms/p90 - - allocator/allocation_run_latency_ms/p95 - - allocator/allocation_run_latency_ms/p99 - - allocator/allocation_run_latency_ms/p999 - - allocator/allocation_run_latency_ms/p9999 - - allocator/roles/shares/dominant - - allocator/event_queue_dispatches - - allocator/offer_filters/roles/active - - allocator/quota/roles/resources/offered_or_allocated - - allocator/quota/roles/resources/guarantee - - allocator/resources/cpus/offered_or_allocated - - allocator/resources/cpus/total - - 
allocator/resources/disk/offered_or_allocated - - allocator/resources/disk/total - - allocator/resources/mem/offered_or_allocated - - allocator/resources/mem/total + - allocator/allocation_run_ms + - allocator/allocation_run_ms/count + - allocator/allocation_run_ms/max + - allocator/allocation_run_ms/min + - allocator/allocation_run_ms/p50 + - allocator/allocation_run_ms/p90 + - allocator/allocation_run_ms/p95 + - allocator/allocation_run_ms/p99 + - allocator/allocation_run_ms/p999 + - allocator/allocation_run_ms/p9999 + - allocator/allocation_runs + - allocator/allocation_run_latency_ms + - allocator/allocation_run_latency_ms/count + - allocator/allocation_run_latency_ms/max + - allocator/allocation_run_latency_ms/min + - allocator/allocation_run_latency_ms/p50 + - allocator/allocation_run_latency_ms/p90 + - allocator/allocation_run_latency_ms/p95 + - allocator/allocation_run_latency_ms/p99 + - allocator/allocation_run_latency_ms/p999 + - allocator/allocation_run_latency_ms/p9999 + - allocator/roles/shares/dominant + - allocator/event_queue_dispatches + - allocator/offer_filters/roles/active + - allocator/quota/roles/resources/offered_or_allocated + - allocator/quota/roles/resources/guarantee + - allocator/resources/cpus/offered_or_allocated + - allocator/resources/cpus/total + - allocator/resources/disk/offered_or_allocated + - allocator/resources/disk/total + - allocator/resources/mem/offered_or_allocated + - allocator/resources/mem/total Mesos slave metric groups + - resources - - slave/cpus_percent - - slave/cpus_used - - slave/cpus_total - - slave/cpus_revocable_percent - - slave/cpus_revocable_total - - slave/cpus_revocable_used - - slave/disk_percent - - slave/disk_used - - slave/disk_total - - slave/disk_revocable_percent - - slave/disk_revocable_total - - slave/disk_revocable_used - - slave/gpus_percent - - slave/gpus_used - - slave/gpus_total, - - slave/gpus_revocable_percent - - slave/gpus_revocable_total - - slave/gpus_revocable_used - - slave/mem_percent - - slave/mem_used - - slave/mem_total - - slave/mem_revocable_percent - - slave/mem_revocable_total - - slave/mem_revocable_used + - slave/cpus_percent + - slave/cpus_used + - slave/cpus_total + - slave/cpus_revocable_percent + - slave/cpus_revocable_total + - slave/cpus_revocable_used + - slave/disk_percent + - slave/disk_used + - slave/disk_total + - slave/disk_revocable_percent + - slave/disk_revocable_total + - slave/disk_revocable_used + - slave/gpus_percent + - slave/gpus_used + - slave/gpus_total, + - slave/gpus_revocable_percent + - slave/gpus_revocable_total + - slave/gpus_revocable_used + - slave/mem_percent + - slave/mem_used + - slave/mem_total + - slave/mem_revocable_percent + - slave/mem_revocable_total + - slave/mem_revocable_used - agent - - slave/registered - - slave/uptime_secs + - slave/registered + - slave/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - executors - - containerizer/mesos/container_destroy_errors - - slave/container_launch_errors - - slave/executors_preempted - - slave/frameworks_active - - slave/executor_directory_max_allowed_age_secs - - slave/executors_registering - - slave/executors_running - - slave/executors_terminated - - slave/executors_terminating - - slave/recovery_errors + - containerizer/mesos/container_destroy_errors + - 
slave/container_launch_errors + - slave/executors_preempted + - slave/frameworks_active + - slave/executor_directory_max_allowed_age_secs + - slave/executors_registering + - slave/executors_running + - slave/executors_terminated + - slave/executors_terminating + - slave/recovery_errors - tasks - - slave/tasks_failed - - slave/tasks_finished - - slave/tasks_killed - - slave/tasks_lost - - slave/tasks_running - - slave/tasks_staging - - slave/tasks_starting + - slave/tasks_failed + - slave/tasks_finished + - slave/tasks_killed + - slave/tasks_lost + - slave/tasks_running + - slave/tasks_staging + - slave/tasks_starting - messages - - slave/invalid_framework_messages - - slave/invalid_status_updates - - slave/valid_framework_messages - - slave/valid_status_updates + - slave/invalid_framework_messages + - slave/invalid_status_updates + - slave/valid_framework_messages + - slave/valid_status_updates -### Tags: +## Tags - All master/slave measurements have the following tags: - - server (network location of server: `host:port`) - - url (URL origin of server: `scheme://host:port`) - - role (master/slave) + - server (network location of server: `host:port`) + - url (URL origin of server: `scheme://host:port`) + - role (master/slave) - All master measurements have the extra tags: - - state (leader/follower) + - state (leader/follower) -### Example Output: -``` +## Example Output + +```shell $ telegraf --config ~/mesos.conf --input-filter mesos --test * Plugin: mesos, Collection 1 mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101 @@ -347,4 +349,3 @@ master/mem_revocable_used=0,master/mem_total=1002, master/mem_used=0,master/messages_authenticate=0, master/messages_deactivate_framework=0 ... ``` - diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 026c9e3b3fb99..e5f1f00ef26b8 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -7,7 +7,7 @@ This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14. When using an version of Minecraft earlier than 1.13, be aware that the values for some criterion has changed and may need to be modified. -#### Server Setup +## Server Setup Enable [RCON][] on the Minecraft server, add this to your server configuration in the [server.properties][] file: @@ -24,22 +24,25 @@ from the server console, or over an RCON connection. When getting started pick an easy to test objective. 
This command will add an objective that counts the number of times a player has jumped: -``` + +```sh /scoreboard objectives add jumps minecraft.custom:minecraft.jump ``` Once a player has triggered the event they will be added to the scoreboard, you can then list all players with recorded scores: -``` + +```sh /scoreboard players list ``` View the current scores with a command, substituting your player name: -``` + +```sh /scoreboard players list Etho ``` -### Configuration +## Configuration ```toml [[inputs.minecraft]] @@ -53,7 +56,7 @@ View the current scores with a command, substituting your player name: password = "" ``` -### Metrics +## Metrics - minecraft - tags: @@ -64,15 +67,17 @@ View the current scores with a command, substituting your player name: - fields: - `` (integer, count) -### Sample Queries: +## Sample Queries Get the number of jumps per player in the last hour: + ```sql SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player" ``` -### Example Output: -``` +## Example Output + +```shell minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000 minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000 minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index ac01e140b695c..29721487ffec3 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -3,7 +3,7 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding Registers via Modbus TCP or Modbus RTU/ASCII. -### Configuration +## Configuration ```toml [[inputs.modbus]] @@ -103,17 +103,18 @@ Registers via Modbus TCP or Modbus RTU/ASCII. # close_connection_after_gather = false ``` -### Notes +## Notes + You can debug Modbus connection issues by enabling `debug_connection`. To see those debug messages Telegraf has to be started with debugging enabled (i.e. with `--debug` option). Please be aware that connection tracing will produce a lot of messages and should **NOT** be used in production environments. Please use `pause_between_requests` with care. Especially make sure that the total gather time, including the pause(s), does not exceed the configured collection interval. Note, that pauses add up if multiple requests are sent! -### Metrics +## Metrics Metric are custom and configured using the `discrete_inputs`, `coils`, `holding_register` and `input_registers` options. -### Usage of `data_type` +## Usage of `data_type` The field `data_type` defines the representation of the data value on input from the modbus registers. The input values are then converted from the given `data_type` to a type that is apropriate when @@ -122,16 +123,16 @@ integer or floating-point-number. The size of the output type is assumed to be l for all supported input types. The mapping from the input type to the output type is fixed and cannot be configured. -#### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` +### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` These types are used for integer input values. Select the one that matches your modbus data source. -#### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` +### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` Use these types if your modbus registers contain a value that is encoded in this format. 
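As a rough standalone illustration (not the plugin's own code), the Go sketch below shows how two 16-bit registers combine into an IEEE 754 `float32`, and why an incorrect word order (compare the `byte_order` notes in the troubleshooting section) turns a sensible reading into garbage. The `decodeFloat32IEEE` helper and the register values are invented for this example.

```go
package main

import (
	"fmt"
	"math"
)

// decodeFloat32IEEE interprets two 16-bit Modbus registers as an IEEE 754
// float32. wordSwap selects the word order: false treats the first register
// as the high word ("ABCD"-style ordering), true swaps the words ("CDAB").
func decodeFloat32IEEE(regs [2]uint16, wordSwap bool) float32 {
	hi, lo := regs[0], regs[1]
	if wordSwap {
		hi, lo = lo, hi
	}
	return math.Float32frombits(uint32(hi)<<16 | uint32(lo))
}

func main() {
	regs := [2]uint16{0x42C8, 0x0000}
	fmt.Println(decodeFloat32IEEE(regs, false)) // 100, the intended reading
	fmt.Println(decodeFloat32IEEE(regs, true))  // a meaningless denormal once the words are swapped
}
```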
These types always include the sign and therefore there exists no variant. -#### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) +### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) These types are handled as an integer type on input, but are converted to floating point representation for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point @@ -148,9 +149,10 @@ with N decimal places'. (FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion from unsigned values). -### Trouble shooting +## Trouble shooting + +### Strange data -#### Strange data Modbus documentations are often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or stay unclear about the used word-order. Furthermore, there are some non-standard implementations that also swap the bytes within the register word (16-bit). @@ -164,7 +166,8 @@ In case you see strange values, the `byte_order` might be off. You can either pr If your data still looks corrupted, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). -#### Workarounds +### Workarounds + Some Modbus devices need special read characteristics when reading data and will fail otherwise. For example, there are certain serial devices that need a certain pause between register read requests. Others might only offer a limited number of simultaneously connected devices, like serial devices or some ModbusTCP devices. In case you need to access those devices in parallel you might want to disconnect immediately after the plugin finished reading. To allow this plugin to also handle those "special" devices there is the `workarounds` configuration options. In case your documentation states certain read requirements or you get read timeouts or other read errors you might want to try one or more workaround options. @@ -172,7 +175,7 @@ If you find that other/more workarounds are required for your device, please let In case your device needs a workaround that is not yet implemented, please open an issue or submit a pull-request. -### Example Output +## Example Output ```sh $ ./telegraf -config telegraf.conf -input-filter modbus -test diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index aa4a08b31bbc8..053e745982a5b 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -12,7 +12,7 @@ Minimum Version of Monit tested with is 5.16. [monit]: https://mmonit.com/ [httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT -### Configuration +## Configuration ```toml [[inputs.monit]] @@ -34,7 +34,7 @@ Minimum Version of Monit tested with is 5.16. # insecure_skip_verify = false ``` -### Metrics +## Metrics - monit_filesystem - tags: @@ -57,7 +57,7 @@ Minimum Version of Monit tested with is 5.16. - inode_usage - inode_total -+ monit_directory +- monit_directory - tags: - address - version @@ -88,7 +88,7 @@ Minimum Version of Monit tested with is 5.16. - size - permissions -+ monit_process +- monit_process - tags: - address - version @@ -132,7 +132,7 @@ Minimum Version of Monit tested with is 5.16. - protocol - type -+ monit_system +- monit_system - tags: - address - version @@ -169,9 +169,9 @@ Minimum Version of Monit tested with is 5.16. 
- status_code - monitoring_status_code - monitoring_mode_code - - permissions + - permissions -+ monit_program +- monit_program - tags: - address - version @@ -199,7 +199,7 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -+ monit_program +- monit_program - tags: - address - version @@ -227,8 +227,9 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -### Example Output -``` +## Example Output + +```shell monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000 monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 2d71ac159cdd2..b460d59aa90ed 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -7,7 +7,8 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. > Note: If you wish to parse metrics from a single file formatted in one of the supported > [input data formats][], you should use the [file][] input plugin instead. -### Configuration +## Configuration + ```toml [[inputs.multifile]] ## Base directory where telegraf will look for files. @@ -34,32 +35,37 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. ``` Each file table can contain the following options: + * `file`: Path of the file to be parsed, relative to the `base_dir`. * `dest`: Name of the field/tag key, defaults to `$(basename file)`. * `conversion`: Data format used to parse the file contents: - * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. - * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - * `int`: Converts the value into an integer. - * `string`, `""`: No conversion. - * `bool`: Converts the value into a boolean. - * `tag`: File content is used as a tag. - -### Example Output + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. + * `int`: Converts the value into an integer. + * `string`, `""`: No conversion. + * `bool`: Converts the value into a boolean. + * `tag`: File content is used as a tag. + +## Example Output + This example shows a BME280 connected to a Raspberry Pi, using the sample config. 
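The raw sysfs readings listed at the end of this section (for example `48900` for relative humidity and `20400` for temperature) become the `48.9` and `20.4` seen in the output below once a `float(3)`-style conversion is applied, assuming that is what the sample config uses for those files. A throwaway Go sketch of the `float(X)` rule, with a made-up `floatX` helper:

```go
package main

import (
	"fmt"
	"math"
)

// floatX mimics the multifile "float(X)" conversion: treat the file content
// as a number and move the decimal point X places to the left.
func floatX(raw float64, x int) float64 {
	return raw / math.Pow(10, float64(x))
}

func main() {
	fmt.Println(floatX(48900, 3)) // 48.9, e.g. in_humidityrelative_input with float(3)
	fmt.Println(floatX(20400, 3)) // 20.4, e.g. in_temp_input with float(3)
}
```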
-``` + +```sh multifile pressure=101.343285156,temperature=20.4,humidityrelative=48.9 1547202076000000000 ``` To reproduce this, connect a BMP280 to the board's GPIO pins and register the BME280 device driver -``` + +```sh cd /sys/bus/i2c/devices/i2c-1 echo bme280 0x76 > new_device ``` The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/iio:device0`: + * `in_humidityrelative_input`: `48900` * `in_pressure_input`: `101.343285156` * `in_temp_input`: `20400` diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 0a96f9c9b1447..cd98b454408b0 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -18,7 +18,7 @@ This plugin gathers the statistic data from MySQL server * File events statistics * Table schema statistics -### Configuration +## Configuration ```toml [[inputs.mysql]] @@ -122,7 +122,7 @@ This plugin gathers the statistic data from MySQL server # insecure_skip_verify = false ``` -#### Metric Version +### Metric Version When `metric_version = 2`, a variety of field type issues are corrected as well as naming inconsistencies. If you have existing data on the original version @@ -132,6 +132,7 @@ InfluxDB due to the change of types. For this reason, you should keep the If preserving your old data is not required you may wish to drop conflicting measurements: + ```sql DROP SERIES from mysql DROP SERIES from mysql_variables @@ -143,6 +144,7 @@ Otherwise, migration can be performed using the following steps: 1. Duplicate your `mysql` plugin configuration and add a `name_suffix` and `metric_version = 2`, this will result in collection using both the old and new style concurrently: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -157,8 +159,8 @@ style concurrently: 2. Upgrade all affected Telegraf clients to version >=1.6. New measurements will be created with the `name_suffix`, for example:: - - `mysql_v2` - - `mysql_variables_v2` + * `mysql_v2` + * `mysql_variables_v2` 3. Update charts, alerts, and other supporting code to the new format. 4. You can now remove the old `mysql` plugin configuration and remove old @@ -169,6 +171,7 @@ historical data to the default name. Do this only after retiring the old measurement name. 1. Use the technique described above to write to multiple locations: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -180,8 +183,10 @@ measurement name. servers = ["tcp(127.0.0.1:3306)/"] ``` + 2. Create a TICKScript to copy the historical data: - ``` + + ```sql dbrp "telegraf"."autogen" batch @@ -195,17 +200,23 @@ measurement name. .retentionPolicy('autogen') .measurement('mysql') ``` + 3. Define a task for your script: + ```sh kapacitor define copy-measurement -tick copy-measurement.task ``` + 4. Run the task over the data you would like to migrate: + ```sh kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement ``` + 5. Verify copied data and repeat for other measurements. -### Metrics: +## Metrics + * Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUSES` * Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES` * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when @@ -214,141 +225,142 @@ then everything works differently, this metric does not work with multi-source replication, unless you set `gather_all_slave_channels = true`. For MariaDB, `mariadb_dialect = true` should be set to address the field names and commands differences. 
- * slave_[column name]() + * slave_[column name] * Binary logs - all metrics including size and count of all binary files. Requires to be turned on in configuration. - * binary_size_bytes(int, number) - * binary_files_count(int, number) + * binary_size_bytes(int, number) + * binary_files_count(int, number) * Process list - connection metrics from processlist for each user. It has the following tags - * connections(int, number) + * connections(int, number) * User Statistics - connection metrics from user statistics for each user. It has the following fields - * access_denied - * binlog_bytes_written - * busy_time - * bytes_received - * bytes_sent - * commit_transactions - * concurrent_connections - * connected_time - * cpu_time - * denied_connections - * empty_queries - * hostlost_connections - * other_commands - * rollback_transactions - * rows_fetched - * rows_updated - * select_commands - * server - * table_rows_read - * total_connections - * total_ssl_connections - * update_commands - * user + * access_denied + * binlog_bytes_written + * busy_time + * bytes_received + * bytes_sent + * commit_transactions + * concurrent_connections + * connected_time + * cpu_time + * denied_connections + * empty_queries + * hostlost_connections + * other_commands + * rollback_transactions + * rows_fetched + * rows_updated + * select_commands + * server + * table_rows_read + * total_connections + * total_ssl_connections + * update_commands + * user * Perf Table IO waits - total count and time of I/O waits event for each table and process. It has following fields: - * table_io_waits_total_fetch(float, number) - * table_io_waits_total_insert(float, number) - * table_io_waits_total_update(float, number) - * table_io_waits_total_delete(float, number) - * table_io_waits_seconds_total_fetch(float, milliseconds) - * table_io_waits_seconds_total_insert(float, milliseconds) - * table_io_waits_seconds_total_update(float, milliseconds) - * table_io_waits_seconds_total_delete(float, milliseconds) + * table_io_waits_total_fetch(float, number) + * table_io_waits_total_insert(float, number) + * table_io_waits_total_update(float, number) + * table_io_waits_total_delete(float, number) + * table_io_waits_seconds_total_fetch(float, milliseconds) + * table_io_waits_seconds_total_insert(float, milliseconds) + * table_io_waits_seconds_total_update(float, milliseconds) + * table_io_waits_seconds_total_delete(float, milliseconds) * Perf index IO waits - total count and time of I/O waits event for each index and process. It has following fields: - * index_io_waits_total_fetch(float, number) - * index_io_waits_seconds_total_fetch(float, milliseconds) - * index_io_waits_total_insert(float, number) - * index_io_waits_total_update(float, number) - * index_io_waits_total_delete(float, number) - * index_io_waits_seconds_total_insert(float, milliseconds) - * index_io_waits_seconds_total_update(float, milliseconds) - * index_io_waits_seconds_total_delete(float, milliseconds) + * index_io_waits_total_fetch(float, number) + * index_io_waits_seconds_total_fetch(float, milliseconds) + * index_io_waits_total_insert(float, number) + * index_io_waits_total_update(float, number) + * index_io_waits_total_delete(float, number) + * index_io_waits_seconds_total_insert(float, milliseconds) + * index_io_waits_seconds_total_update(float, milliseconds) + * index_io_waits_seconds_total_delete(float, milliseconds) * Info schema autoincrement statuses - autoincrement fields and max values for them. 
It has following fields: - * auto_increment_column(int, number) - * auto_increment_column_max(int, number) + * auto_increment_column(int, number) + * auto_increment_column_max(int, number) * InnoDB metrics - all metrics of information_schema.INNODB_METRICS with a status "enabled" * Perf table lock waits - gathers total number and time for SQL and external lock waits events for each table and operation. It has following fields. The unit of fields varies by the tags. - * read_normal(float, number/milliseconds) - * read_with_shared_locks(float, number/milliseconds) - * read_high_priority(float, number/milliseconds) - * read_no_insert(float, number/milliseconds) - * write_normal(float, number/milliseconds) - * write_allow_write(float, number/milliseconds) - * write_concurrent_insert(float, number/milliseconds) - * write_low_priority(float, number/milliseconds) - * read(float, number/milliseconds) - * write(float, number/milliseconds) + * read_normal(float, number/milliseconds) + * read_with_shared_locks(float, number/milliseconds) + * read_high_priority(float, number/milliseconds) + * read_no_insert(float, number/milliseconds) + * write_normal(float, number/milliseconds) + * write_allow_write(float, number/milliseconds) + * write_concurrent_insert(float, number/milliseconds) + * write_low_priority(float, number/milliseconds) + * read(float, number/milliseconds) + * write(float, number/milliseconds) * Perf events waits - gathers total time and number of event waits - * events_waits_total(float, number) - * events_waits_seconds_total(float, milliseconds) + * events_waits_total(float, number) + * events_waits_seconds_total(float, milliseconds) * Perf file events statuses - gathers file events statuses - * file_events_total(float,number) - * file_events_seconds_total(float, milliseconds) - * file_events_bytes_total(float, bytes) + * file_events_total(float,number) + * file_events_seconds_total(float, milliseconds) + * file_events_bytes_total(float, bytes) * Perf events statements - gathers attributes of each event - * events_statements_total(float, number) - * events_statements_seconds_total(float, millieconds) - * events_statements_errors_total(float, number) - * events_statements_warnings_total(float, number) - * events_statements_rows_affected_total(float, number) - * events_statements_rows_sent_total(float, number) - * events_statements_rows_examined_total(float, number) - * events_statements_tmp_tables_total(float, number) - * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totals(float, number) - * events_statements_sort_rows_total(float, number) - * events_statements_no_index_used_total(float, number) + * events_statements_total(float, number) + * events_statements_seconds_total(float, millieconds) + * events_statements_errors_total(float, number) + * events_statements_warnings_total(float, number) + * events_statements_rows_affected_total(float, number) + * events_statements_rows_sent_total(float, number) + * events_statements_rows_examined_total(float, number) + * events_statements_tmp_tables_total(float, number) + * events_statements_tmp_disk_tables_total(float, number) + * events_statements_sort_merge_passes_totals(float, number) + * events_statements_sort_rows_total(float, number) + * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema. 
It has following measurements - * info_schema_table_rows(float, number) - * info_schema_table_size_data_length(float, number) - * info_schema_table_size_index_length(float, number) - * info_schema_table_size_data_free(float, number) - * info_schema_table_version(float, number) + * info_schema_table_rows(float, number) + * info_schema_table_size_data_length(float, number) + * info_schema_table_size_index_length(float, number) + * info_schema_table_size_data_free(float, number) + * info_schema_table_version(float, number) ## Tags + * All measurements has following tags - * server (the host name from which the metrics are gathered) + * server (the host name from which the metrics are gathered) * Process list measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * User Statistics measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * Perf table IO waits measurement has following tags - * schema - * name (object name for event or process) + * schema + * name (object name for event or process) * Perf index IO waits has following tags - * schema - * name - * index + * schema + * name + * index * Info schema autoincrement statuses has following tags - * schema - * table - * column + * schema + * table + * column * Perf table lock waits has following tags - * schema - * table - * sql_lock_waits_total(fields including this tag have numeric unit) - * external_lock_waits_total(fields including this tag have numeric unit) - * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) - * external_lock_waits_seconds_total(fields including this tag have millisecond unit) + * schema + * table + * sql_lock_waits_total(fields including this tag have numeric unit) + * external_lock_waits_total(fields including this tag have numeric unit) + * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) + * external_lock_waits_seconds_total(fields including this tag have millisecond unit) * Perf events statements has following tags - * event_name + * event_name * Perf file events statuses has following tags - * event_name - * mode + * event_name + * mode * Perf file events statements has following tags - * schema - * digest - * digest_text + * schema + * digest + * digest_text * Table schema has following tags - * schema - * table - * component - * type - * engine - * row_format - * create_options + * schema + * table + * component + * type + * engine + * row_format + * create_options From 121ae1faaa28a5c21c79eeb5f54f94e6be3c739f Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 12:19:25 -0700 Subject: [PATCH 081/133] fix: check error before defer in prometheus k8s (#10091) --- plugins/inputs/prometheus/kubernetes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 9a4d6bd325c46..e3217e697d914 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -110,10 +110,10 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) LabelSelector: p.KubernetesLabelSelector, FieldSelector: p.KubernetesFieldSelector, }) - defer watcher.Stop() if err != nil { return err } + defer watcher.Stop() for { select { From 64aee2c87b8f461c1da8e7b38bcaa1031c9d5b71 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 24 Nov 2021 12:28:10 
-0700 Subject: [PATCH 082/133] fix: update nats-sever to support openbsd (#10046) Resolves: #10035 --- go.mod | 6 +++--- go.sum | 19 +++++++------------ 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index f1940dcfa26c5..65107c40d7a34 100644 --- a/go.mod +++ b/go.mod @@ -195,9 +195,9 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/jwt/v2 v2.0.2 // indirect - github.com/nats-io/nats-server/v2 v2.2.6 - github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/jwt/v2 v2.1.0 // indirect + github.com/nats-io/nats-server/v2 v2.6.3 + github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 diff --git a/go.sum b/go.sum index 766b94293531e..f36bcce7474d7 100644 --- a/go.sum +++ b/go.sum @@ -1380,8 +1380,6 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= @@ -1579,21 +1577,18 @@ github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5w github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= -github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= -github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/jwt/v2 v2.1.0 h1:1UbfD5g1xTdWmSeRV8bh/7u+utTiBsRtWhLl1PixZp4= +github.com/nats-io/jwt/v2 v2.1.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48= -github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= +github.com/nats-io/nats-server/v2 v2.6.3 h1:/ponRuIBtTiVDZRBjTKP+Cm/SWpvovI3vuB3pkpRQWw= +github.com/nats-io/nats-server/v2 v2.6.3/go.mod h1:LlMieumxNUnCloOTVFv7Wog0YnasScxARUMXVXv9/+M= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= 
-github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k= -github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 h1:GMx3ZOcMEVM5qnUItQ4eJyQ6ycwmIEB/VC/UxvdevE0= +github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= -github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= From 64bc0ae9c0dfb4cf4490b5f925aa2e25a6f2de9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 24 Nov 2021 20:33:45 +0100 Subject: [PATCH 083/133] fix: Linter fixes for plugins/outputs/[p-z]* (#10139) Co-authored-by: Pawel Zak --- .../prometheus_client/prometheus_client.go | 23 +++++-- .../prometheus_client_v1_test.go | 38 ++++++----- .../prometheus_client_v2_test.go | 29 ++++---- plugins/outputs/riemann/riemann.go | 9 +-- plugins/outputs/riemann_legacy/riemann.go | 7 +- plugins/outputs/sensu/sensu.go | 4 +- plugins/outputs/signalfx/signalfx.go | 9 +-- plugins/outputs/signalfx/signalfx_test.go | 14 ++-- .../outputs/socket_writer/socket_writer.go | 10 +-- .../socket_writer/socket_writer_test.go | 42 +++++++----- plugins/outputs/stackdriver/stackdriver.go | 66 ++++++++----------- .../outputs/stackdriver/stackdriver_test.go | 19 +++++- plugins/outputs/sumologic/sumologic.go | 49 +++++++------- plugins/outputs/sumologic/sumologic_test.go | 42 ++++++------ plugins/outputs/syslog/syslog.go | 11 ++-- plugins/outputs/syslog/syslog_mapper.go | 8 +-- plugins/outputs/syslog/syslog_mapper_test.go | 20 +++--- plugins/outputs/syslog/syslog_test.go | 41 +++++++----- plugins/outputs/timestream/timestream.go | 34 +++++----- .../timestream/timestream_internal_test.go | 44 ++++++------- plugins/outputs/timestream/timestream_test.go | 47 ++++++------- plugins/outputs/warp10/warp10.go | 6 +- plugins/outputs/wavefront/wavefront.go | 5 +- 23 files changed, 307 insertions(+), 270 deletions(-) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 9c54c2dade83a..795163b4f5bc5 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -10,6 +10,10 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -17,8 +21,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" ) var ( @@ 
-121,9 +123,15 @@ func (p *PrometheusClient) Init() error { for collector := range defaultCollectors { switch collector { case "gocollector": - registry.Register(prometheus.NewGoCollector()) + err := registry.Register(collectors.NewGoCollector()) + if err != nil { + return err + } case "process": - registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + err := registry.Register(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + if err != nil { + return err + } default: return fmt.Errorf("unrecognized collector %s", collector) } @@ -160,7 +168,10 @@ func (p *PrometheusClient) Init() error { rangeHandler := internal.IPRangeHandler(ipRange, onError) promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) landingPageHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Telegraf Output Plugin: Prometheus Client ")) + _, err := w.Write([]byte("Telegraf Output Plugin: Prometheus Client ")) + if err != nil { + p.Log.Errorf("Error occurred when writing HTTP reply: %v", err) + } }) mux := http.NewServeMux() @@ -229,7 +240,7 @@ func onError(rw http.ResponseWriter, code int) { http.Error(rw, http.StatusText(code), code) } -// Address returns the address the plugin is listening on. If not listening +// URL returns the address the plugin is listening on. If not listening // an empty string is returned. func (p *PrometheusClient) URL() string { if p.url != nil { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 95fa97fb688b7..7a93a9bc503ab 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -10,14 +10,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestMetricVersion1(t *testing.T) { - Logger := testutil.Logger{Name: "outputs.prometheus_client"} + logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { name string output *PrometheusClient @@ -31,7 +32,7 @@ func TestMetricVersion1(t *testing.T) { MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -58,7 +59,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -85,7 +86,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -114,7 +115,7 @@ cpu_time_idle{host="example.org"} 42 CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", StringAsLabel: true, - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -141,7 +142,7 @@ cpu_time_idle{host_name="example.org"} 42 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -169,7 +170,7 @@ cpu_time_idle{host="example.org"} 42 
MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -209,7 +210,7 @@ http_request_duration_seconds_count 144320 MetricVersion: 1, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -272,7 +273,7 @@ rpc_duration_seconds_count 2693 } func TestRoundTripMetricVersion1(t *testing.T) { - Logger := testutil.Logger{Name: "outputs.prometheus_client"} + logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { name string data []byte @@ -348,17 +349,18 @@ rpc_duration_seconds_count 2693 ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - url := fmt.Sprintf("http://%s", ts.Listener.Addr()) + address := fmt.Sprintf("http://%s", ts.Listener.Addr()) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write(tt.data) + _, err := w.Write(tt.data) + require.NoError(t, err) }) input := &inputs.Prometheus{ - URLs: []string{url}, + URLs: []string{address}, URLTag: "", MetricVersion: 1, } @@ -375,7 +377,7 @@ rpc_duration_seconds_count 2693 Listen: "127.0.0.1:0", Path: defaultPath, MetricVersion: 1, - Log: Logger, + Log: logger, CollectorsExclude: []string{"gocollector", "process"}, } err = output.Init() @@ -391,6 +393,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) + defer resp.Body.Close() actual, err := io.ReadAll(resp.Body) require.NoError(t, err) @@ -403,12 +406,12 @@ rpc_duration_seconds_count 2693 } func TestLandingPage(t *testing.T) { - Logger := testutil.Logger{Name: "outputs.prometheus_client"} + logger := testutil.Logger{Name: "outputs.prometheus_client"} output := PrometheusClient{ Listen: ":0", CollectorsExclude: []string{"process"}, MetricVersion: 1, - Log: Logger, + Log: logger, } expected := "Telegraf Output Plugin: Prometheus Client" @@ -419,8 +422,11 @@ func TestLandingPage(t *testing.T) { require.NoError(t, err) u, err := url.Parse(fmt.Sprintf("http://%s/", output.url.Host)) + require.NoError(t, err) + resp, err := http.Get(u.String()) require.NoError(t, err) + defer resp.Body.Close() actual, err := io.ReadAll(resp.Body) require.NoError(t, err) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index c5ff76d4017a7..2096caf6d6d95 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -9,14 +9,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestMetricVersion2(t *testing.T) { - Logger := testutil.Logger{Name: "outputs.prometheus_client"} + logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { name string output *PrometheusClient @@ -30,7 +31,7 @@ func TestMetricVersion2(t *testing.T) { MetricVersion: 2, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -57,7 +58,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 2, 
CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -86,7 +87,7 @@ rpc_duration_seconds_count 2693 CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", ExportTimestamp: true, - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -114,7 +115,7 @@ cpu_time_idle{host="example.org"} 42 0 CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", StringAsLabel: true, - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -141,7 +142,7 @@ cpu_time_idle{host="example.org"} 42 CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", StringAsLabel: false, - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -167,7 +168,7 @@ cpu_time_idle 42 MetricVersion: 2, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -194,7 +195,7 @@ cpu_time_idle{host="example.org"} 42 MetricVersion: 2, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -276,7 +277,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 MetricVersion: 2, CollectorsExclude: []string{"gocollector", "process"}, Path: "/metrics", - Log: Logger, + Log: logger, }, metrics: []telegraf.Metric{ testutil.MustMetric( @@ -332,7 +333,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 } func TestRoundTripMetricVersion2(t *testing.T) { - Logger := testutil.Logger{Name: "outputs.prometheus_client"} + logger := testutil.Logger{Name: "outputs.prometheus_client"} tests := []struct { name string data []byte @@ -414,7 +415,8 @@ rpc_duration_seconds_count 2693 t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write(tt.data) + _, err := w.Write(tt.data) + require.NoError(t, err) }) input := &inputs.Prometheus{ @@ -435,7 +437,7 @@ rpc_duration_seconds_count 2693 Listen: "127.0.0.1:0", Path: defaultPath, MetricVersion: 2, - Log: Logger, + Log: logger, CollectorsExclude: []string{"gocollector", "process"}, } err = output.Init() @@ -451,6 +453,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) + defer resp.Body.Close() actual, err := io.ReadAll(resp.Body) require.NoError(t, err) diff --git a/plugins/outputs/riemann/riemann.go b/plugins/outputs/riemann/riemann.go index bad1e44a0c1a1..bfcc1e337d3d5 100644 --- a/plugins/outputs/riemann/riemann.go +++ b/plugins/outputs/riemann/riemann.go @@ -9,6 +9,7 @@ import ( "time" "github.com/amir/raidman" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" @@ -78,12 +79,12 @@ func (r *Riemann) Connect() error { return nil } -func (r *Riemann) Close() error { +func (r *Riemann) Close() (err error) { if r.client != nil { - r.client.Close() + err = r.client.Close() r.client = nil } - return nil + return err } func (r *Riemann) SampleConfig() string { @@ -113,7 +114,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { } if err := r.client.SendMulti(events); err != nil { - r.Close() + r.Close() //nolint:revive // There is another error which will be returned here return fmt.Errorf("failed to send riemann message: %s", err) } return nil diff --git 
a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index 7fe80297de4d9..0bd0f6b876c68 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/amir/raidman" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" ) @@ -48,9 +49,9 @@ func (r *Riemann) Close() error { if r.client == nil { return nil } - r.client.Close() + err := r.client.Close() r.client = nil - return nil + return err } func (r *Riemann) SampleConfig() string { @@ -82,7 +83,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var senderr = r.client.SendMulti(events) if senderr != nil { - r.Close() // always returns nil + r.Close() //nolint:revive // There is another error which will be returned here return fmt.Errorf("failed to send riemann message (will try to reconnect), error: %s", senderr) } diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 3cd8b2274e52a..b1a9372093369 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -296,10 +296,10 @@ func (s *Sensu) Write(metrics []telegraf.Metric) error { return err } - return s.write(reqBody) + return s.writeMetrics(reqBody) } -func (s *Sensu) write(reqBody []byte) error { +func (s *Sensu) writeMetrics(reqBody []byte) error { var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) method := http.MethodPost diff --git a/plugins/outputs/signalfx/signalfx.go b/plugins/outputs/signalfx/signalfx.go index d8452d7b7ffec..b7550ae5bd4fe 100644 --- a/plugins/outputs/signalfx/signalfx.go +++ b/plugins/outputs/signalfx/signalfx.go @@ -6,12 +6,13 @@ import ( "fmt" "strings" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs" "github.com/signalfx/golib/v3/datapoint" "github.com/signalfx/golib/v3/datapoint/dpsink" "github.com/signalfx/golib/v3/event" "github.com/signalfx/golib/v3/sfxclient" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" ) //init initializes the plugin context @@ -106,7 +107,7 @@ func (s *SignalFx) Connect() error { if s.IngestURL != "" { client.DatapointEndpoint = datapointEndpointForIngestURL(s.IngestURL) client.EventEndpoint = eventEndpointForIngestURL(s.IngestURL) - } else if s.SignalFxRealm != "" { + } else if s.SignalFxRealm != "" { //nolint: revive // "Simplifying" if c {...} else {... 
return } would not simplify anything at all in this case client.DatapointEndpoint = datapointEndpointForRealm(s.SignalFxRealm) client.EventEndpoint = eventEndpointForRealm(s.SignalFxRealm) } else { @@ -144,7 +145,7 @@ func (s *SignalFx) ConvertToSignalFx(metrics []telegraf.Metric) ([]*datapoint.Da if metricValue, err := datapoint.CastMetricValueWithBool(val); err == nil { var dp = datapoint.New(metricName, metricDims, - metricValue.(datapoint.Value), + metricValue, metricType, timestamp) diff --git a/plugins/outputs/signalfx/signalfx_test.go b/plugins/outputs/signalfx/signalfx_test.go index d21cff82f62a2..fbb49d07720d8 100644 --- a/plugins/outputs/signalfx/signalfx_test.go +++ b/plugins/outputs/signalfx/signalfx_test.go @@ -7,13 +7,14 @@ import ( "testing" "time" + "github.com/signalfx/golib/v3/datapoint" + "github.com/signalfx/golib/v3/event" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/testutil" - "github.com/signalfx/golib/v3/datapoint" - "github.com/signalfx/golib/v3/event" - "github.com/stretchr/testify/require" ) type sink struct { @@ -436,7 +437,9 @@ func TestSignalFx_SignalFx(t *testing.T) { measurements = append(measurements, m) } - s.Write(measurements) + err := s.Write(measurements) + require.NoError(t, err) + require.Eventually(t, func() bool { return len(s.client.(*sink).dps) == len(tt.want.datapoints) }, 5*time.Second, 100*time.Millisecond) require.Eventually(t, func() bool { return len(s.client.(*sink).evs) == len(tt.want.events) }, 5*time.Second, 100*time.Millisecond) @@ -596,7 +599,8 @@ func TestSignalFx_Errors(t *testing.T) { measurement.name, measurement.tags, measurement.fields, measurement.time, measurement.tp, ) - s.Write([]telegraf.Metric{m}) + err := s.Write([]telegraf.Metric{m}) + require.Error(t, err) } for !(len(s.client.(*errorsink).dps) == len(tt.want.datapoints) && len(s.client.(*errorsink).evs) == len(tt.want.events)) { time.Sleep(1 * time.Second) diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 2546faa6779d7..130a0f738e0fb 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -3,7 +3,6 @@ package socket_writer import ( "crypto/tls" "fmt" - "log" "net" "strings" "time" @@ -21,6 +20,7 @@ type SocketWriter struct { Address string KeepAlivePeriod *config.Duration tlsint.ClientConfig + Log telegraf.Logger `toml:"-"` serializers.Serializer @@ -99,7 +99,7 @@ func (sw *SocketWriter) Connect() error { } if err := sw.setKeepAlive(c); err != nil { - log.Printf("unable to configure keep alive (%s): %s", sw.Address, err) + sw.Log.Debugf("Unable to configure keep alive (%s): %s", sw.Address, err) } //set encoder sw.encoder, err = internal.NewContentEncoder(sw.ContentEncoding) @@ -142,13 +142,13 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { for _, m := range metrics { bs, err := sw.Serialize(m) if err != nil { - log.Printf("D! [outputs.socket_writer] Could not serialize metric: %v", err) + sw.Log.Debugf("Could not serialize metric: %v", err) continue } bs, err = sw.encoder.Encode(bs) if err != nil { - log.Printf("D! 
[outputs.socket_writer] Could not encode metric: %v", err) + sw.Log.Debugf("Could not encode metric: %v", err) continue } @@ -156,7 +156,7 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { //TODO log & keep going with remaining strings if err, ok := err.(net.Error); !ok || !err.Temporary() { // permanent error. close the connection - sw.Close() + sw.Close() //nolint:revive // There is another error which will be returned here sw.Conn = nil return fmt.Errorf("closing connection: %v", err) } diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 0decb644cccab..d1283a4115ee9 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -9,10 +9,10 @@ import ( "sync" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSocketWriter_tcp(t *testing.T) { @@ -105,8 +105,8 @@ func testSocketWriterStream(t *testing.T, sw *SocketWriter, lconn net.Conn) { require.True(t, scnr.Scan()) mstr2in := scnr.Text() + "\n" - assert.Equal(t, string(mbs1out), mstr1in) - assert.Equal(t, string(mbs2out), mstr2in) + require.Equal(t, string(mbs1out), mstr1in) + require.Equal(t, string(mbs2out), mstr2in) } func testSocketWriterPacket(t *testing.T, sw *SocketWriter, lconn net.PacketConn) { @@ -132,8 +132,8 @@ func testSocketWriterPacket(t *testing.T, sw *SocketWriter, lconn net.PacketConn } require.Len(t, mstrins, 2) - assert.Equal(t, mbs1str, mstrins[0]) - assert.Equal(t, mbs2str, mstrins[1]) + require.Equal(t, mbs1str, mstrins[0]) + require.Equal(t, mbs2str, mstrins[1]) } func TestSocketWriter_Write_err(t *testing.T) { @@ -145,20 +145,26 @@ func TestSocketWriter_Write_err(t *testing.T) { err = sw.Connect() require.NoError(t, err) - sw.Conn.(*net.TCPConn).SetReadBuffer(256) + err = sw.Conn.(*net.TCPConn).SetReadBuffer(256) + require.NoError(t, err) lconn, err := listener.Accept() require.NoError(t, err) - lconn.(*net.TCPConn).SetWriteBuffer(256) + err = lconn.(*net.TCPConn).SetWriteBuffer(256) + require.NoError(t, err) metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} // close the socket to generate an error - lconn.Close() - sw.Conn.Close() + err = lconn.Close() + require.NoError(t, err) + + err = sw.Conn.Close() + require.NoError(t, err) + err = sw.Write(metrics) require.Error(t, err) - assert.Nil(t, sw.Conn) + require.Nil(t, sw.Conn) } func TestSocketWriter_Write_reconnect(t *testing.T) { @@ -170,12 +176,16 @@ func TestSocketWriter_Write_reconnect(t *testing.T) { err = sw.Connect() require.NoError(t, err) - sw.Conn.(*net.TCPConn).SetReadBuffer(256) + err = sw.Conn.(*net.TCPConn).SetReadBuffer(256) + require.NoError(t, err) lconn, err := listener.Accept() require.NoError(t, err) - lconn.(*net.TCPConn).SetWriteBuffer(256) - lconn.Close() + err = lconn.(*net.TCPConn).SetWriteBuffer(256) + require.NoError(t, err) + + err = lconn.Close() + require.NoError(t, err) sw.Conn = nil wg := sync.WaitGroup{} @@ -191,13 +201,13 @@ func TestSocketWriter_Write_reconnect(t *testing.T) { require.NoError(t, err) wg.Wait() - assert.NoError(t, lerr) + require.NoError(t, lerr) mbsout, _ := sw.Serialize(metrics[0]) buf := make([]byte, 256) n, err := lconn.Read(buf) require.NoError(t, err) - assert.Equal(t, string(mbsout), string(buf[:n])) + require.Equal(t, string(mbsout), string(buf[:n])) } func 
TestSocketWriter_udp_gzip(t *testing.T) { diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index d6b24ff78839b..e1fb49d2ea9fa 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -4,20 +4,20 @@ import ( "context" "fmt" "hash/fnv" - "log" "path" "sort" "strings" monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/outputs" "google.golang.org/api/option" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" ) // Stackdriver is the Google Stackdriver config info. @@ -26,6 +26,7 @@ type Stackdriver struct { Namespace string ResourceType string `toml:"resource_type"` ResourceLabels map[string]string `toml:"resource_labels"` + Log telegraf.Logger `toml:"-"` client *monitoring.MetricClient } @@ -46,9 +47,9 @@ const ( // MaxInt is the max int64 value. MaxInt = int(^uint(0) >> 1) - errStringPointsOutOfOrder = "One or more of the points specified had an older end time than the most recent point" - errStringPointsTooOld = "Data points cannot be written more than 24h in the past" - errStringPointsTooFrequent = "One or more points were written more frequently than the maximum sampling period configured for the metric" + errStringPointsOutOfOrder = "one or more of the points specified had an older end time than the most recent point" + errStringPointsTooOld = "data points cannot be written more than 24h in the past" + errStringPointsTooFrequent = "one or more points were written more frequently than the maximum sampling period configured for the metric" ) var sampleConfig = ` @@ -118,15 +119,15 @@ type timeSeriesBuckets map[uint64][]*monitoringpb.TimeSeries func (tsb timeSeriesBuckets) Add(m telegraf.Metric, f *telegraf.Field, ts *monitoringpb.TimeSeries) { h := fnv.New64a() - h.Write([]byte(m.Name())) - h.Write([]byte{'\n'}) - h.Write([]byte(f.Key)) - h.Write([]byte{'\n'}) + h.Write([]byte(m.Name())) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte{'\n'}) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(f.Key)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte{'\n'}) //nolint:revive // from hash.go: "It never returns an error" for key, value := range m.Tags() { - h.Write([]byte(key)) - h.Write([]byte{'\n'}) - h.Write([]byte(value)) - h.Write([]byte{'\n'}) + h.Write([]byte(key)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte{'\n'}) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(value)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte{'\n'}) //nolint:revive // from hash.go: "It never returns an error" } k := h.Sum64() @@ -145,7 +146,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { for _, f := range m.FieldList() { value, err := getStackdriverTypedValue(f.Value) if err != nil { - log.Printf("E! 
[outputs.stackdriver] get type failed: %s", err) + s.Log.Errorf("Get type failed: %s", err) continue } @@ -155,13 +156,13 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { metricKind, err := getStackdriverMetricKind(m.Type()) if err != nil { - log.Printf("E! [outputs.stackdriver] get metric failed: %s", err) + s.Log.Errorf("Get metric failed: %s", err) continue } timeInterval, err := getStackdriverTimeInterval(metricKind, StartTime, m.Time().Unix()) if err != nil { - log.Printf("E! [outputs.stackdriver] get time interval failed: %s", err) + s.Log.Errorf("Get time interval failed: %s", err) continue } @@ -175,7 +176,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { timeSeries := &monitoringpb.TimeSeries{ Metric: &metricpb.Metric{ Type: path.Join("custom.googleapis.com", s.Namespace, m.Name(), f.Key), - Labels: getStackdriverLabels(m.TagList()), + Labels: s.getStackdriverLabels(m.TagList()), }, MetricKind: metricKind, Resource: &monitoredrespb.MonitoredResource{ @@ -228,10 +229,10 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { if strings.Contains(err.Error(), errStringPointsOutOfOrder) || strings.Contains(err.Error(), errStringPointsTooOld) || strings.Contains(err.Error(), errStringPointsTooFrequent) { - log.Printf("D! [outputs.stackdriver] unable to write to Stackdriver: %s", err) + s.Log.Debugf("Unable to write to Stackdriver: %s", err) return nil } - log.Printf("E! [outputs.stackdriver] unable to write to Stackdriver: %s", err) + s.Log.Errorf("Unable to write to Stackdriver: %s", err) return err } } @@ -306,7 +307,7 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro case float64: return &monitoringpb.TypedValue{ Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: float64(v), + DoubleValue: v, }, }, nil case bool: @@ -323,39 +324,26 @@ func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, erro } } -func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { +func (s *Stackdriver) getStackdriverLabels(tags []*telegraf.Tag) map[string]string { labels := make(map[string]string) for _, t := range tags { labels[t.Key] = t.Value } for k, v := range labels { if len(k) > QuotaStringLengthForLabelKey { - log.Printf( - "W! [outputs.stackdriver] removing tag [%s] key exceeds string length for label key [%d]", - k, - QuotaStringLengthForLabelKey, - ) + s.Log.Warnf("Removing tag [%s] key exceeds string length for label key [%d]", k, QuotaStringLengthForLabelKey) delete(labels, k) continue } if len(v) > QuotaStringLengthForLabelValue { - log.Printf( - "W! [outputs.stackdriver] removing tag [%s] value exceeds string length for label value [%d]", - k, - QuotaStringLengthForLabelValue, - ) + s.Log.Warnf("Removing tag [%s] value exceeds string length for label value [%d]", k, QuotaStringLengthForLabelValue) delete(labels, k) continue } } if len(labels) > QuotaLabelsPerMetricDescriptor { excess := len(labels) - QuotaLabelsPerMetricDescriptor - log.Printf( - "W! 
[outputs.stackdriver] tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", - len(labels), - QuotaLabelsPerMetricDescriptor, - excess, - ) + s.Log.Warnf("Tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", len(labels), QuotaLabelsPerMetricDescriptor, excess) for k := range labels { if excess == 0 { break diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index bb2a620e93668..741e08e65a845 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -12,8 +12,6 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" "google.golang.org/api/option" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" @@ -22,6 +20,9 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) // clientOpt is the option tests should use to connect to the test server. @@ -65,6 +66,9 @@ func TestMain(m *testing.M) { if err != nil { log.Fatal(err) } + + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive go serv.Serve(lis) conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) @@ -90,6 +94,7 @@ func TestWrite(t *testing.T) { s := &Stackdriver{ Project: fmt.Sprintf("projects/%s", "[PROJECT]"), Namespace: "test", + Log: testutil.Logger{}, client: c, } @@ -121,6 +126,7 @@ func TestWriteResourceTypeAndLabels(t *testing.T) { ResourceLabels: map[string]string{ "mylabel": "myvalue", }, + Log: testutil.Logger{}, client: c, } @@ -149,6 +155,7 @@ func TestWriteAscendingTime(t *testing.T) { s := &Stackdriver{ Project: fmt.Sprintf("projects/%s", "[PROJECT]"), Namespace: "test", + Log: testutil.Logger{}, client: c, } @@ -221,6 +228,7 @@ func TestWriteBatchable(t *testing.T) { s := &Stackdriver{ Project: fmt.Sprintf("projects/%s", "[PROJECT]"), Namespace: "test", + Log: testutil.Logger{}, client: c, } @@ -398,6 +406,7 @@ func TestWriteIgnoredErrors(t *testing.T) { s := &Stackdriver{ Project: fmt.Sprintf("projects/%s", "[PROJECT]"), Namespace: "test", + Log: testutil.Logger{}, client: c, } @@ -431,6 +440,10 @@ func TestGetStackdriverLabels(t *testing.T) { {Key: "valuequota", Value: 
"icym5wcpejnhljcvy2vwk15svmhrtueoppwlvix61vlbaeedufn1g6u4jgwjoekwew9s2dboxtgrkiyuircnl8h1lbzntt9gzcf60qunhxurhiz0g2bynzy1v6eyn4ravndeiiugobsrsj2bfaguahg4gxn7nx4irwfknunhkk6jdlldevawj8levebjajcrcbeugewd14fa8o34ycfwx2ymalyeqxhfqrsksxnii2deqq6cghrzi6qzwmittkzdtye3imoygqmjjshiskvnzz1e4ipd9c6wfor5jsygn1kvcg6jm4clnsl1fnxotbei9xp4swrkjpgursmfmkyvxcgq9hoy435nwnolo3ipnvdlhk6pmlzpdjn6gqi3v9gv7jn5ro2p1t5ufxzfsvqq1fyrgoi7gvmttil1banh3cftkph1dcoaqfhl7y0wkvhwwvrmslmmxp1wedyn8bacd7akmjgfwdvcmrymbzvmrzfvq1gs1xnmmg8rsfxci2h6r1ralo3splf4f3bdg4c7cy0yy9qbxzxhcmdpwekwc7tdjs8uj6wmofm2aor4hum8nwyfwwlxy3yvsnbjy32oucsrmhcnu6l2i8laujkrhvsr9fcix5jflygznlydbqw5uhw1rg1g5wiihqumwmqgggemzoaivm3ut41vjaff4uqtqyuhuwblmuiphfkd7si49vgeeswzg7tpuw0oxmkesgibkcjtev2h9ouxzjs3eb71jffhdacyiuyhuxwvm5bnrjewbm4x2kmhgbirz3eoj7ijgplggdkx5vixufg65ont8zi1jabsuxx0vsqgprunwkugqkxg2r7iy6fmgs4lob4dlseinowkst6gp6x1ejreauyzjz7atzm3hbmr5rbynuqp4lxrnhhcbuoun69mavvaaki0bdz5ybmbbbz5qdv0odtpjo2aezat5uosjuhzbvic05jlyclikynjgfhencdkz3qcqzbzhnsynj1zdke0sk4zfpvfyryzsxv9pu0qm"}, } - labels := getStackdriverLabels(tags) + s := &Stackdriver{ + Log: testutil.Logger{}, + } + + labels := s.getStackdriverLabels(tags) require.Equal(t, QuotaLabelsPerMetricDescriptor, len(labels)) } diff --git a/plugins/outputs/sumologic/sumologic.go b/plugins/outputs/sumologic/sumologic.go index 088210b9d1ff9..889a28bc2c78e 100644 --- a/plugins/outputs/sumologic/sumologic.go +++ b/plugins/outputs/sumologic/sumologic.go @@ -3,7 +3,6 @@ package sumologic import ( "bytes" "compress/gzip" - "log" "net/http" "time" @@ -198,19 +197,19 @@ func (s *SumoLogic) Write(metrics []telegraf.Metric) error { return s.writeRequestChunks(chunks) } - return s.write(reqBody) + return s.writeRequestChunk(reqBody) } func (s *SumoLogic) writeRequestChunks(chunks [][]byte) error { for _, reqChunk := range chunks { - if err := s.write(reqChunk); err != nil { + if err := s.writeRequestChunk(reqChunk); err != nil { s.Log.Errorf("Error sending chunk: %v", err) } } return nil } -func (s *SumoLogic) write(reqBody []byte) error { +func (s *SumoLogic) writeRequestChunk(reqBody []byte) error { var ( err error buff bytes.Buffer @@ -284,31 +283,31 @@ func (s *SumoLogic) splitIntoChunks(metrics []telegraf.Metric) ([][]byte, error) if la+len(chunkBody) > int(s.MaxRequstBodySize) { // ... and it's just the right size, without currently processed chunk. break - } else { - // ... we can try appending more. - i++ - toAppend = append(toAppend, chunkBody...) - continue } - } else { // la == 0 + // ... we can try appending more. i++ - toAppend = chunkBody - - if len(chunkBody) > int(s.MaxRequstBodySize) { - log.Printf( - "W! [SumoLogic] max_request_body_size set to %d which is too small even for a single metric (len: %d), sending without split", - s.MaxRequstBodySize, len(chunkBody), - ) - - // The serialized metric is too big but we have no choice - // but to send it. - // max_request_body_size was set so small that it wouldn't - // even accomodate a single metric. - break - } - + toAppend = append(toAppend, chunkBody...) continue } + + // la == 0 + i++ + toAppend = chunkBody + + if len(chunkBody) > int(s.MaxRequstBodySize) { + s.Log.Warnf( + "max_request_body_size set to %d which is too small even for a single metric (len: %d), sending without split", + s.MaxRequstBodySize, len(chunkBody), + ) + + // The serialized metric is too big, but we have no choice + // but to send it. + // max_request_body_size was set so small that it wouldn't + // even accommodate a single metric. 
+ break + } + + continue } if toAppend == nil { diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 5629defa4506e..a3e202f4910bf 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -5,6 +5,7 @@ import ( "bytes" "compress/gzip" "fmt" + "github.com/influxdata/telegraf/testutil" "io" "net/http" "net/http/httptest" @@ -13,7 +14,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,7 +25,7 @@ import ( "github.com/influxdata/telegraf/plugins/serializers/prometheus" ) -func getMetric(t *testing.T) telegraf.Metric { +func getMetric() telegraf.Metric { m := metric.New( "cpu", map[string]string{}, @@ -37,7 +37,7 @@ func getMetric(t *testing.T) telegraf.Metric { return m } -func getMetrics(t *testing.T) []telegraf.Metric { +func getMetrics() []telegraf.Metric { const count = 100 var metrics = make([]telegraf.Metric, count) @@ -105,7 +105,7 @@ func TestMethod(t *testing.T) { } require.NoError(t, err) - err = plugin.Write([]telegraf.Metric{getMetric(t)}) + err = plugin.Write([]telegraf.Metric{getMetric()}) require.NoError(t, err) }) } @@ -177,7 +177,7 @@ func TestStatusCode(t *testing.T) { err = tt.plugin.Connect() require.NoError(t, err) - err = tt.plugin.Write([]telegraf.Metric{getMetric(t)}) + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) tt.errFunc(t, err) }) } @@ -247,7 +247,8 @@ func TestContentType(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { gz, err := gzip.NewReader(r.Body) require.NoError(t, err) - io.Copy(&body, gz) + _, err = io.Copy(&body, gz) + require.NoError(t, err) w.WriteHeader(http.StatusOK) })) defer ts.Close() @@ -260,7 +261,7 @@ func TestContentType(t *testing.T) { require.NoError(t, plugin.Connect()) - err = plugin.Write([]telegraf.Metric{getMetric(t)}) + err = plugin.Write([]telegraf.Metric{getMetric()}) require.NoError(t, err) if tt.expectedBody != nil { @@ -302,7 +303,7 @@ func TestContentEncodingGzip(t *testing.T) { payload, err := io.ReadAll(body) require.NoError(t, err) - assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") + require.Equal(t, string(payload), "metric=cpu field=value 42 0\n") w.WriteHeader(http.StatusNoContent) }) @@ -316,14 +317,12 @@ func TestContentEncodingGzip(t *testing.T) { err = plugin.Connect() require.NoError(t, err) - err = plugin.Write([]telegraf.Metric{getMetric(t)}) + err = plugin.Write([]telegraf.Metric{getMetric()}) require.NoError(t, err) }) } } -type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) - func TestDefaultUserAgent(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() @@ -349,7 +348,7 @@ func TestDefaultUserAgent(t *testing.T) { err = plugin.Connect() require.NoError(t, err) - err = plugin.Write([]telegraf.Metric{getMetric(t)}) + err = plugin.Write([]telegraf.Metric{getMetric()}) require.NoError(t, err) }) } @@ -463,7 +462,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.URL = u.String() return s }, - metrics: []telegraf.Metric{getMetric(t)}, + metrics: []telegraf.Metric{getMetric()}, expectedError: false, expectedRequestCount: 1, expectedMetricLinesCount: 1, @@ -475,7 +474,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.URL = u.String() return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 1, expectedMetricLinesCount: 
500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -490,7 +489,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 43_749 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 2, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -503,7 +502,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 10_000 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 5, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -516,7 +515,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 5_000 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 10, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -529,7 +528,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 2_500 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 20, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -542,7 +541,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 1_000 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 50, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -555,7 +554,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 500 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -568,7 +567,7 @@ func TestMaxRequestBodySize(t *testing.T) { s.MaxRequstBodySize = 300 return s }, - metrics: getMetrics(t), + metrics: getMetrics(), expectedError: false, expectedRequestCount: 100, expectedMetricLinesCount: 500, // count (100) metrics, 5 lines per each (steal, idle, system, user, temp) = 500 @@ -596,6 +595,7 @@ func TestMaxRequestBodySize(t *testing.T) { plugin := tt.plugin() plugin.SetSerializer(serializer) + plugin.Log = testutil.Logger{} err = plugin.Connect() require.NoError(t, err) diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 570ed15a79e6b..d5925d4dfff84 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -3,7 +3,6 @@ package syslog import ( "crypto/tls" "fmt" - "log" "net" "strconv" "strings" @@ -11,6 +10,7 @@ import ( "github.com/influxdata/go-syslog/v3/nontransparent" "github.com/influxdata/go-syslog/v3/rfc5424" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" @@ -29,6 +29,7 @@ type Syslog struct { Separator string `toml:"sdparam_separator"` Framing framing.Framing Trailer nontransparent.TrailerType + Log telegraf.Logger `toml:"-"` net.Conn tlsint.ClientConfig mapper *SyslogMapper @@ -135,7 +136,7 @@ func (s *Syslog) Connect() error { } if err := s.setKeepAlive(c); err != nil { - log.Printf("unable to configure keep alive (%s): %s", s.Address, err) + s.Log.Warnf("unable to configure keep alive (%s): %s", s.Address, err) } s.Conn = c @@ -186,17 +187,17 
@@ func (s *Syslog) Write(metrics []telegraf.Metric) (err error) { for _, metric := range metrics { var msg *rfc5424.SyslogMessage if msg, err = s.mapper.MapMetricToSyslogMessage(metric); err != nil { - log.Printf("E! [outputs.syslog] Failed to create syslog message: %v", err) + s.Log.Errorf("Failed to create syslog message: %v", err) continue } var msgBytesWithFraming []byte if msgBytesWithFraming, err = s.getSyslogMessageBytesWithFraming(msg); err != nil { - log.Printf("E! [outputs.syslog] Failed to convert syslog message with framing: %v", err) + s.Log.Errorf("Failed to convert syslog message with framing: %v", err) continue } if _, err = s.Conn.Write(msgBytesWithFraming); err != nil { if netErr, ok := err.(net.Error); !ok || !netErr.Temporary() { - s.Close() + s.Close() //nolint:revive // There is another error which will be returned here s.Conn = nil return fmt.Errorf("closing connection: %v", netErr) } diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go index 28c74f3f97a6d..7d3d6d0a361b0 100644 --- a/plugins/outputs/syslog/syslog_mapper.go +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -9,6 +9,7 @@ import ( "time" "github.com/influxdata/go-syslog/v3/rfc5424" + "github.com/influxdata/telegraf" ) @@ -90,8 +91,7 @@ func mapMsgID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { func mapVersion(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { if value, ok := metric.GetField("version"); ok { - switch v := value.(type) { - case uint64: + if v, ok := value.(uint64); ok { msg.SetVersion(uint16(v)) return } @@ -142,9 +142,9 @@ func mapHostname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { func mapTimestamp(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { timestamp := metric.Time() + //nolint: revive // Need switch with only one case to handle `.(type)` if value, ok := metric.GetField("timestamp"); ok { - switch v := value.(type) { - case int64: + if v, ok := value.(int64); ok { timestamp = time.Unix(0, v).UTC() } } diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go index d4bbc1d6f0ed9..90cec95e4c256 100644 --- a/plugins/outputs/syslog/syslog_mapper_test.go +++ b/plugins/outputs/syslog/syslog_mapper_test.go @@ -5,9 +5,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/metric" ) func TestSyslogMapperWithDefaults(t *testing.T) { @@ -22,11 +22,11 @@ func TestSyslogMapperWithDefaults(t *testing.T) { time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) hostname, err := os.Hostname() - assert.NoError(t, err) + require.NoError(t, err) syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<13>1 2010-11-10T23:00:00Z "+hostname+" Telegraf - testmetric -", str, "Wrong syslog message") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z "+hostname+" Telegraf - testmetric -", str, "Wrong syslog message") } func TestSyslogMapperWithHostname(t *testing.T) { @@ -47,7 +47,7 @@ func TestSyslogMapperWithHostname(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", str, "Wrong syslog message") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", str, "Wrong syslog message") 
} func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { s := newSyslog() @@ -66,7 +66,7 @@ func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<13>1 2010-11-10T23:00:00Z sourcevalue Telegraf - testmetric -", str, "Wrong syslog message") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z sourcevalue Telegraf - testmetric -", str, "Wrong syslog message") } func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { @@ -85,7 +85,7 @@ func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<13>1 2010-11-10T23:00:00Z hostvalue Telegraf - testmetric -", str, "Wrong syslog message") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z hostvalue Telegraf - testmetric -", str, "Wrong syslog message") } func TestSyslogMapperWithDefaultSdid(t *testing.T) { @@ -120,7 +120,7 @@ func TestSyslogMapperWithDefaultSdid(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<27>2 2010-11-10T23:30:00Z testhost testapp 25 555 [default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"foo\" value3=\"1.2\"] Test message", str, "Wrong syslog message") + require.Equal(t, "<27>2 2010-11-10T23:30:00Z testhost testapp 25 555 [default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"foo\" value3=\"1.2\"] Test message", str, "Wrong syslog message") } func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { @@ -158,7 +158,7 @@ func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<25>2 2010-11-10T23:30:00Z testhost testapp 25 555 [bar@123 tag3=\"barfoobar\" value3=\"2\"][default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"default\"][foo@456 value4=\"foo\"] Test message", str, "Wrong syslog message") + require.Equal(t, "<25>2 2010-11-10T23:30:00Z testhost testapp 25 555 [bar@123 tag3=\"barfoobar\" value3=\"2\"][default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"default\"][foo@456 value4=\"foo\"] Test message", str, "Wrong syslog message") } func TestSyslogMapperWithNoSdids(t *testing.T) { @@ -196,5 +196,5 @@ func TestSyslogMapperWithNoSdids(t *testing.T) { syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) require.NoError(t, err) str, _ := syslogMessage.String() - assert.Equal(t, "<26>2 2010-11-10T23:30:00Z testhost testapp 25 555 - Test message", str, "Wrong syslog message") + require.Equal(t, "<26>2 2010-11-10T23:30:00Z testhost testapp 25 555 - Test message", str, "Wrong syslog message") } diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go index d9e082e5f9042..f245bcc84f5a7 100644 --- a/plugins/outputs/syslog/syslog_test.go +++ b/plugins/outputs/syslog/syslog_test.go @@ -6,12 +6,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { @@ 
-34,7 +34,7 @@ func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) - assert.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing") + require.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing") } func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { @@ -58,7 +58,7 @@ func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) require.NoError(t, err) - assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") + require.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") } func TestSyslogWriteWithTcp(t *testing.T) { @@ -110,7 +110,7 @@ func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { buf := make([]byte, 256) n, err := lconn.Read(buf) require.NoError(t, err) - assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) + require.Equal(t, string(messageBytesWithFraming), string(buf[:n])) } func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { @@ -134,7 +134,7 @@ func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { buf := make([]byte, 256) n, _, err := lconn.ReadFrom(buf) require.NoError(t, err) - assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) + require.Equal(t, string(messageBytesWithFraming), string(buf[:n])) } func TestSyslogWriteErr(t *testing.T) { @@ -146,20 +146,26 @@ func TestSyslogWriteErr(t *testing.T) { err = s.Connect() require.NoError(t, err) - s.Conn.(*net.TCPConn).SetReadBuffer(256) + err = s.Conn.(*net.TCPConn).SetReadBuffer(256) + require.NoError(t, err) lconn, err := listener.Accept() require.NoError(t, err) - lconn.(*net.TCPConn).SetWriteBuffer(256) + err = lconn.(*net.TCPConn).SetWriteBuffer(256) + require.NoError(t, err) metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} // close the socket to generate an error - lconn.Close() - s.Conn.Close() + err = lconn.Close() + require.NoError(t, err) + + err = s.Conn.Close() + require.NoError(t, err) + err = s.Write(metrics) require.Error(t, err) - assert.Nil(t, s.Conn) + require.Nil(t, s.Conn) } func TestSyslogWriteReconnect(t *testing.T) { @@ -171,12 +177,15 @@ func TestSyslogWriteReconnect(t *testing.T) { err = s.Connect() require.NoError(t, err) - s.Conn.(*net.TCPConn).SetReadBuffer(256) + err = s.Conn.(*net.TCPConn).SetReadBuffer(256) + require.NoError(t, err) lconn, err := listener.Accept() require.NoError(t, err) - lconn.(*net.TCPConn).SetWriteBuffer(256) - lconn.Close() + err = lconn.(*net.TCPConn).SetWriteBuffer(256) + require.NoError(t, err) + err = lconn.Close() + require.NoError(t, err) s.Conn = nil wg := sync.WaitGroup{} @@ -192,7 +201,7 @@ func TestSyslogWriteReconnect(t *testing.T) { require.NoError(t, err) wg.Wait() - assert.NoError(t, lerr) + require.NoError(t, lerr) syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) require.NoError(t, err) @@ -201,5 +210,5 @@ func TestSyslogWriteReconnect(t *testing.T) { buf := make([]byte, 256) n, err := lconn.Read(buf) require.NoError(t, err) - assert.Equal(t, 
string(messageBytesWithFraming), string(buf[:n])) + require.Equal(t, string(messageBytesWithFraming), string(buf[:n])) } diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 6478563b6b245..91d73de381a91 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -10,14 +10,14 @@ import ( "strconv" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" "github.com/aws/smithy-go" + + "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/plugins/outputs" ) type ( @@ -332,12 +332,12 @@ func (t *Timestream) logWriteToTimestreamError(err error, tableName *string) { func (t *Timestream) createTableAndRetry(writeRecordsInput *timestreamwrite.WriteRecordsInput) error { if t.CreateTableIfNotExists { t.Log.Infof("Trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'true'.", *writeRecordsInput.TableName, t.DatabaseName) - if err := t.createTable(writeRecordsInput.TableName); err != nil { - t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) - } else { + err := t.createTable(writeRecordsInput.TableName) + if err == nil { t.Log.Infof("Table '%s' in database '%s' created. Retrying writing.", *writeRecordsInput.TableName, t.DatabaseName) return t.writeToTimestream(writeRecordsInput, false) } + t.Log.Errorf("Failed to create table '%s' in database '%s': %s. Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName, err) } else { t.Log.Errorf("Not trying to create table '%s' in database '%s', as 'CreateTableIfNotExists' config key is 'false'. 
Skipping metric!", *writeRecordsInput.TableName, t.DatabaseName) } @@ -434,22 +434,22 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 { h := fnv.New64a() - h.Write([]byte(m.Name())) - h.Write([]byte("\n")) + h.Write([]byte(m.Name())) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" for _, tag := range m.TagList() { if tag.Key == "" { continue } - h.Write([]byte(tag.Key)) - h.Write([]byte("\n")) - h.Write([]byte(tag.Value)) - h.Write([]byte("\n")) + h.Write([]byte(tag.Key)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(tag.Value)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" } b := make([]byte, binary.MaxVarintLen64) n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) - h.Write(b[:n]) - h.Write([]byte("\n")) + h.Write(b[:n]) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\n")) //nolint:revive // from hash.go: "It never returns an error" return h.Sum64() } @@ -537,7 +537,7 @@ func getTimestreamTime(t time.Time) (timeUnit types.TimeUnit, timeValue string) timeUnit = types.TimeUnitNanoseconds timeValue = strconv.FormatInt(nanosTime, 10) } - return + return timeUnit, timeValue } // convertValue converts single Field value from Telegraf Metric and produces @@ -595,7 +595,7 @@ func convertValue(v interface{}) (value string, valueType types.MeasureValueType default: // Skip unsupported type. ok = false - return + return value, valueType, ok } - return + return value, valueType, ok } diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go index d151c10d4b146..a86bc432de84b 100644 --- a/plugins/outputs/timestream/timestream_internal_test.go +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -4,40 +4,36 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" - - "github.com/stretchr/testify/assert" ) func TestGetTimestreamTime(t *testing.T) { - assertions := assert.New(t) - tWithNanos := time.Date(2020, time.November, 10, 23, 44, 20, 123, time.UTC) tWithMicros := time.Date(2020, time.November, 10, 23, 44, 20, 123000, time.UTC) tWithMillis := time.Date(2020, time.November, 10, 23, 44, 20, 123000000, time.UTC) tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC) tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos) - assertions.Equal(types.TimeUnitNanoseconds, tUnitNanos) - assertions.Equal("1605051860000000123", tValueNanos) + require.Equal(t, types.TimeUnitNanoseconds, tUnitNanos) + require.Equal(t, "1605051860000000123", tValueNanos) tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros) - assertions.Equal(types.TimeUnitMicroseconds, tUnitMicros) - assertions.Equal("1605051860000123", tValueMicros) + require.Equal(t, types.TimeUnitMicroseconds, tUnitMicros) + require.Equal(t, "1605051860000123", tValueMicros) tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis) - assertions.Equal(types.TimeUnitMilliseconds, tUnitMillis) - assertions.Equal("1605051860123", tValueMillis) + require.Equal(t, types.TimeUnitMilliseconds, 
tUnitMillis) + require.Equal(t, "1605051860123", tValueMillis) tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds) - assertions.Equal(types.TimeUnitSeconds, tUnitSeconds) - assertions.Equal("1605051860", tValueSeconds) + require.Equal(t, types.TimeUnitSeconds, tUnitSeconds) + require.Equal(t, "1605051860", tValueSeconds) } func TestPartitionRecords(t *testing.T) { - assertions := assert.New(t) - testDatum := types.Record{ MeasureName: aws.String("Foo"), MeasureValueType: types.MeasureValueTypeDouble, @@ -49,11 +45,11 @@ func TestPartitionRecords(t *testing.T) { twoDatum := []types.Record{testDatum, testDatum} threeDatum := []types.Record{testDatum, testDatum, testDatum} - assertions.Equal([][]types.Record{}, partitionRecords(2, zeroDatum)) - assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]types.Record{twoDatum}, partitionRecords(2, twoDatum)) - assertions.Equal([][]types.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) + require.Equal(t, [][]types.Record{}, partitionRecords(2, zeroDatum)) + require.Equal(t, [][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + require.Equal(t, [][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + require.Equal(t, [][]types.Record{twoDatum}, partitionRecords(2, twoDatum)) + require.Equal(t, [][]types.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) } func TestConvertValueSupported(t *testing.T) { @@ -74,18 +70,16 @@ func TestConvertValueSupported(t *testing.T) { } func TestConvertValueUnsupported(t *testing.T) { - assertions := assert.New(t) _, _, ok := convertValue(time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC)) - assertions.False(ok, "Expected unsuccessful conversion") + require.False(t, ok, "Expected unsuccessful conversion") } func testConvertValueSupportedCases(t *testing.T, inputValues []interface{}, outputValues []string, outputValueTypes []types.MeasureValueType) { - assertions := assert.New(t) for i, inputValue := range inputValues { v, vt, ok := convertValue(inputValue) - assertions.Equal(true, ok, "Expected successful conversion") - assertions.Equal(outputValues[i], v, "Expected different string representation of converted value") - assertions.Equal(outputValueTypes[i], vt, "Expected different value type of converted value") + require.Equal(t, true, ok, "Expected successful conversion") + require.Equal(t, outputValues[i], v, "Expected different string representation of converted value") + require.Equal(t, outputValueTypes[i], vt, "Expected different value type of converted value") } } diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index be61a06a15358..7be25c2559070 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -13,11 +13,11 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" ) const tsDbName = "testDb" @@ -49,26 +49,25 @@ func (m *mockTimestreamClient) DescribeDatabase(context.Context, *timestreamwrit } func TestConnectValidatesConfigParameters(t *testing.T) { - assertions := assert.New(t) 
WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamClient{}, nil } // checking base arguments noDatabaseName := Timestream{Log: testutil.Logger{}} - assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") + require.Contains(t, noDatabaseName.Connect().Error(), "DatabaseName") noMappingMode := Timestream{ DatabaseName: tsDbName, Log: testutil.Logger{}, } - assertions.Contains(noMappingMode.Connect().Error(), "MappingMode") + require.Contains(t, noMappingMode.Connect().Error(), "MappingMode") incorrectMappingMode := Timestream{ DatabaseName: tsDbName, MappingMode: "foo", Log: testutil.Logger{}, } - assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table") + require.Contains(t, incorrectMappingMode.Connect().Error(), "single-table") // multi-table arguments validMappingModeMultiTable := Timestream{ @@ -76,7 +75,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { MappingMode: MappingModeMultiTable, Log: testutil.Logger{}, } - assertions.Nil(validMappingModeMultiTable.Connect()) + require.Nil(t, validMappingModeMultiTable.Connect()) singleTableNameWithMultiTable := Timestream{ DatabaseName: tsDbName, @@ -84,7 +83,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { SingleTableName: testSingleTableName, Log: testutil.Logger{}, } - assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") + require.Contains(t, singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") singleTableDimensionWithMultiTable := Timestream{ DatabaseName: tsDbName, @@ -92,7 +91,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } - assertions.Contains(singleTableDimensionWithMultiTable.Connect().Error(), + require.Contains(t, singleTableDimensionWithMultiTable.Connect().Error(), "SingleTableDimensionNameForTelegrafMeasurementName") // single-table arguments @@ -101,7 +100,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { MappingMode: MappingModeSingleTable, Log: testutil.Logger{}, } - assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") + require.Contains(t, noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") noDimensionNameMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, @@ -109,7 +108,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { SingleTableName: testSingleTableName, Log: testutil.Logger{}, } - assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(), + require.Contains(t, noDimensionNameMappingModeSingleTable.Connect().Error(), "SingleTableDimensionNameForTelegrafMeasurementName") validConfigurationMappingModeSingleTable := Timestream{ @@ -119,7 +118,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } - assertions.Nil(validConfigurationMappingModeSingleTable.Connect()) + require.Nil(t, validConfigurationMappingModeSingleTable.Connect()) // create table arguments createTableNoMagneticRetention := Timestream{ @@ -128,7 +127,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { CreateTableIfNotExists: true, Log: testutil.Logger{}, } - assertions.Contains(createTableNoMagneticRetention.Connect().Error(), + require.Contains(t, createTableNoMagneticRetention.Connect().Error(), "CreateTableMagneticStoreRetentionPeriodInDays") 
createTableNoMemoryRetention := Timestream{ @@ -138,7 +137,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { CreateTableMagneticStoreRetentionPeriodInDays: 3, Log: testutil.Logger{}, } - assertions.Contains(createTableNoMemoryRetention.Connect().Error(), + require.Contains(t, createTableNoMemoryRetention.Connect().Error(), "CreateTableMemoryStoreRetentionPeriodInHours") createTableValid := Timestream{ @@ -149,7 +148,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { CreateTableMemoryStoreRetentionPeriodInHours: 3, Log: testutil.Logger{}, } - assertions.Nil(createTableValid.Connect()) + require.Nil(t, createTableValid.Connect()) // describe table on start arguments describeTableInvoked := Timestream{ @@ -158,7 +157,7 @@ func TestConnectValidatesConfigParameters(t *testing.T) { DescribeDatabaseOnStart: true, Log: testutil.Logger{}, } - assertions.Contains(describeTableInvoked.Connect().Error(), "hello from DescribeDatabase") + require.Contains(t, describeTableInvoked.Connect().Error(), "hello from DescribeDatabase") } type mockTimestreamErrorClient struct { @@ -176,7 +175,6 @@ func (m *mockTimestreamErrorClient) DescribeDatabase(context.Context, *timestrea } func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { - assertions := assert.New(t) WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ ErrorToReturnOnWriteRecords: &types.ThrottlingException{Message: aws.String("Throttling Test")}, @@ -188,7 +186,7 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { DatabaseName: tsDbName, Log: testutil.Logger{}, } - assertions.NoError(plugin.Connect()) + require.NoError(t, plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -198,12 +196,11 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { err := plugin.Write([]telegraf.Metric{input}) - assertions.NotNil(err, "Expected an error to be returned to Telegraf, "+ + require.NotNil(t, err, "Expected an error to be returned to Telegraf, "+ "so that the write will be retried by Telegraf later.") } func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { - assertions := assert.New(t) WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ ErrorToReturnOnWriteRecords: &types.RejectedRecordsException{Message: aws.String("RejectedRecords Test")}, @@ -215,7 +212,7 @@ func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { DatabaseName: tsDbName, Log: testutil.Logger{}, } - assertions.NoError(plugin.Connect()) + require.NoError(t, plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -225,7 +222,7 @@ func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { err := plugin.Write([]telegraf.Metric{input}) - assertions.Nil(err, "Expected to silently swallow the RejectedRecordsException, "+ + require.Nil(t, err, "Expected to silently swallow the RejectedRecordsException, "+ "as retrying this error doesn't make sense.") } @@ -649,13 +646,11 @@ func comparisonTest(t *testing.T, Log: testutil.Logger{}, } } - assertions := assert.New(t) - result := plugin.TransformMetrics(telegrafMetrics) - assertions.Equal(len(timestreamRecords), len(result), "The number of transformed records was expected to be different") + require.Equal(t, len(timestreamRecords), len(result), "The number of transformed records was expected to be different") for _, 
tsRecord := range timestreamRecords { - assertions.True(arrayContains(result, tsRecord), "Expected that the list of requests to Timestream: \n%s\n\n "+ + require.True(t, arrayContains(result, tsRecord), "Expected that the list of requests to Timestream: \n%s\n\n "+ "will contain request: \n%s\n\nUsed MappingMode: %s", result, tsRecord, mappingMode) } } diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 4d3027b1b5331..740bb01989f23 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "log" "math" "net/http" "net/url" @@ -33,6 +32,7 @@ type Warp10 struct { MaxStringErrorSize int `toml:"max_string_error_size"` client *http.Client tls.ClientConfig + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -114,7 +114,7 @@ func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string { metricValue, err := buildValue(field.Value) if err != nil { - log.Printf("E! [outputs.warp10] Could not encode value: %v", err) + w.Log.Errorf("Could not encode value: %v", err) continue } metric.Value = metricValue @@ -199,7 +199,7 @@ func buildValue(v interface{}) (string, error) { retv = strconv.FormatInt(math.MaxInt64, 10) } case float64: - retv = floatToString(float64(p)) + retv = floatToString(p) default: return "", fmt.Errorf("unsupported type: %T", v) } diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 3ad4e803b9f6a..7049ded5264ed 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -5,9 +5,10 @@ import ( "regexp" "strings" + wavefront "github.com/wavefronthq/wavefront-sdk-go/senders" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - wavefront "github.com/wavefronthq/wavefront-sdk-go/senders" ) const maxTagLength = 254 @@ -51,7 +52,7 @@ var strictSanitizedChars = strings.NewReplacer( ) // instead of Replacer which may miss some special characters we can use a regex pattern, but this is significantly slower than Replacer -var sanitizedRegex = regexp.MustCompile("[^a-zA-Z\\d_.-]") +var sanitizedRegex = regexp.MustCompile(`[^a-zA-Z\d_.-]`) var tagValueReplacer = strings.NewReplacer("*", "-") From 2b1a79f327c6212f3f182443312fd4f89914dd7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 24 Nov 2021 20:38:08 +0100 Subject: [PATCH 084/133] fix: Linter fixes for plugins/processors/[a-z]* (#10161) Co-authored-by: Pawel Zak --- plugins/processors/aws/ec2/ec2.go | 24 ++++++------- plugins/processors/clone/clone_test.go | 35 ++++++++++--------- plugins/processors/converter/converter.go | 2 +- plugins/processors/date/date_test.go | 30 ++++++++-------- plugins/processors/dedup/dedup_test.go | 11 +++--- plugins/processors/defaults/defaults.go | 7 ++-- plugins/processors/defaults/defaults_test.go | 5 +-- plugins/processors/enum/enum_test.go | 32 ++++++++--------- plugins/processors/execd/execd_test.go | 33 ++++++++++------- plugins/processors/ifname/ifname.go | 24 ++++++------- plugins/processors/ifname/ifname_test.go | 7 ++-- plugins/processors/override/override_test.go | 29 +++++++-------- plugins/processors/rename/rename_test.go | 13 +++---- plugins/processors/reverse_dns/rdnscache.go | 9 ----- .../processors/reverse_dns/rdnscache_test.go | 9 ++++- .../processors/reverse_dns/reversedns_test.go | 9 +++-- plugins/processors/starlark/starlark_test.go | 21 ++++++----- plugins/processors/strings/strings_test.go | 29 +++++++-------- 
plugins/processors/tag_limit/tag_limit.go | 9 ++--- .../processors/tag_limit/tag_limit_test.go | 15 ++++---- plugins/processors/template/template_test.go | 6 ++-- 21 files changed, 192 insertions(+), 167 deletions(-) diff --git a/plugins/processors/aws/ec2/ec2.go b/plugins/processors/aws/ec2/ec2.go index 088ec09c83f5f..012cad92d0dde 100644 --- a/plugins/processors/aws/ec2/ec2.go +++ b/plugins/processors/aws/ec2/ec2.go @@ -26,13 +26,13 @@ type AwsEc2Processor struct { Timeout config.Duration `toml:"timeout"` Ordered bool `toml:"ordered"` MaxParallelCalls int `toml:"max_parallel_calls"` + Log telegraf.Logger `toml:"-"` - Log telegraf.Logger `toml:"-"` - imdsClient *imds.Client `toml:"-"` - imdsTags map[string]struct{} `toml:"-"` - ec2Client *ec2.Client `toml:"-"` - parallel parallel.Parallel `toml:"-"` - instanceID string `toml:"-"` + imdsClient *imds.Client + imdsTagsMap map[string]struct{} + ec2Client *ec2.Client + parallel parallel.Parallel + instanceID string } const sampleConfig = ` @@ -128,9 +128,9 @@ func (r *AwsEc2Processor) Init() error { if len(tag) == 0 || !isImdsTagAllowed(tag) { return fmt.Errorf("not allowed metadata tag specified in configuration: %s", tag) } - r.imdsTags[tag] = struct{}{} + r.imdsTagsMap[tag] = struct{}{} } - if len(r.imdsTags) == 0 && len(r.EC2Tags) == 0 { + if len(r.imdsTagsMap) == 0 && len(r.EC2Tags) == 0 { return errors.New("no allowed metadata tags specified in configuration") } @@ -186,7 +186,7 @@ func (r *AwsEc2Processor) Start(acc telegraf.Accumulator) error { func (r *AwsEc2Processor) Stop() error { if r.parallel == nil { - return errors.New("Trying to stop unstarted AWS EC2 Processor") + return errors.New("trying to stop unstarted AWS EC2 Processor") } r.parallel.Stop() return nil @@ -197,7 +197,7 @@ func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric { defer cancel() // Add IMDS Instance Identity Document tags. 
- if len(r.imdsTags) > 0 { + if len(r.imdsTagsMap) > 0 { iido, err := r.imdsClient.GetInstanceIdentityDocument( ctx, &imds.GetInstanceIdentityDocumentInput{}, @@ -207,7 +207,7 @@ func (r *AwsEc2Processor) asyncAdd(metric telegraf.Metric) []telegraf.Metric { return []telegraf.Metric{metric} } - for tag := range r.imdsTags { + for tag := range r.imdsTagsMap { if v := getTagFromInstanceIdentityDocument(iido, tag); v != "" { metric.AddTag(tag, v) } @@ -244,7 +244,7 @@ func newAwsEc2Processor() *AwsEc2Processor { return &AwsEc2Processor{ MaxParallelCalls: DefaultMaxParallelCalls, Timeout: config.Duration(DefaultTimeout), - imdsTags: make(map[string]struct{}), + imdsTagsMap: make(map[string]struct{}), } } diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go index 20bec925e7acb..1ef85f6e9f5a1 100644 --- a/plugins/processors/clone/clone_test.go +++ b/plugins/processors/clone/clone_test.go @@ -4,9 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func createTestMetric() telegraf.Metric { @@ -18,8 +19,8 @@ func createTestMetric() telegraf.Metric { return m } -func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string { - processed := processor.Apply(metric) +func calculateProcessedTags(processor Clone, m telegraf.Metric) map[string]string { + processed := processor.Apply(m) return processed[0].Tags() } @@ -29,8 +30,8 @@ func TestRetainsTags(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["metric_tag"] - assert.True(t, present, "Tag of metric was not present") - assert.Equal(t, "from_metric", value, "Value of Tag was changed") + require.True(t, present, "Tag of metric was not present") + require.Equal(t, "from_metric", value, "Value of Tag was changed") } func TestAddTags(t *testing.T) { @@ -39,9 +40,9 @@ func TestAddTags(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["added_tag"] - assert.True(t, present, "Additional Tag of metric was not present") - assert.Equal(t, "from_config", value, "Value of Tag was changed") - assert.Equal(t, 3, len(tags), "Should have one previous and two added tags.") + require.True(t, present, "Additional Tag of metric was not present") + require.Equal(t, "from_config", value, "Value of Tag was changed") + require.Equal(t, 3, len(tags), "Should have one previous and two added tags.") } func TestOverwritesPresentTagValues(t *testing.T) { @@ -50,9 +51,9 @@ func TestOverwritesPresentTagValues(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["metric_tag"] - assert.True(t, present, "Tag of metric was not present") - assert.Equal(t, 1, len(tags), "Should only have one tag.") - assert.Equal(t, "from_config", value, "Value of Tag was not changed") + require.True(t, present, "Tag of metric was not present") + require.Equal(t, 1, len(tags), "Should only have one tag.") + require.Equal(t, "from_config", value, "Value of Tag was not changed") } func TestOverridesName(t *testing.T) { @@ -60,8 +61,8 @@ func TestOverridesName(t *testing.T) { processed := processor.Apply(createTestMetric()) - assert.Equal(t, "overridden", processed[0].Name(), "Name was not overridden") - assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") + require.Equal(t, "overridden", processed[0].Name(), "Name was not 
overridden") + require.Equal(t, "m1", processed[1].Name(), "Original metric was modified") } func TestNamePrefix(t *testing.T) { @@ -69,8 +70,8 @@ func TestNamePrefix(t *testing.T) { processed := processor.Apply(createTestMetric()) - assert.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") - assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") + require.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") + require.Equal(t, "m1", processed[1].Name(), "Original metric was modified") } func TestNameSuffix(t *testing.T) { @@ -78,6 +79,6 @@ func TestNameSuffix(t *testing.T) { processed := processor.Apply(createTestMetric()) - assert.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") - assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") + require.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") + require.Equal(t, "m1", processed[1].Name(), "Original metric was modified") } diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index fd56cc4d9a6a8..042b7d8f2c434 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -328,7 +328,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { } } -func toBool(v interface{}) (bool, bool) { +func toBool(v interface{}) (val bool, ok bool) { switch value := v.(type) { case int64: return value != 0, true diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go index aa7efc64edbff..83df86b3e4134 100644 --- a/plugins/processors/date/date_test.go +++ b/plugins/processors/date/date_test.go @@ -4,12 +4,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { @@ -53,9 +53,9 @@ func TestMonthTag(t *testing.T) { m2 := MustMetric("bar", nil, nil, currentTime) m3 := MustMetric("baz", nil, nil, currentTime) monthApply := dateFormatMonth.Apply(m1, m2, m3) - assert.Equal(t, map[string]string{"month": month}, monthApply[0].Tags(), "should add tag 'month'") - assert.Equal(t, map[string]string{"month": month}, monthApply[1].Tags(), "should add tag 'month'") - assert.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'") + require.Equal(t, map[string]string{"month": month}, monthApply[0].Tags(), "should add tag 'month'") + require.Equal(t, map[string]string{"month": month}, monthApply[1].Tags(), "should add tag 'month'") + require.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'") } func TestMonthField(t *testing.T) { @@ -74,9 +74,9 @@ func TestMonthField(t *testing.T) { m2 := MustMetric("bar", nil, nil, currentTime) m3 := MustMetric("baz", nil, nil, currentTime) monthApply := dateFormatMonth.Apply(m1, m2, m3) - assert.Equal(t, map[string]interface{}{"month": month}, monthApply[0].Fields(), "should add field 'month'") - assert.Equal(t, map[string]interface{}{"month": month}, monthApply[1].Fields(), "should add field 'month'") - assert.Equal(t, map[string]interface{}{"month": month}, monthApply[2].Fields(), "should add field 'month'") + require.Equal(t, 
map[string]interface{}{"month": month}, monthApply[0].Fields(), "should add field 'month'") + require.Equal(t, map[string]interface{}{"month": month}, monthApply[1].Fields(), "should add field 'month'") + require.Equal(t, map[string]interface{}{"month": month}, monthApply[2].Fields(), "should add field 'month'") } func TestOldDateTag(t *testing.T) { @@ -90,7 +90,7 @@ func TestOldDateTag(t *testing.T) { m7 := MustMetric("foo", nil, nil, time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC)) customDateApply := dateFormatYear.Apply(m7) - assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") + require.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") } func TestFieldUnix(t *testing.T) { @@ -107,7 +107,7 @@ func TestFieldUnix(t *testing.T) { m8 := MustMetric("foo", nil, nil, currentTime) unixApply := dateFormatUnix.Apply(m8) - assert.Equal(t, map[string]interface{}{"unix": unixTime}, unixApply[0].Fields(), "should add unix time in s as field 'unix'") + require.Equal(t, map[string]interface{}{"unix": unixTime}, unixApply[0].Fields(), "should add unix time in s as field 'unix'") } func TestFieldUnixNano(t *testing.T) { @@ -124,7 +124,7 @@ func TestFieldUnixNano(t *testing.T) { m9 := MustMetric("foo", nil, nil, currentTime) unixNanoApply := dateFormatUnixNano.Apply(m9) - assert.Equal(t, map[string]interface{}{"unix_ns": unixNanoTime}, unixNanoApply[0].Fields(), "should add unix time in ns as field 'unix_ns'") + require.Equal(t, map[string]interface{}{"unix_ns": unixNanoTime}, unixNanoApply[0].Fields(), "should add unix time in ns as field 'unix_ns'") } func TestFieldUnixMillis(t *testing.T) { @@ -141,7 +141,7 @@ func TestFieldUnixMillis(t *testing.T) { m10 := MustMetric("foo", nil, nil, currentTime) unixMillisApply := dateFormatUnixMillis.Apply(m10) - assert.Equal(t, map[string]interface{}{"unix_ms": unixMillisTime}, unixMillisApply[0].Fields(), "should add unix time in ms as field 'unix_ms'") + require.Equal(t, map[string]interface{}{"unix_ms": unixMillisTime}, unixMillisApply[0].Fields(), "should add unix time in ms as field 'unix_ms'") } func TestFieldUnixMicros(t *testing.T) { @@ -158,7 +158,7 @@ func TestFieldUnixMicros(t *testing.T) { m11 := MustMetric("foo", nil, nil, currentTime) unixMicrosApply := dateFormatUnixMicros.Apply(m11) - assert.Equal(t, map[string]interface{}{"unix_us": unixMicrosTime}, unixMicrosApply[0].Fields(), "should add unix time in us as field 'unix_us'") + require.Equal(t, map[string]interface{}{"unix_us": unixMicrosTime}, unixMicrosApply[0].Fields(), "should add unix time in us as field 'unix_us'") } func TestDateOffset(t *testing.T) { @@ -171,7 +171,7 @@ func TestDateOffset(t *testing.T) { err := plugin.Init() require.NoError(t, err) - metric := testutil.MustMetric( + m := testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{ @@ -193,6 +193,6 @@ func TestDateOffset(t *testing.T) { ), } - actual := plugin.Apply(metric) + actual := plugin.Apply(m) testutil.RequireMetricsEqual(t, expected, actual) } diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go index 4f3d109345b32..d5cc83192747e 100644 --- a/plugins/processors/dedup/dedup_test.go +++ b/plugins/processors/dedup/dedup_test.go @@ -77,6 +77,7 @@ func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf. 
tValue, present := target[0].GetField("value") require.True(t, present) sValue, present := source.GetField("value") + require.True(t, present) require.Equal(t, tValue, sValue) // target metric has proper timestamp require.Equal(t, target[0].Time(), source.Time()) @@ -100,9 +101,9 @@ func TestSuppressRepeatedValue(t *testing.T) { deduplicate := createDedup(time.Now()) // Create metric in the past source := createMetric(1, time.Now().Add(-1*time.Second)) - target := deduplicate.Apply(source) + _ = deduplicate.Apply(source) source = createMetric(1, time.Now()) - target = deduplicate.Apply(source) + target := deduplicate.Apply(source) assertCacheHit(t, &deduplicate, source) assertMetricSuppressed(t, target) @@ -113,9 +114,10 @@ func TestPassUpdatedValue(t *testing.T) { // Create metric in the past source := createMetric(1, time.Now().Add(-1*time.Second)) target := deduplicate.Apply(source) + assertMetricPassed(t, target, source) + source = createMetric(2, time.Now()) target = deduplicate.Apply(source) - assertCacheRefresh(t, &deduplicate, source) assertMetricPassed(t, target, source) } @@ -125,9 +127,10 @@ func TestPassAfterCacheExpire(t *testing.T) { // Create metric in the past source := createMetric(1, time.Now().Add(-1*time.Hour)) target := deduplicate.Apply(source) + assertMetricPassed(t, target, source) + source = createMetric(1, time.Now()) target = deduplicate.Apply(source) - assertCacheRefresh(t, &deduplicate, source) assertMetricPassed(t, target, source) } diff --git a/plugins/processors/defaults/defaults.go b/plugins/processors/defaults/defaults.go index eaffdf81a2429..5d6522fad45c9 100644 --- a/plugins/processors/defaults/defaults.go +++ b/plugins/processors/defaults/defaults.go @@ -1,9 +1,10 @@ package defaults import ( + "strings" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" - "strings" ) const sampleConfig = ` @@ -58,10 +59,10 @@ func (def *Defaults) Apply(inputMetrics ...telegraf.Metric) []telegraf.Metric { } func maybeTrimmedString(v interface{}) (string, bool) { - switch value := v.(type) { - case string: + if value, ok := v.(string); ok { return strings.TrimSpace(value), true } + return "", false } diff --git a/plugins/processors/defaults/defaults_test.go b/plugins/processors/defaults/defaults_test.go index c0e930fc6b887..5d6808f4f5a71 100644 --- a/plugins/processors/defaults/defaults_test.go +++ b/plugins/processors/defaults/defaults_test.go @@ -4,9 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestDefaults(t *testing.T) { @@ -124,7 +125,7 @@ func TestDefaults(t *testing.T) { defaults := scenario.defaults resultMetrics := defaults.Apply(scenario.input) - assert.Len(t, resultMetrics, 1) + require.Len(t, resultMetrics, 1) testutil.RequireMetricsEqual(t, scenario.expected, resultMetrics) }) } diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 53603ae0153c7..4addf32b0b7ca 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func createTestMetric() telegraf.Metric { @@ -19,7 +19,7 @@ func createTestMetric() telegraf.Metric { 
map[string]interface{}{ "string_value": "test", "duplicate_string_value": "test", - "int_value": int(200), + "int_value": 200, "uint_value": uint(500), "float_value": float64(3.14), "true_value": true, @@ -29,26 +29,26 @@ func createTestMetric() telegraf.Metric { return m } -func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[string]interface{} { - processed := mapper.Apply(metric) +func calculateProcessedValues(mapper EnumMapper, m telegraf.Metric) map[string]interface{} { + processed := mapper.Apply(m) return processed[0].Fields() } -func calculateProcessedTags(mapper EnumMapper, metric telegraf.Metric) map[string]string { - processed := mapper.Apply(metric) +func calculateProcessedTags(mapper EnumMapper, m telegraf.Metric) map[string]string { + processed := mapper.Apply(m) return processed[0].Tags() } func assertFieldValue(t *testing.T, expected interface{}, field string, fields map[string]interface{}) { value, present := fields[field] - assert.True(t, present, "value of field '"+field+"' was not present") - assert.EqualValues(t, expected, value) + require.True(t, present, "value of field '"+field+"' was not present") + require.EqualValues(t, expected, value) } func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[string]string) { value, present := tags[tag] - assert.True(t, present, "value of tag '"+tag+"' was not present") - assert.EqualValues(t, expected, value) + require.True(t, present, "value of tag '"+tag+"' was not present") + require.EqualValues(t, expected, value) } func TestRetainsMetric(t *testing.T) { @@ -65,9 +65,9 @@ func TestRetainsMetric(t *testing.T) { assertFieldValue(t, 500, "uint_value", fields) assertFieldValue(t, float64(3.14), "float_value", fields) assertFieldValue(t, true, "true_value", fields) - assert.Equal(t, "m1", target.Name()) - assert.Equal(t, source.Tags(), target.Tags()) - assert.Equal(t, source.Time(), target.Time()) + require.Equal(t, "m1", target.Name()) + require.Equal(t, source.Tags(), target.Tags()) + require.Equal(t, source.Time(), target.Time()) } func TestMapsSingleStringValueTag(t *testing.T) { @@ -118,7 +118,7 @@ func TestMappings(t *testing.T) { for index := range mapping["target_value"] { mapper := EnumMapper{Mappings: []Mapping{{Field: fieldName, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} err := mapper.Init() - assert.Nil(t, err) + require.Nil(t, err) fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, mapping["expected_value"][index], fieldName, fields) } @@ -171,7 +171,7 @@ func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) { assertFieldValue(t, "test", "string_value", fields) _, present := fields[field] - assert.False(t, present, "value of field '"+field+"' was present") + require.False(t, present, "value of field '"+field+"' was present") } func TestFieldGlobMatching(t *testing.T) { diff --git a/plugins/processors/execd/execd_test.go b/plugins/processors/execd/execd_test.go index c226725e1844e..26af720132959 100644 --- a/plugins/processors/execd/execd_test.go +++ b/plugins/processors/execd/execd_test.go @@ -7,13 +7,13 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/serializers" 
"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestExternalProcessorWorks(t *testing.T) { @@ -32,7 +32,6 @@ func TestExternalProcessorWorks(t *testing.T) { now := time.Now() orig := now - metrics := []telegraf.Metric{} for i := 0; i < 10; i++ { m := metric.New("test", map[string]string{ @@ -43,17 +42,16 @@ func TestExternalProcessorWorks(t *testing.T) { "count": 1, }, now) - metrics = append(metrics, m) now = now.Add(1) - e.Add(m, acc) + require.NoError(t, e.Add(m, acc)) } acc.Wait(1) require.NoError(t, e.Stop()) acc.Wait(9) - metrics = acc.GetTelegrafMetrics() + metrics := acc.GetTelegrafMetrics() m := metrics[0] expected := testutil.MustMetric("test", @@ -105,7 +103,7 @@ func TestParseLinesWithNewLines(t *testing.T) { }, now) - e.Add(m, acc) + require.NoError(t, e.Add(m, acc)) acc.Wait(1) require.NoError(t, e.Stop()) @@ -144,40 +142,51 @@ func runCountMultiplierProgram() { serializer, _ := serializers.NewInfluxSerializer() for { - metric, err := parser.Next() + m, err := parser.Next() if err != nil { if err == influx.EOF { return // stream ended } if parseErr, isParseError := err.(*influx.ParseError); isParseError { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "parse ERR %v\n", parseErr) + //nolint:revive // os.Exit called intentionally os.Exit(1) } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) + //nolint:revive // os.Exit called intentionally os.Exit(1) } - c, found := metric.GetField("count") + c, found := m.GetField("count") if !found { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "metric has no count field\n") + //nolint:revive // os.Exit called intentionally os.Exit(1) } switch t := c.(type) { case float64: t *= 2 - metric.AddField("count", t) + m.AddField("count", t) case int64: t *= 2 - metric.AddField("count", t) + m.AddField("count", t) default: + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "count is not an unknown type, it's a %T\n", c) + //nolint:revive // os.Exit called intentionally os.Exit(1) } - b, err := serializer.Serialize(metric) + b, err := serializer.Serialize(m) if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) + //nolint:revive // os.Exit called intentionally os.Exit(1) } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, string(b)) } } diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index eb3fb2333e278..228124e7b9b83 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -80,7 +80,7 @@ type valType = nameMap type mapFunc func(agent string) (nameMap, error) type makeTableFunc func(string) (*si.Table, error) -type sigMap map[string](chan struct{}) +type sigMap map[string]chan struct{} type IfName struct { SourceTag string `toml:"tag"` @@ -96,24 +96,24 @@ type IfName struct { Log telegraf.Logger `toml:"-"` - ifTable *si.Table `toml:"-"` - ifXTable *si.Table `toml:"-"` + ifTable *si.Table + ifXTable *si.Table - lock sync.Mutex `toml:"-"` - cache *TTLCache `toml:"-"` + lock sync.Mutex + cache *TTLCache - parallel parallel.Parallel `toml:"-"` - acc telegraf.Accumulator `toml:"-"` + parallel parallel.Parallel + acc telegraf.Accumulator - getMapRemote mapFunc `toml:"-"` - makeTable makeTableFunc `toml:"-"` + getMapRemote mapFunc + makeTable makeTableFunc - gsBase snmp.GosnmpWrapper `toml:"-"` + gsBase snmp.GosnmpWrapper - sigs sigMap 
`toml:"-"` + sigs sigMap } -const minRetry time.Duration = 5 * time.Minute +const minRetry = 5 * time.Minute func (d *IfName) SampleConfig() string { return sampleConfig diff --git a/plugins/processors/ifname/ifname_test.go b/plugins/processors/ifname/ifname_test.go index 4052818f7509b..16139073837eb 100644 --- a/plugins/processors/ifname/ifname_test.go +++ b/plugins/processors/ifname/ifname_test.go @@ -18,15 +18,16 @@ func TestTable(t *testing.T) { t.Skip("Skipping test due to connect failures") d := IfName{} - d.Init() + err := d.Init() + require.NoError(t, err) tab, err := d.makeTable("IF-MIB::ifTable") require.NoError(t, err) - config := snmp.ClientConfig{ + clientConfig := snmp.ClientConfig{ Version: 2, Timeout: config.Duration(5 * time.Second), // Doesn't work with 0 timeout } - gs, err := snmp.NewWrapper(config) + gs, err := snmp.NewWrapper(clientConfig) require.NoError(t, err) err = gs.SetAgent("127.0.0.1") require.NoError(t, err) diff --git a/plugins/processors/override/override_test.go b/plugins/processors/override/override_test.go index 5e3c118e8f268..74e228d2eaa95 100644 --- a/plugins/processors/override/override_test.go +++ b/plugins/processors/override/override_test.go @@ -4,9 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func createTestMetric() telegraf.Metric { @@ -18,8 +19,8 @@ func createTestMetric() telegraf.Metric { return m } -func calculateProcessedTags(processor Override, metric telegraf.Metric) map[string]string { - processed := processor.Apply(metric) +func calculateProcessedTags(processor Override, m telegraf.Metric) map[string]string { + processed := processor.Apply(m) return processed[0].Tags() } @@ -29,8 +30,8 @@ func TestRetainsTags(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["metric_tag"] - assert.True(t, present, "Tag of metric was not present") - assert.Equal(t, "from_metric", value, "Value of Tag was changed") + require.True(t, present, "Tag of metric was not present") + require.Equal(t, "from_metric", value, "Value of Tag was changed") } func TestAddTags(t *testing.T) { @@ -39,9 +40,9 @@ func TestAddTags(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["added_tag"] - assert.True(t, present, "Additional Tag of metric was not present") - assert.Equal(t, "from_config", value, "Value of Tag was changed") - assert.Equal(t, 3, len(tags), "Should have one previous and two added tags.") + require.True(t, present, "Additional Tag of metric was not present") + require.Equal(t, "from_config", value, "Value of Tag was changed") + require.Equal(t, 3, len(tags), "Should have one previous and two added tags.") } func TestOverwritesPresentTagValues(t *testing.T) { @@ -50,9 +51,9 @@ func TestOverwritesPresentTagValues(t *testing.T) { tags := calculateProcessedTags(processor, createTestMetric()) value, present := tags["metric_tag"] - assert.True(t, present, "Tag of metric was not present") - assert.Equal(t, 1, len(tags), "Should only have one tag.") - assert.Equal(t, "from_config", value, "Value of Tag was not changed") + require.True(t, present, "Tag of metric was not present") + require.Equal(t, 1, len(tags), "Should only have one tag.") + require.Equal(t, "from_config", value, "Value of Tag was not changed") } func TestOverridesName(t *testing.T) { @@ -60,7 +61,7 @@ func TestOverridesName(t *testing.T) { 
processed := processor.Apply(createTestMetric()) - assert.Equal(t, "overridden", processed[0].Name(), "Name was not overridden") + require.Equal(t, "overridden", processed[0].Name(), "Name was not overridden") } func TestNamePrefix(t *testing.T) { @@ -68,7 +69,7 @@ func TestNamePrefix(t *testing.T) { processed := processor.Apply(createTestMetric()) - assert.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") + require.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") } func TestNameSuffix(t *testing.T) { @@ -76,5 +77,5 @@ func TestNameSuffix(t *testing.T) { processed := processor.Apply(createTestMetric()) - assert.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") + require.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") } diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go index 36e8aaeed43a0..b917e486b921e 100644 --- a/plugins/processors/rename/rename_test.go +++ b/plugins/processors/rename/rename_test.go @@ -4,9 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func newMetric(name string, tags map[string]string, fields map[string]interface{}) telegraf.Metric { @@ -31,9 +32,9 @@ func TestMeasurementRename(t *testing.T) { m2 := newMetric("bar", nil, nil) m3 := newMetric("baz", nil, nil) results := r.Apply(m1, m2, m3) - assert.Equal(t, "bar", results[0].Name(), "Should change name from 'foo' to 'bar'") - assert.Equal(t, "bar", results[1].Name(), "Should not name from 'bar'") - assert.Equal(t, "quux", results[2].Name(), "Should change name from 'baz' to 'quux'") + require.Equal(t, "bar", results[0].Name(), "Should change name from 'foo' to 'bar'") + require.Equal(t, "bar", results[1].Name(), "Should not name from 'bar'") + require.Equal(t, "quux", results[2].Name(), "Should change name from 'baz' to 'quux'") } func TestTagRename(t *testing.T) { @@ -45,7 +46,7 @@ func TestTagRename(t *testing.T) { m := newMetric("foo", map[string]string{"hostname": "localhost", "region": "east-1"}, nil) results := r.Apply(m) - assert.Equal(t, map[string]string{"host": "localhost", "region": "east-1"}, results[0].Tags(), "should change tag 'hostname' to 'host'") + require.Equal(t, map[string]string{"host": "localhost", "region": "east-1"}, results[0].Tags(), "should change tag 'hostname' to 'host'") } func TestFieldRename(t *testing.T) { @@ -57,5 +58,5 @@ func TestFieldRename(t *testing.T) { m := newMetric("foo", nil, map[string]interface{}{"time_msec": int64(1250), "snakes": true}) results := r.Apply(m) - assert.Equal(t, map[string]interface{}{"time": int64(1250), "snakes": true}, results[0].Fields(), "should change field 'time_msec' to 'time'") + require.Equal(t, map[string]interface{}{"time": int64(1250), "snakes": true}, results[0].Fields(), "should change field 'time_msec' to 'time'") } diff --git a/plugins/processors/reverse_dns/rdnscache.go b/plugins/processors/reverse_dns/rdnscache.go index cc9574552dae8..c027fc132ef33 100644 --- a/plugins/processors/reverse_dns/rdnscache.go +++ b/plugins/processors/reverse_dns/rdnscache.go @@ -104,10 +104,7 @@ func (d *ReverseDNSCache) Lookup(ip string) ([]string, error) { if len(ip) == 0 { return nil, nil } - return d.lookup(ip) -} -func (d *ReverseDNSCache) lookup(ip string) ([]string, error) { // check if the value is cached d.rwLock.RLock() result, found := d.lockedGetFromCache(ip) @@ -298,12 +295,6 @@ 
func (d *ReverseDNSCache) cleanup() { } } -// blockAllWorkers is a test function that eats up all the worker pool space to -// make sure workers are done running and there's no room to acquire a new worker. -func (d *ReverseDNSCache) blockAllWorkers() { - d.sem.Acquire(context.Background(), int64(d.maxWorkers)) -} - func (d *ReverseDNSCache) Stats() RDNSCacheStats { stats := RDNSCacheStats{} stats.CacheHit = atomic.LoadUint64(&d.stats.CacheHit) diff --git a/plugins/processors/reverse_dns/rdnscache_test.go b/plugins/processors/reverse_dns/rdnscache_test.go index 97cc8abdbdff8..b717e64efc4dc 100644 --- a/plugins/processors/reverse_dns/rdnscache_test.go +++ b/plugins/processors/reverse_dns/rdnscache_test.go @@ -18,7 +18,8 @@ func TestSimpleReverseDNSLookup(t *testing.T) { answer, err := d.Lookup("127.0.0.1") require.NoError(t, err) require.Equal(t, []string{"localhost"}, answer) - d.blockAllWorkers() + err = blockAllWorkers(d) + require.NoError(t, err) // do another request with no workers available. // it should read from cache instantly. @@ -134,3 +135,9 @@ type localResolver struct{} func (r *localResolver) LookupAddr(_ context.Context, _ string) (names []string, err error) { return []string{"localhost"}, nil } + +// blockAllWorkers is a test function that eats up all the worker pool space to +// make sure workers are done running and there's no room to acquire a new worker. +func blockAllWorkers(d *ReverseDNSCache) error { + return d.sem.Acquire(context.Background(), int64(d.maxWorkers)) +} diff --git a/plugins/processors/reverse_dns/reversedns_test.go b/plugins/processors/reverse_dns/reversedns_test.go index 5fcce5fb4725a..6db0b2ce5da93 100644 --- a/plugins/processors/reverse_dns/reversedns_test.go +++ b/plugins/processors/reverse_dns/reversedns_test.go @@ -33,9 +33,12 @@ func TestSimpleReverseLookup(t *testing.T) { }, } acc := &testutil.Accumulator{} - dns.Start(acc) - dns.Add(m, acc) - dns.Stop() + err := dns.Start(acc) + require.NoError(t, err) + err = dns.Add(m, acc) + require.NoError(t, err) + err = dns.Stop() + require.NoError(t, err) // should be processed now. require.Len(t, acc.GetTelegrafMetrics(), 1) diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 3a1f955a884c2..df3e53f6c092f 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -9,15 +9,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + starlarktime "go.starlark.net/lib/time" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" common "github.com/influxdata/telegraf/plugins/common/starlark" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" - starlarktime "go.starlark.net/lib/time" - "go.starlark.net/starlark" - "go.starlark.net/starlarkstruct" ) // Tests for runtime errors in the processors Init function. 
@@ -2674,11 +2675,11 @@ func buildPlugin(configContent string) (*Starlark, error) { return nil, err } if len(c.Processors) != 1 { - return nil, errors.New("Only one processor was expected") + return nil, errors.New("only one processor was expected") } plugin, ok := (c.Processors[0].Processor).(*Starlark) if !ok { - return nil, errors.New("Only a Starlark processor was expected") + return nil, errors.New("only a Starlark processor was expected") } plugin.Log = testutil.Logger{} return plugin, nil @@ -3199,7 +3200,8 @@ def apply(metric): b.ResetTimer() for n := 0; n < b.N; n++ { for _, m := range tt.input { - plugin.Add(m, &acc) + err = plugin.Add(m, &acc) + require.NoError(b, err) } } @@ -3213,7 +3215,7 @@ func TestAllScriptTestData(t *testing.T) { // can be run from multiple folders paths := []string{"testdata", "plugins/processors/starlark/testdata"} for _, testdataPath := range paths { - filepath.Walk(testdataPath, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(testdataPath, func(path string, info os.FileInfo, err error) error { if info == nil || info.IsDir() { return nil } @@ -3252,6 +3254,7 @@ func TestAllScriptTestData(t *testing.T) { }) return nil }) + require.NoError(t, err) } } @@ -3316,7 +3319,7 @@ func testLoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, e return result, nil } -func testNow(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func testNow(_ *starlark.Thread, _ *starlark.Builtin, _ starlark.Tuple, _ []starlark.Tuple) (starlark.Value, error) { return starlarktime.Time(time.Date(2021, 4, 15, 12, 0, 0, 999, time.UTC)), nil } diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go index c4201188436e6..26c3e85a91ca4 100644 --- a/plugins/processors/strings/strings_test.go +++ b/plugins/processors/strings/strings_test.go @@ -4,11 +4,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func newM1() telegraf.Metric { @@ -318,6 +318,7 @@ func TestFieldKeyConversions(t *testing.T) { check: func(t *testing.T, actual telegraf.Metric) { fv, ok := actual.GetField("Request") require.False(t, ok) + require.Nil(t, fv) fv, ok = actual.GetField("REQUEST") require.True(t, ok) @@ -686,7 +687,7 @@ func TestTagKeyConversions(t *testing.T) { require.True(t, ok) require.Equal(t, "GET", tv) - tv, ok = actual.GetTag("S-ComputerName") + _, ok = actual.GetTag("S-ComputerName") require.False(t, ok) tv, ok = actual.GetTag("s-computername") @@ -708,7 +709,7 @@ func TestTagKeyConversions(t *testing.T) { require.True(t, ok) require.Equal(t, "GET", tv) - tv, ok = actual.GetTag("S-ComputerName") + _, ok = actual.GetTag("S-ComputerName") require.False(t, ok) tv, ok = actual.GetTag("S-COMPUTERNAME") @@ -831,8 +832,8 @@ func TestMultipleConversions(t *testing.T) { "bar": "y", } - assert.Equal(t, expectedFields, processed[0].Fields()) - assert.Equal(t, expectedTags, processed[0].Tags()) + require.Equal(t, expectedFields, processed[0].Fields()) + require.Equal(t, expectedTags, processed[0].Tags()) } func TestReadmeExample(t *testing.T) { @@ -888,8 +889,8 @@ func TestReadmeExample(t *testing.T) { "resp_bytes": int64(270), } - assert.Equal(t, expectedFields, processed[0].Fields()) - assert.Equal(t, expectedTags, 
processed[0].Tags()) + require.Equal(t, expectedFields, processed[0].Fields()) + require.Equal(t, expectedTags, processed[0].Tags()) } func newMetric(name string) telegraf.Metric { @@ -915,9 +916,9 @@ func TestMeasurementReplace(t *testing.T) { newMetric("average_cpu_usage"), } results := plugin.Apply(metrics...) - assert.Equal(t, "foo:some-value:bar", results[0].Name(), "`_` was not changed to `-`") - assert.Equal(t, "average:cpu:usage", results[1].Name(), "Input name should have been unchanged") - assert.Equal(t, "average-cpu-usage", results[2].Name(), "All instances of `_` should have been changed to `-`") + require.Equal(t, "foo:some-value:bar", results[0].Name(), "`_` was not changed to `-`") + require.Equal(t, "average:cpu:usage", results[1].Name(), "Input name should have been unchanged") + require.Equal(t, "average-cpu-usage", results[2].Name(), "All instances of `_` should have been changed to `-`") } func TestMeasurementCharDeletion(t *testing.T) { @@ -936,9 +937,9 @@ func TestMeasurementCharDeletion(t *testing.T) { newMetric("barbarbar"), } results := plugin.Apply(metrics...) - assert.Equal(t, ":bar:baz", results[0].Name(), "Should have deleted the initial `foo`") - assert.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string") - assert.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input") + require.Equal(t, ":bar:baz", results[0].Name(), "Should have deleted the initial `foo`") + require.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string") + require.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input") } func TestBase64Decode(t *testing.T) { diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go index 1b48739a189f1..ef2e86c817516 100644 --- a/plugins/processors/tag_limit/tag_limit.go +++ b/plugins/processors/tag_limit/tag_limit.go @@ -2,9 +2,9 @@ package taglimit import ( "fmt" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/processors" - "log" ) const sampleConfig = ` @@ -16,8 +16,9 @@ const sampleConfig = ` ` type TagLimit struct { - Limit int `toml:"limit"` - Keep []string `toml:"keep"` + Limit int `toml:"limit"` + Keep []string `toml:"keep"` + Log telegraf.Logger `toml:"-"` init bool keepTags map[string]string } @@ -49,7 +50,7 @@ func (d *TagLimit) initOnce() error { func (d *TagLimit) Apply(in ...telegraf.Metric) []telegraf.Metric { err := d.initOnce() if err != nil { - log.Printf("E! 
[processors.tag_limit] could not create tag_limit processor: %v", err) + d.Log.Errorf("Could not create tag_limit processor: %v", err) return in } for _, point := range in { diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go index d9c361ed07296..3b2894d8a040d 100644 --- a/plugins/processors/tag_limit/tag_limit_test.go +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -4,9 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { @@ -46,8 +47,8 @@ func TestUnderLimit(t *testing.T) { m1 := MustMetric("foo", oneTags, nil, currentTime) m2 := MustMetric("bar", tenTags, nil, currentTime) limitApply := tagLimitConfig.Apply(m1, m2) - assert.Equal(t, oneTags, limitApply[0].Tags(), "one tag") - assert.Equal(t, tenTags, limitApply[1].Tags(), "ten tags") + require.Equal(t, oneTags, limitApply[0].Tags(), "one tag") + require.Equal(t, tenTags, limitApply[1].Tags(), "ten tags") } func TestTrim(t *testing.T) { @@ -78,9 +79,9 @@ func TestTrim(t *testing.T) { m1 := MustMetric("foo", threeTags, nil, currentTime) m2 := MustMetric("bar", tenTags, nil, currentTime) limitApply := tagLimitConfig.Apply(m1, m2) - assert.Equal(t, threeTags, limitApply[0].Tags(), "three tags") + require.Equal(t, threeTags, limitApply[0].Tags(), "three tags") trimmedTags := limitApply[1].Tags() - assert.Equal(t, 3, len(trimmedTags), "ten tags") - assert.Equal(t, "foo", trimmedTags["a"], "preserved: a") - assert.Equal(t, "bar", trimmedTags["b"], "preserved: b") + require.Equal(t, 3, len(trimmedTags), "ten tags") + require.Equal(t, "foo", trimmedTags["a"], "preserved: a") + require.Equal(t, "bar", trimmedTags["b"], "preserved: b") } diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go index f43d697956193..c3f25742d30b8 100644 --- a/plugins/processors/template/template_test.go +++ b/plugins/processors/template/template_test.go @@ -4,10 +4,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestName(t *testing.T) { @@ -90,7 +90,7 @@ func TestMetricMissingTagsIsNotLost(t *testing.T) { // assert // make sure no metrics are lost when a template process fails - assert.Equal(t, 2, len(actual), "Number of metrics input should equal number of metrics output") + require.Equal(t, 2, len(actual), "Number of metrics input should equal number of metrics output") } func TestTagAndFieldConcatenate(t *testing.T) { From 020b77b23973011b83e8f438ffccce404d92d306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Wed, 24 Nov 2021 20:40:25 +0100 Subject: [PATCH 085/133] fix: Linter fixes for plugins/outputs/[g-m]* (#10127) Co-authored-by: Pawel Zak --- plugins/outputs/graphite/graphite.go | 19 ++-- plugins/outputs/graphite/graphite_test.go | 64 +++++++------ plugins/outputs/graylog/graylog.go | 74 +++++++++++---- plugins/outputs/graylog/graylog_test.go | 24 ++--- plugins/outputs/health/health.go | 4 +- plugins/outputs/health/health_test.go | 4 +- plugins/outputs/http/http.go | 6 +- plugins/outputs/http/http_test.go | 8 +- plugins/outputs/influxdb/http.go | 4 +- 
plugins/outputs/influxdb/udp.go | 2 +- plugins/outputs/influxdb/udp_test.go | 5 +- plugins/outputs/influxdb_v2/http.go | 14 +-- .../outputs/influxdb_v2/http_internal_test.go | 4 +- plugins/outputs/influxdb_v2/http_test.go | 10 ++- plugins/outputs/influxdb_v2/influxdb.go | 11 +-- plugins/outputs/instrumental/instrumental.go | 14 +-- .../outputs/instrumental/instrumental_test.go | 44 +++++---- plugins/outputs/kafka/kafka.go | 5 +- plugins/outputs/kafka/kafka_test.go | 11 +-- plugins/outputs/kinesis/kinesis_test.go | 62 +++++-------- plugins/outputs/librato/librato.go | 89 ++++++++++--------- plugins/outputs/loki/loki.go | 11 +-- plugins/outputs/loki/loki_test.go | 7 +- plugins/outputs/mqtt/mqtt.go | 15 ++-- plugins/outputs/nats/nats.go | 8 +- plugins/outputs/newrelic/newrelic.go | 7 +- plugins/outputs/nsq/nsq.go | 15 ++-- 27 files changed, 302 insertions(+), 239 deletions(-) diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index bd35a4203385a..11a712c36f721 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -113,7 +113,7 @@ func (g *Graphite) Connect() error { func (g *Graphite) Close() error { // Closing all connections for _, conn := range g.conns { - conn.Close() + _ = conn.Close() } return nil } @@ -133,11 +133,16 @@ func (g *Graphite) Description() string { // props to Tv via the authors of carbon-relay-ng` for this trick. func (g *Graphite) checkEOF(conn net.Conn) { b := make([]byte, 1024) - conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + + if err := conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { + g.Log.Errorf("Couldn't set read deadline for connection %s. closing conn explicitly", conn) + _ = conn.Close() + return + } num, err := conn.Read(b) if err == io.EOF { g.Log.Errorf("Conn %s is closed. closing conn explicitly", conn) - conn.Close() + _ = conn.Close() return } // just in case i misunderstand something or the remote behaves badly @@ -147,7 +152,7 @@ func (g *Graphite) checkEOF(conn net.Conn) { // Log non-timeout errors or close. if e, ok := err.(net.Error); !(ok && e.Timeout()) { g.Log.Errorf("conn %s checkEOF .conn.Read returned err != EOF, which is unexpected. closing conn. 
error: %s", conn, err) - conn.Close() + _ = conn.Close() } } @@ -174,7 +179,7 @@ func (g *Graphite) Write(metrics []telegraf.Metric) error { // try to reconnect and retry to send if err != nil { g.Log.Error("Graphite: Reconnecting and retrying...") - g.Connect() + _ = g.Connect() err = g.send(batch) } @@ -189,14 +194,14 @@ func (g *Graphite) send(batch []byte) error { p := rand.Perm(len(g.conns)) for _, n := range p { if g.Timeout > 0 { - g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second)) + _ = g.conns[n].SetWriteDeadline(time.Now().Add(time.Duration(g.Timeout) * time.Second)) } g.checkEOF(g.conns[n]) if _, e := g.conns[n].Write(batch); e != nil { // Error g.Log.Errorf("Graphite Error: " + e.Error()) // Close explicitly and let's try the next one - g.conns[n].Close() + _ = g.conns[n].Close() } else { // Success err = nil diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 1cb58b19485fc..7f96cb57ce538 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -8,13 +8,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) func TestGraphiteError(t *testing.T) { @@ -39,7 +37,7 @@ func TestGraphiteError(t *testing.T) { require.NoError(t, err1) err2 := g.Write(metrics) require.Error(t, err2) - assert.Equal(t, "could not write to any Graphite server in cluster", err2.Error()) + require.Equal(t, "could not write to any Graphite server in cluster", err2.Error()) } func TestGraphiteOK(t *testing.T) { @@ -490,9 +488,9 @@ func TCPServer1(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) data1, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) - conn.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) + require.NoError(t, conn.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -504,11 +502,11 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn2) tp := textproto.NewReader(reader) data2, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) + require.Equal(t, "my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) data3, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3) - conn2.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.192_168_0_1.my_measurement 3.14 1289430000", data3) + require.NoError(t, conn2.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -520,9 +518,9 @@ func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) data1, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1.myfield 3.14 1289430000", data1) - conn.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1.myfield 3.14 1289430000", data1) + require.NoError(t, conn.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -534,11 +532,11 @@ func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn2) tp := 
textproto.NewReader(reader) data2, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1 3.14 1289430000", data2) + require.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1 3.14 1289430000", data2) data3, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.valuetag 3.14 1289430000", data3) - conn2.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.192_168_0_1.my_measurement.valuetag 3.14 1289430000", data3) + require.NoError(t, conn2.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -550,9 +548,9 @@ func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) data1, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.mymeasurement.myfield;host=192.168.0.1 3.14 1289430000", data1) - conn.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.mymeasurement.myfield;host=192.168.0.1 3.14 1289430000", data1) + require.NoError(t, conn.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -564,11 +562,11 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn2) tp := textproto.NewReader(reader) data2, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.mymeasurement;host=192.168.0.1 3.14 1289430000", data2) + require.Equal(t, "my.prefix.mymeasurement;host=192.168.0.1 3.14 1289430000", data2) data3, _ := tp.ReadLine() - assert.Equal(t, "my.prefix.my_measurement;host=192.168.0.1 3.14 1289430000", data3) - conn2.Close() - tcpServer.Close() + require.Equal(t, "my.prefix.my_measurement;host=192.168.0.1 3.14 1289430000", data3) + require.NoError(t, conn2.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -580,9 +578,9 @@ func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) data1, _ := tp.ReadLine() - assert.Equal(t, "my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000", data1) - conn.Close() - tcpServer.Close() + require.Equal(t, "my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000", data1) + require.NoError(t, conn.Close()) + require.NoError(t, tcpServer.Close()) }() } @@ -594,10 +592,10 @@ func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { reader := bufio.NewReader(conn2) tp := textproto.NewReader(reader) data2, _ := tp.ReadLine() - assert.Equal(t, "my_prefix_mymeasurement;host=192.168.0.1 3.14 1289430000", data2) + require.Equal(t, "my_prefix_mymeasurement;host=192.168.0.1 3.14 1289430000", data2) data3, _ := tp.ReadLine() - assert.Equal(t, "my_prefix_my_measurement;host=192.168.0.1 3.14 1289430000", data3) - conn2.Close() - tcpServer.Close() + require.Equal(t, "my_prefix_my_measurement;host=192.168.0.1 3.14 1289430000", data3) + require.NoError(t, conn2.Close()) + require.NoError(t, tcpServer.Close()) }() } diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 16b744f35ccdc..b408b6372be30 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -97,7 +97,10 @@ func newGelfWriter(cfg gelfConfig, dialer *net.Dialer, tlsConfig *tls.Config) ge } func (g *gelfUDP) Write(message []byte) (n int, err error) { - compressed := g.compress(message) + compressed, err := g.compress(message) + if err != nil { + return 0, err + } chunksize := g.gelfConfig.MaxChunkSizeWan length := compressed.Len() @@ -106,10 +109,17 @@ func (g *gelfUDP) Write(message []byte) (n int, err error) { chunkCountInt := 
int(math.Ceil(float64(length) / float64(chunksize))) id := make([]byte, 8) - rand.Read(id) + _, err = rand.Read(id) + if err != nil { + return 0, err + } for i, index := 0, 0; i < length; i, index = i+chunksize, index+1 { - packet := g.createChunkedMessage(index, chunkCountInt, id, &compressed) + packet, err := g.createChunkedMessage(index, chunkCountInt, id, &compressed) + if err != nil { + return 0, err + } + err = g.send(packet.Bytes()) if err != nil { return 0, err @@ -136,21 +146,40 @@ func (g *gelfUDP) Close() (err error) { return err } -func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { +func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) (bytes.Buffer, error) { var packet bytes.Buffer chunksize := g.getChunksize() - packet.Write(g.intToBytes(30)) - packet.Write(g.intToBytes(15)) - packet.Write(id) + b, err := g.intToBytes(30) + if err != nil { + return packet, err + } + packet.Write(b) //nolint:revive // from buffer.go: "err is always nil" - packet.Write(g.intToBytes(index)) - packet.Write(g.intToBytes(chunkCountInt)) + b, err = g.intToBytes(15) + if err != nil { + return packet, err + } + packet.Write(b) //nolint:revive // from buffer.go: "err is always nil" - packet.Write(compressed.Next(chunksize)) + packet.Write(id) //nolint:revive // from buffer.go: "err is always nil" - return packet + b, err = g.intToBytes(index) + if err != nil { + return packet, err + } + packet.Write(b) //nolint:revive // from buffer.go: "err is always nil" + + b, err = g.intToBytes(chunkCountInt) + if err != nil { + return packet, err + } + packet.Write(b) //nolint:revive // from buffer.go: "err is always nil" + + packet.Write(compressed.Next(chunksize)) //nolint:revive // from buffer.go: "err is always nil" + + return packet, nil } func (g *gelfUDP) getChunksize() int { @@ -165,21 +194,30 @@ func (g *gelfUDP) getChunksize() int { return g.gelfConfig.MaxChunkSizeWan } -func (g *gelfUDP) intToBytes(i int) []byte { +func (g *gelfUDP) intToBytes(i int) ([]byte, error) { buf := new(bytes.Buffer) - binary.Write(buf, binary.LittleEndian, int8(i)) - return buf.Bytes() + err := binary.Write(buf, binary.LittleEndian, int8(i)) + if err != nil { + return nil, err + } + + return buf.Bytes(), err } -func (g *gelfUDP) compress(b []byte) bytes.Buffer { +func (g *gelfUDP) compress(b []byte) (bytes.Buffer, error) { var buf bytes.Buffer comp := zlib.NewWriter(&buf) - comp.Write(b) - comp.Close() + if _, err := comp.Write(b); err != nil { + return bytes.Buffer{}, err + } + + if err := comp.Close(); err != nil { + return bytes.Buffer{}, err + } - return buf + return buf, nil } func (g *gelfUDP) Connect() error { diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index a270f279b631f..e8577fb43c3a8 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -11,11 +11,11 @@ import ( "testing" "time" - tlsint "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/influxdata/telegraf/testutil" reuse "github.com/libp2p/go-reuseport" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/testutil" ) func TestWriteUDP(t *testing.T) { @@ -183,14 +183,14 @@ func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Gr var obj GelfObject _ = 
json.Unmarshal(bufW.Bytes(), &obj) require.NoError(t, err) - assert.Equal(t, obj["short_message"], "telegraf") + require.Equal(t, obj["short_message"], "telegraf") if config.NameFieldNoPrefix { - assert.Equal(t, obj["name"], "test1") + require.Equal(t, obj["name"], "test1") } else { - assert.Equal(t, obj["_name"], "test1") + require.Equal(t, obj["_name"], "test1") } - assert.Equal(t, obj["_tag1"], "value1") - assert.Equal(t, obj["_value"], float64(1)) + require.Equal(t, obj["_tag1"], "value1") + require.Equal(t, obj["_value"], float64(1)) } // in UDP scenario all 4 messages are received @@ -238,10 +238,10 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync. var obj GelfObject err = json.Unmarshal(bufW.Bytes(), &obj) require.NoError(t, err) - assert.Equal(t, obj["short_message"], "telegraf") - assert.Equal(t, obj["_name"], "test1") - assert.Equal(t, obj["_tag1"], "value1") - assert.Equal(t, obj["_value"], float64(1)) + require.Equal(t, obj["short_message"], "telegraf") + require.Equal(t, obj["_name"], "test1") + require.Equal(t, obj["_tag1"], "value1") + require.Equal(t, obj["_value"], float64(1)) } conn := accept() diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go index 4541659cec030..0782f7be20b93 100644 --- a/plugins/outputs/health/health.go +++ b/plugins/outputs/health/health.go @@ -208,9 +208,9 @@ func (h *Health) Close() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - h.server.Shutdown(ctx) + err := h.server.Shutdown(ctx) h.wg.Wait() - return nil + return err } // Origin returns the URL of the HTTP server. diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index 03a08fca21e7b..e155a6cba0929 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -6,10 +6,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/health" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") @@ -119,6 +120,7 @@ func TestHealth(t *testing.T) { resp, err := http.Get(output.Origin()) require.NoError(t, err) + defer resp.Body.Close() require.Equal(t, tt.expectedCode, resp.StatusCode) _, err = io.ReadAll(resp.Body) diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index b866c60218005..bd261d4125c50 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -150,7 +150,7 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { return err } - return h.write(reqBody) + return h.writeMetric(reqBody) } for _, metric := range metrics { @@ -160,14 +160,14 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { return err } - if err := h.write(reqBody); err != nil { + if err := h.writeMetric(reqBody); err != nil { return err } } return nil } -func (h *HTTP) write(reqBody []byte) error { +func (h *HTTP) writeMetric(reqBody []byte) error { var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) var err error diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index a5fc49b84c4f4..5c8488cce692d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -10,15 +10,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" 
"github.com/influxdata/telegraf/metric" httpconfig "github.com/influxdata/telegraf/plugins/common/http" - oauth "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/common/oauth" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" - "github.com/stretchr/testify/require" ) func getMetric() telegraf.Metric { @@ -408,7 +409,8 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { values.Add("access_token", token) values.Add("token_type", "bearer") values.Add("expires_in", "3600") - w.Write([]byte(values.Encode())) + _, err = w.Write([]byte(values.Encode())) + require.NoError(t, err) }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index ac85814db1f34..992ecf796040e 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -456,10 +456,10 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { return req, nil } -func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { +func (c *httpClient) makeWriteRequest(address string, body io.Reader) (*http.Request, error) { var err error - req, err := http.NewRequest("POST", url, body) + req, err := http.NewRequest("POST", address, body) if err != nil { return nil, fmt.Errorf("failed creating new request: %s", err.Error()) } diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 62848417b124c..fb629a40d0b10 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -106,7 +106,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error _, err = c.conn.Write(scanner.Bytes()) } if err != nil { - c.conn.Close() + _ = c.conn.Close() c.conn = nil return err } diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 25e03f72173ee..dda1f9412ef37 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -11,11 +11,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var ( @@ -91,7 +92,7 @@ func TestUDP_Simple(t *testing.T) { DialContextF: func(network, address string) (influxdb.Conn, error) { conn := &MockConn{ WriteF: func(b []byte) (n int, err error) { - buffer.Write(b) + buffer.Write(b) //nolint:revive // MockConn with always-success return return 0, nil }, } diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index 098ebd9dd5c4d..ee29382888d7c 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "log" "math" "net" "net/http" @@ -55,6 +54,7 @@ type HTTPConfig struct { TLSConfig *tls.Config Serializer *influx.Serializer + Log telegraf.Logger } type httpClient struct { @@ -71,6 +71,7 @@ type httpClient struct { url *url.URL retryTime time.Time retryCount int + log telegraf.Logger } func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { @@ -142,6 +143,7 @@ func NewHTTPClient(config 
*HTTPConfig) (*httpClient, error) { Bucket: config.Bucket, BucketTag: config.BucketTag, ExcludeBucketTag: config.ExcludeBucketTag, + log: config.Log, } return client, nil } @@ -296,7 +298,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // Clients should *not* repeat the request and the metrics should be dropped. http.StatusUnprocessableEntity, http.StatusNotAcceptable: - log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) + c.log.Errorf("Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) return nil case http.StatusUnauthorized, http.StatusForbidden: return fmt.Errorf("failed to write metric (%s): %s", resp.Status, desc) @@ -308,14 +310,14 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te c.retryCount++ retryDuration := c.getRetryDuration(resp.Header) c.retryTime = time.Now().Add(retryDuration) - log.Printf("W! [outputs.influxdb_v2] Failed to write; will retry in %s. (%s)\n", retryDuration, resp.Status) + c.log.Warnf("Failed to write; will retry in %s. (%s)\n", retryDuration, resp.Status) return fmt.Errorf("waiting %s for server before sending metric again", retryDuration) } // if it's any other 4xx code, the client should not retry as it's the client's mistake. // retrying will not make the request magically work. if len(resp.Status) > 0 && resp.Status[0] == '4' { - log.Printf("E! [outputs.influxdb_v2] Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) + c.log.Errorf("Failed to write metric (will be dropped: %s): %s\n", resp.Status, desc) return nil } @@ -357,10 +359,10 @@ func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { return time.Duration(retry*1000) * time.Millisecond } -func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { +func (c *httpClient) makeWriteRequest(address string, body io.Reader) (*http.Request, error) { var err error - req, err := http.NewRequest("POST", url, body) + req, err := http.NewRequest("POST", address, body) if err != nil { return nil, err } diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 10e2a4e133eeb..96a11324c37ff 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -11,8 +11,8 @@ import ( ) func genURL(u string) *url.URL { - URL, _ := url.Parse(u) - return URL + address, _ := url.Parse(u) + return address } func TestMakeWriteURL(t *testing.T) { diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index e44729eec1b7a..bce1dfe3d04e0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -9,15 +9,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func genURL(u string) *url.URL { - URL, _ := url.Parse(u) - return URL + address, _ := url.Parse(u) + return address } func TestNewHTTPClient(t *testing.T) { tests := []struct { @@ -60,7 +61,8 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/api/v2/write": - r.ParseForm() + err := r.ParseForm() + require.NoError(t, err) require.Equal(t, 
r.Form["bucket"], []string{"foo"}) body, err := io.ReadAll(r.Body) diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go index e188ddbae94d1..cdaefc41d8ecd 100644 --- a/plugins/outputs/influxdb_v2/influxdb.go +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -170,14 +170,14 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { return err } -func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) { +func (i *InfluxDB) getHTTPClient(address *url.URL, proxy *url.URL) (Client, error) { tlsConfig, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err } - config := &HTTPConfig{ - URL: url, + httpConfig := &HTTPConfig{ + URL: address, Token: i.Token, Organization: i.Organization, Bucket: i.Bucket, @@ -190,11 +190,12 @@ func (i *InfluxDB) getHTTPClient(url *url.URL, proxy *url.URL) (Client, error) { ContentEncoding: i.ContentEncoding, TLSConfig: tlsConfig, Serializer: i.newSerializer(), + Log: i.Log, } - c, err := NewHTTPClient(config) + c, err := NewHTTPClient(httpConfig) if err != nil { - return nil, fmt.Errorf("error creating HTTP client [%s]: %v", url, err) + return nil, fmt.Errorf("error creating HTTP client [%s]: %v", address, err) } return c, nil diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index f7158f16fc4c3..b0b52a921d86c 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -75,9 +75,9 @@ func (i *Instrumental) Connect() error { } func (i *Instrumental) Close() error { - i.conn.Close() + err := i.conn.Close() i.conn = nil - return nil + return err } func (i *Instrumental) Write(metrics []telegraf.Metric) error { @@ -138,23 +138,23 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { splitStat := strings.SplitN(stat, " ", 3) name := splitStat[0] value := splitStat[1] - time := splitStat[2] + timestamp := splitStat[2] // replace invalid components of metric name with underscore cleanMetric := MetricNameReplacer.ReplaceAllString(name, "_") if !ValueIncludesBadChar.MatchString(value) { - points = append(points, fmt.Sprintf("%s %s %s %s", metricType, cleanMetric, value, time)) + points = append(points, fmt.Sprintf("%s %s %s %s", metricType, cleanMetric, value, timestamp)) } } } allPoints := strings.Join(points, "") - _, err = fmt.Fprintf(i.conn, allPoints) + _, err = fmt.Fprint(i.conn, allPoints) if err != nil { if err == io.EOF { - i.Close() + _ = i.Close() } return err @@ -163,7 +163,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { // force the connection closed after sending data // to deal with various disconnection scenarios and eschew holding // open idle connections en masse - i.Close() + _ = i.Close() return nil } diff --git a/plugins/outputs/instrumental/instrumental_test.go b/plugins/outputs/instrumental/instrumental_test.go index f72b9e90f0806..b55c6b33db9ee 100644 --- a/plugins/outputs/instrumental/instrumental_test.go +++ b/plugins/outputs/instrumental/instrumental_test.go @@ -8,9 +8,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) func TestWrite(t *testing.T) { @@ -39,7 +40,8 @@ func TestWrite(t *testing.T) { ) metrics := []telegraf.Metric{m1, m2} - i.Write(metrics) + err := i.Write(metrics) + require.NoError(t, err) // Counter and Histogram are increments m3 := metric.New( @@ -70,7 +72,8 @@ func 
TestWrite(t *testing.T) { ) metrics = []telegraf.Metric{m3, m4, m5, m6} - i.Write(metrics) + err = i.Write(metrics) + require.NoError(t, err) wg.Wait() } @@ -80,44 +83,49 @@ func TCPServer(t *testing.T, wg *sync.WaitGroup) { go func() { defer wg.Done() conn, _ := tcpServer.Accept() - conn.SetDeadline(time.Now().Add(1 * time.Second)) + err := conn.SetDeadline(time.Now().Add(1 * time.Second)) + require.NoError(t, err) reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) hello, _ := tp.ReadLine() - assert.Equal(t, "hello version go/telegraf/1.1", hello) + require.Equal(t, "hello version go/telegraf/1.1", hello) auth, _ := tp.ReadLine() - assert.Equal(t, "authenticate abc123token", auth) - conn.Write([]byte("ok\nok\n")) + require.Equal(t, "authenticate abc123token", auth) + _, err = conn.Write([]byte("ok\nok\n")) + require.NoError(t, err) data1, _ := tp.ReadLine() - assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) + require.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement.myfield 3.14 1289430000", data1) data2, _ := tp.ReadLine() - assert.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) + require.Equal(t, "gauge my.prefix.192_168_0_1.mymeasurement 3.14 1289430000", data2) conn, _ = tcpServer.Accept() - conn.SetDeadline(time.Now().Add(1 * time.Second)) + err = conn.SetDeadline(time.Now().Add(1 * time.Second)) + require.NoError(t, err) reader = bufio.NewReader(conn) tp = textproto.NewReader(reader) hello, _ = tp.ReadLine() - assert.Equal(t, "hello version go/telegraf/1.1", hello) + require.Equal(t, "hello version go/telegraf/1.1", hello) auth, _ = tp.ReadLine() - assert.Equal(t, "authenticate abc123token", auth) - conn.Write([]byte("ok\nok\n")) + require.Equal(t, "authenticate abc123token", auth) + _, err = conn.Write([]byte("ok\nok\n")) + require.NoError(t, err) data3, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3) + require.Equal(t, "increment my.prefix.192_168_0_1.my_histogram 3.14 1289430000", data3) data4, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4) + require.Equal(t, "increment my.prefix.192_168_0_1_8888_123.bad_metric_name 1 1289430000", data4) data5, _ := tp.ReadLine() - assert.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5) + require.Equal(t, "increment my.prefix.192_168_0_1.my_counter 3.14 1289430000", data5) data6, _ := tp.ReadLine() - assert.Equal(t, "", data6) + require.Equal(t, "", data6) - conn.Close() + err = conn.Close() + require.NoError(t, err) }() } diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 2972427001ef5..90fd7259e107e 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -8,6 +8,7 @@ import ( "github.com/Shopify/sarama" "github.com/gofrs/uuid" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/outputs" @@ -228,7 +229,7 @@ func ValidateTopicSuffixMethod(method string) error { return nil } } - return fmt.Errorf("Unknown topic suffix method provided: %s", method) + return fmt.Errorf("unknown topic suffix method provided: %s", method) } func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) { @@ -379,7 +380,7 @@ func (k *Kafka) Write(metrics []telegraf.Metric) error { k.Log.Error("The timestamp of the message is out of acceptable range, consider 
increasing broker `message.timestamp.difference.max.ms`; dropping batch") return nil } - return prodErr + return prodErr //nolint:staticcheck // Return first error encountered } } return err diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index 0edaed31f41f3..c7fcc19e679a3 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -5,11 +5,12 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type topicSuffixTestpair struct { @@ -50,10 +51,10 @@ func TestTopicSuffixesIntegration(t *testing.T) { topic := "Test" - metric := testutil.TestMetric(1) + m := testutil.TestMetric(1) metricTagName := "tag1" - metricTagValue := metric.Tags()[metricTagName] - metricName := metric.Name() + metricTagValue := m.Tags()[metricTagName] + metricName := m.Name() var testcases = []topicSuffixTestpair{ // This ensures empty separator is okay @@ -85,7 +86,7 @@ func TestTopicSuffixesIntegration(t *testing.T) { TopicSuffix: topicSuffix, } - _, topic := k.GetTopicName(metric) + _, topic := k.GetTopicName(m) require.Equal(t, expectedTopic, topic) } } diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 89724ef1805d2..ef2481b60911e 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -9,12 +9,12 @@ import ( "github.com/aws/aws-sdk-go-v2/service/kinesis" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const testPartitionKey = "partitionKey" @@ -24,7 +24,6 @@ const testStreamName = "streamName" const zero int64 = 0 func TestPartitionKey(t *testing.T) { - assert := assert.New(t) testPoint := testutil.TestMetric(1) k := KinesisOutput{ @@ -34,7 +33,7 @@ func TestPartitionKey(t *testing.T) { Key: "-", }, } - assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") + require.Equal(t, "-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ Log: testutil.Logger{}, @@ -43,7 +42,7 @@ func TestPartitionKey(t *testing.T) { Key: "tag1", }, } - assert.Equal(testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") + require.Equal(t, testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") k = KinesisOutput{ Log: testutil.Logger{}, @@ -53,7 +52,7 @@ func TestPartitionKey(t *testing.T) { Default: "somedefault", }, } - assert.Equal("somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") + require.Equal(t, "somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") k = KinesisOutput{ Log: testutil.Logger{}, @@ -62,7 +61,7 @@ func TestPartitionKey(t *testing.T) { Key: "doesnotexist", }, } - assert.Equal("telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf") + require.Equal(t, "telegraf", k.getPartitionKey(testPoint), "PartitionKey should be 
telegraf") k = KinesisOutput{ Log: testutil.Logger{}, @@ -70,7 +69,7 @@ func TestPartitionKey(t *testing.T) { Method: "not supported", }, } - assert.Equal("", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") + require.Equal(t, "", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") k = KinesisOutput{ Log: testutil.Logger{}, @@ -78,7 +77,7 @@ func TestPartitionKey(t *testing.T) { Method: "measurement", }, } - assert.Equal(testPoint.Name(), k.getPartitionKey(testPoint), "PartitionKey should be value of measurement name") + require.Equal(t, testPoint.Name(), k.getPartitionKey(testPoint), "PartitionKey should be value of measurement name") k = KinesisOutput{ Log: testutil.Logger{}, @@ -88,14 +87,14 @@ func TestPartitionKey(t *testing.T) { } partitionKey := k.getPartitionKey(testPoint) u, err := uuid.FromString(partitionKey) - assert.Nil(err, "Issue parsing UUID") - assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") + require.NoError(t, err, "Issue parsing UUID") + require.Equal(t, byte(4), u.Version(), "PartitionKey should be UUIDv4") k = KinesisOutput{ Log: testutil.Logger{}, PartitionKey: "-", } - assert.Equal("-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") + require.Equal(t, "-", k.getPartitionKey(testPoint), "PartitionKey should be '-'") k = KinesisOutput{ Log: testutil.Logger{}, @@ -103,13 +102,11 @@ func TestPartitionKey(t *testing.T) { } partitionKey = k.getPartitionKey(testPoint) u, err = uuid.FromString(partitionKey) - assert.Nil(err, "Issue parsing UUID") - assert.Equal(byte(4), u.Version(), "PartitionKey should be UUIDv4") + require.NoError(t, err, "Issue parsing UUID") + require.Equal(t, byte(4), u.Version(), "PartitionKey should be UUIDv4") } func TestWriteKinesis_WhenSuccess(t *testing.T) { - assert := assert.New(t) - records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), @@ -135,7 +132,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { } elapsed := k.writeKinesis(records) - assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero) svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -146,8 +143,6 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { } func TestWriteKinesis_WhenRecordErrors(t *testing.T) { - assert := assert.New(t) - records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), @@ -173,7 +168,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { } elapsed := k.writeKinesis(records) - assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero) svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -184,8 +179,6 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { } func TestWriteKinesis_WhenServiceError(t *testing.T) { - assert := assert.New(t) - records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), @@ -205,7 +198,7 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { } elapsed := k.writeKinesis(records) - assert.GreaterOrEqual(elapsed.Nanoseconds(), zero) + require.GreaterOrEqual(t, elapsed.Nanoseconds(), zero) svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -216,7 +209,6 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { } func TestWrite_NoMetrics(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -232,13 +224,12 @@ func TestWrite_NoMetrics(t *testing.T) { } err := k.Write([]telegraf.Metric{}) - 
assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{}) } func TestWrite_SingleMetric(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -257,7 +248,7 @@ func TestWrite_SingleMetric(t *testing.T) { metric, metricData := createTestMetric(t, "metric1", serializer) err := k.Write([]telegraf.Metric{metric}) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -273,7 +264,6 @@ func TestWrite_SingleMetric(t *testing.T) { } func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -292,7 +282,7 @@ func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { metrics, metricsData := createTestMetrics(t, 3, serializer) err := k.Write(metrics) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -305,7 +295,6 @@ func TestWrite_MultipleMetrics_SinglePartialRequest(t *testing.T) { } func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -324,7 +313,7 @@ func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest, serializer) err := k.Write(metrics) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -337,7 +326,6 @@ func TestWrite_MultipleMetrics_SingleFullRequest(t *testing.T) { } func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -357,7 +345,7 @@ func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest+1, serializer) err := k.Write(metrics) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -376,7 +364,6 @@ func TestWrite_MultipleMetrics_MultipleRequests(t *testing.T) { } func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -396,7 +383,7 @@ func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { metrics, metricsData := createTestMetrics(t, maxRecordsPerRequest*2, serializer) err := k.Write(metrics) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { @@ -415,7 +402,6 @@ func TestWrite_MultipleMetrics_MultipleFullRequests(t *testing.T) { } func TestWrite_SerializerError(t *testing.T) { - assert := assert.New(t) serializer := influx.NewSerializer() svc := &mockKinesisPutRecords{} @@ -443,7 +429,7 @@ func TestWrite_SerializerError(t *testing.T) { invalidMetric, metric2, }) - assert.Nil(err, "Should not return error") + require.NoError(t, err, "Should not return error") // remaining valid metrics should still get written svc.AssertRequests(t, []*kinesis.PutRecordsInput{ @@ -519,7 +505,7 @@ func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { func (m 
*mockKinesisPutRecords) PutRecords(_ context.Context, input *kinesis.PutRecordsInput, _ ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) { reqNum := len(m.requests) if reqNum > len(m.responses) { - return nil, fmt.Errorf("Response for request %+v not setup", reqNum) + return nil, fmt.Errorf("response for request %+v not setup", reqNum) } m.requests = append(m.requests, input) diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index dc1e9b6fa7856..ff3e599017c10 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -118,53 +118,61 @@ func (l *Librato) Write(metrics []telegraf.Metric) error { // make sur we send a batch of maximum 300 sizeBatch := 300 for start := 0; start < metricCounter; start += sizeBatch { - lmetrics := LMetrics{} - end := start + sizeBatch - if end > metricCounter { - end = metricCounter - sizeBatch = end - start - } - lmetrics.Gauges = make([]*Gauge, sizeBatch) - copy(lmetrics.Gauges, tempGauges[start:end]) - metricsBytes, err := json.Marshal(lmetrics) + err := l.writeBatch(start, sizeBatch, metricCounter, tempGauges) if err != nil { - return fmt.Errorf("unable to marshal Metrics, %s", err.Error()) + return err } + } - l.Log.Debugf("Librato request: %v", string(metricsBytes)) + return nil +} - req, err := http.NewRequest( - "POST", - l.APIUrl, - bytes.NewBuffer(metricsBytes)) - if err != nil { - return fmt.Errorf("unable to create http.Request, %s", err.Error()) - } - req.Header.Add("Content-Type", "application/json") - req.SetBasicAuth(l.APIUser, l.APIToken) +func (l *Librato) writeBatch(start int, sizeBatch int, metricCounter int, tempGauges []*Gauge) error { + lmetrics := LMetrics{} + end := start + sizeBatch + if end > metricCounter { + end = metricCounter + sizeBatch = end - start + } + lmetrics.Gauges = make([]*Gauge, sizeBatch) + copy(lmetrics.Gauges, tempGauges[start:end]) + metricsBytes, err := json.Marshal(lmetrics) + if err != nil { + return fmt.Errorf("unable to marshal Metrics, %s", err.Error()) + } - resp, err := l.client.Do(req) + l.Log.Debugf("Librato request: %v", string(metricsBytes)) + + req, err := http.NewRequest( + "POST", + l.APIUrl, + bytes.NewBuffer(metricsBytes)) + if err != nil { + return fmt.Errorf("unable to create http.Request, %s", err.Error()) + } + req.Header.Add("Content-Type", "application/json") + req.SetBasicAuth(l.APIUser, l.APIToken) + + resp, err := l.client.Do(req) + if err != nil { + l.Log.Debugf("Error POSTing metrics: %v", err.Error()) + return fmt.Errorf("error POSTing metrics, %s", err.Error()) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 || l.Debug { + htmlData, err := io.ReadAll(resp.Body) if err != nil { - l.Log.Debugf("Error POSTing metrics: %v", err.Error()) - return fmt.Errorf("error POSTing metrics, %s", err.Error()) + l.Log.Debugf("Couldn't get response! (%v)", err) } - defer resp.Body.Close() - - if resp.StatusCode != 200 || l.Debug { - htmlData, err := io.ReadAll(resp.Body) - if err != nil { - l.Log.Debugf("Couldn't get response! 
(%v)", err) - } - if resp.StatusCode != 200 { - return fmt.Errorf( - "received bad status code, %d\n %s", - resp.StatusCode, - string(htmlData)) - } - l.Log.Debugf("Librato response: %v", string(htmlData)) + if resp.StatusCode != 200 { + return fmt.Errorf( + "received bad status code, %d\n %s", + resp.StatusCode, + string(htmlData)) } + l.Log.Debugf("Librato response: %v", string(htmlData)) } - return nil } @@ -219,8 +227,9 @@ func verifyValue(v interface{}) bool { switch v.(type) { case string: return false + default: + return true } - return true } func (g *Gauge) setValue(v interface{}) error { @@ -230,7 +239,7 @@ func (g *Gauge) setValue(v interface{}) error { case uint64: g.Value = float64(d) case float64: - g.Value = float64(d) + g.Value = d case bool: if d { g.Value = float64(1.0) diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go index fcf96e55f6429..c3787e952449f 100644 --- a/plugins/outputs/loki/loki.go +++ b/plugins/outputs/loki/loki.go @@ -11,13 +11,14 @@ import ( "strings" "time" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" - "golang.org/x/oauth2" - "golang.org/x/oauth2/clientcredentials" ) const ( @@ -126,7 +127,7 @@ func (l *Loki) Connect() (err error) { return fmt.Errorf("http client fail: %w", err) } - return + return nil } func (l *Loki) Close() error { @@ -155,10 +156,10 @@ func (l *Loki) Write(metrics []telegraf.Metric) error { s.insertLog(tags, Log{fmt.Sprintf("%d", m.Time().UnixNano()), line}) } - return l.write(s) + return l.writeMetrics(s) } -func (l *Loki) write(s Streams) error { +func (l *Loki) writeMetrics(s Streams) error { bs, err := json.Marshal(s) if err != nil { return fmt.Errorf("json.Marshal: %w", err) diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go index 6f0678e8dd4b5..3050f7acbf1dd 100644 --- a/plugins/outputs/loki/loki_test.go +++ b/plugins/outputs/loki/loki_test.go @@ -11,11 +11,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) func getMetric() telegraf.Metric { @@ -329,7 +329,8 @@ func TestOAuthClientCredentialsGrant(t *testing.T) { values.Add("access_token", token) values.Add("token_type", "bearer") values.Add("expires_in", "3600") - w.Write([]byte(values.Encode())) + _, err = w.Write([]byte(values.Encode())) + require.NoError(t, err) }, handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 54203ee0dba66..20c4885fa142b 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -2,12 +2,12 @@ package mqtt import ( "fmt" - "log" "strings" "sync" "time" paho "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" @@ -82,9 +82,10 @@ type MQTT struct { QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` - KeepAlive int64 
`toml:"keep_alive"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` + Log telegraf.Logger `toml:"-"` client paho.Client opts *paho.ClientOptions @@ -164,13 +165,13 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { } else { buf, err := m.serializer.Serialize(metric) if err != nil { - log.Printf("D! [outputs.mqtt] Could not serialize metric: %v", err) + m.Log.Debugf("Could not serialize metric: %v", err) continue } err = m.publish(topic, buf) if err != nil { - return fmt.Errorf("Could not write to MQTT server, %s", err) + return fmt.Errorf("could not write to MQTT server, %s", err) } } } @@ -183,7 +184,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { } publisherr := m.publish(key, buf) if publisherr != nil { - return fmt.Errorf("Could not write to MQTT server, %s", publisherr) + return fmt.Errorf("could not write to MQTT server, %s", publisherr) } } diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index f4cf35b16e4f7..9f7780eea5219 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -2,14 +2,14 @@ package nats import ( "fmt" - "log" "strings" + "github.com/nats-io/nats.go" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/nats-io/nats.go" ) type NATS struct { @@ -23,6 +23,8 @@ type NATS struct { tls.ClientConfig + Log telegraf.Logger `toml:"-"` + conn *nats.Conn serializer serializers.Serializer } @@ -121,7 +123,7 @@ func (n *NATS) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - log.Printf("D! 
[outputs.nats] Could not serialize metric: %v", err) + n.Log.Debugf("Could not serialize metric: %v", err) continue } diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go index 02b2b9c3ff0ae..5290d4e6a7640 100644 --- a/plugins/outputs/newrelic/newrelic.go +++ b/plugins/outputs/newrelic/newrelic.go @@ -8,11 +8,12 @@ import ( "net/url" "time" + "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative" + "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative" - "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" ) // NewRelic nr structure @@ -27,7 +28,7 @@ type NewRelic struct { dc *cumulative.DeltaCalculator savedErrors map[int]interface{} errorCount int - client http.Client `toml:"-"` + client http.Client } // Description returns a one-sentence description on the Output diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index a9e2d94ac0bc0..6d719d0a088f2 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,19 +2,20 @@ package nsq import ( "fmt" - "log" + + "github.com/nsqio/go-nsq" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/nsqio/go-nsq" ) type NSQ struct { - Server string - Topic string - producer *nsq.Producer + Server string + Topic string + Log telegraf.Logger `toml:"-"` + producer *nsq.Producer serializer serializers.Serializer } @@ -68,13 +69,13 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - log.Printf("D! [outputs.nsq] Could not serialize metric: %v", err) + n.Log.Debugf("Could not serialize metric: %v", err) continue } err = n.producer.Publish(n.Topic, buf) if err != nil { - return fmt.Errorf("FAILED to send NSQD message: %s", err) + return fmt.Errorf("failed to send NSQD message: %s", err) } } return nil From 84e7a6acbe14b5188b4dbe6abee2872b1ee44ee0 Mon Sep 17 00:00:00 2001 From: Samantha Wang <32681364+sjwang90@users.noreply.github.com> Date: Wed, 24 Nov 2021 11:59:41 -0800 Subject: [PATCH 086/133] docs: clean up links (#10135) --- plugins/inputs/httpjson/README.md | 4 +++- plugins/inputs/jolokia/README.md | 11 ++++++----- plugins/inputs/kafka_consumer_legacy/README.md | 4 +++- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index e001e5b07b266..8782e71e3eb44 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,6 +1,6 @@ # HTTP JSON Input Plugin -**DEPRECATED in Telegraf v1.6: Use [HTTP input plugin](../http) as replacement** +## DEPRECATED in Telegraf v1.6: Use [HTTP input plugin][] as replacement The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. 
@@ -134,3 +134,5 @@ If the service returns an array of objects, one metric is be created for each ob `httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` `httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` + +[HTTP input plugin]: /plugins/inputs/http diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 0fdd25a94d86a..3b152f8e096a4 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,8 +1,8 @@ # Jolokia Input Plugin -## Deprecated in version 1.5: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin +## Deprecated in version 1.5: Please use the [jolokia2][] plugin -### Configuration +## Configuration ```toml # Read JMX metrics through Jolokia @@ -61,14 +61,15 @@ attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" ``` -#### Description +## Description The Jolokia plugin collects JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. - -See: +See [official Jolokia website](https://jolokia.org/) for more information. ## Measurements Jolokia plugin produces one measure for each metric configured, adding Server's `jolokia_name`, `jolokia_host` and `jolokia_port` as tags. + +[jolokia2]: /plugins/inputs/jolokia2 diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 1faf4c2305e3d..59b1767812c25 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,6 +1,6 @@ # Kafka Consumer Legacy Input Plugin -## Deprecated in version 1.4. Please use [Kafka Consumer input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kafka_consumer) +## Deprecated in version 1.4. Please use [Kafka Consumer input plugin][] The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the @@ -43,3 +43,5 @@ from the same topic in parallel. Running integration tests requires running Zookeeper & Kafka. See Makefile for kafka container command. 
+ +[Kafka Consumer input plugin]: /plugins/inputs/kafka_consumer From 9bd0c6121e3a94f8a693e32ec06fb6c293e75381 Mon Sep 17 00:00:00 2001 From: Mya Date: Wed, 24 Nov 2021 13:57:14 -0700 Subject: [PATCH 087/133] fix: failing ci on master (#10175) --- go.mod | 2 +- go.sum | 4 ++-- plugins/outputs/influxdb_v2/http.go | 4 ++-- plugins/outputs/influxdb_v2/http_test.go | 1 + 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 65107c40d7a34..75496c6646c71 100644 --- a/go.mod +++ b/go.mod @@ -196,7 +196,7 @@ require ( github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/jwt/v2 v2.1.0 // indirect - github.com/nats-io/nats-server/v2 v2.6.3 + github.com/nats-io/nats-server/v2 v2.6.5 github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect diff --git a/go.sum b/go.sum index f36bcce7474d7..727dc6e6d2611 100644 --- a/go.sum +++ b/go.sum @@ -1582,8 +1582,8 @@ github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL github.com/nats-io/jwt/v2 v2.1.0 h1:1UbfD5g1xTdWmSeRV8bh/7u+utTiBsRtWhLl1PixZp4= github.com/nats-io/jwt/v2 v2.1.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.6.3 h1:/ponRuIBtTiVDZRBjTKP+Cm/SWpvovI3vuB3pkpRQWw= -github.com/nats-io/nats-server/v2 v2.6.3/go.mod h1:LlMieumxNUnCloOTVFv7Wog0YnasScxARUMXVXv9/+M= +github.com/nats-io/nats-server/v2 v2.6.5 h1:VTG8gdSw4bEqMwKudOHkBLqGwNpNaJOwruj3+rquQlQ= +github.com/nats-io/nats-server/v2 v2.6.5/go.mod h1:LlMieumxNUnCloOTVFv7Wog0YnasScxARUMXVXv9/+M= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 h1:GMx3ZOcMEVM5qnUItQ4eJyQ6ycwmIEB/VC/UxvdevE0= github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index ee29382888d7c..a571a92b06c6e 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -225,7 +225,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } func (c *httpClient) splitAndWriteBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { - log.Printf("W! [outputs.influxdb_v2] Retrying write after splitting metric payload in half to reduce batch size") + c.log.Warnf("Retrying write after splitting metric payload in half to reduce batch size") midpoint := len(metrics) / 2 if err := c.writeBatch(ctx, bucket, metrics[:midpoint]); err != nil { @@ -284,7 +284,7 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te switch resp.StatusCode { // request was too large, send back to try again case http.StatusRequestEntityTooLarge: - log.Printf("E! 
[outputs.influxdb_v2] Failed to write metric, request was too large (413)") + c.log.Errorf("Failed to write metric, request was too large (413)") return &APIError{ StatusCode: resp.StatusCode, Title: resp.Status, diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index bce1dfe3d04e0..aac87817f108c 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -151,6 +151,7 @@ func TestTooLargeWriteRetry(t *testing.T) { Bucket: "telegraf", BucketTag: "bucket", ExcludeBucketTag: true, + Log: testutil.Logger{}, } client, err := influxdb.NewHTTPClient(config) From 7aa6b533bddcb198b9bef4858db86fff84fcbdb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 30 Nov 2021 20:31:10 +0100 Subject: [PATCH 088/133] fix: Linter fixes for plugins/inputs/[p-z]* (leftovers) (#10193) Co-authored-by: Pawel Zak --- go.mod | 1 - plugins/inputs/raindrops/raindrops.go | 3 +- plugins/inputs/raindrops/raindrops_test.go | 8 +- plugins/inputs/ras/ras.go | 8 +- plugins/inputs/ras/ras_test.go | 32 +- plugins/inputs/redfish/redfish.go | 4 +- plugins/inputs/redfish/redfish_test.go | 62 ++-- plugins/inputs/redis/redis_test.go | 8 +- .../inputs/rethinkdb/rethinkdb_data_test.go | 9 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 48 +-- .../inputs/rethinkdb/rethinkdb_server_test.go | 12 +- .../riemann_listener/riemann_listener.go | 52 ++- .../riemann_listener/riemann_listener_test.go | 19 +- plugins/inputs/snmp/snmp_test.go | 305 +++++++++--------- .../inputs/udp_listener/udp_listener_test.go | 2 +- .../webhooks/filestack/filestack_webhooks.go | 8 +- .../inputs/webhooks/github/github_webhooks.go | 17 +- .../webhooks/github/github_webhooks_test.go | 4 +- .../webhooks/mandrill/mandrill_webhooks.go | 10 +- .../papertrail/papertrail_webhooks.go | 10 +- .../webhooks/particle/particle_webhooks.go | 6 +- .../webhooks/rollbar/rollbar_webhooks.go | 8 +- plugins/inputs/webhooks/webhooks.go | 8 +- 23 files changed, 328 insertions(+), 316 deletions(-) diff --git a/go.mod b/go.mod index 75496c6646c71..2caf8e9aede81 100644 --- a/go.mod +++ b/go.mod @@ -311,7 +311,6 @@ require ( gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - gotest.tools v2.2.0+incompatible k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 k8s.io/client-go v0.22.2 diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 904d5418ec8db..cf1db2d1f6f98 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -116,7 +116,6 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } activeLineStr, activeErr = buf.ReadString('\n') if activeErr != nil { - iterate = false break } if strings.Compare(activeLineStr, "\n") == 0 { @@ -154,7 +153,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } acc.AddFields("raindrops_listen", lis, tags) } - return nil + return nil //nolint:nilerr // nil returned on purpose } // Get tag(s) for the raindrops calling/writing plugin diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 591dd624a10ea..6da64dbb4d207 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -7,11 +7,11 @@ import ( "net/http/httptest" "net/url" "testing" + "time" - "github.com/influxdata/telegraf/testutil" - 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "time" + + "github.com/influxdata/telegraf/testutil" ) const sampleResponse = ` @@ -41,7 +41,7 @@ func TestRaindropsTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := r.getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index a8d4ba727d7df..e3f35b06e0c8d 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -23,11 +23,11 @@ type Ras struct { DBPath string `toml:"db_path"` Log telegraf.Logger `toml:"-"` - db *sql.DB `toml:"-"` - latestTimestamp time.Time `toml:"-"` - cpuSocketCounters map[int]metricCounters `toml:"-"` - serverCounters metricCounters `toml:"-"` + db *sql.DB + latestTimestamp time.Time + cpuSocketCounters map[int]metricCounters + serverCounters metricCounters } type machineCheckError struct { diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index 656200fde95cc..d4e87dfe5f12c 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -8,9 +8,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestUpdateCounters(t *testing.T) { @@ -19,20 +19,20 @@ func TestUpdateCounters(t *testing.T) { ras.updateCounters(&mce) } - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") for metric, value := range ras.cpuSocketCounters[0] { if metric == processorBase { // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors, // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors - assert.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) + require.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) } else { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } @@ -61,9 +61,9 @@ func TestUpdateLatestTimestamp(t *testing.T) { }...) 
for _, mce := range testData { err := ras.updateLatestTimestamp(mce.Timestamp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) + require.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) } func TestMultipleSockets(t *testing.T) { @@ -99,14 +99,14 @@ func TestMultipleSockets(t *testing.T) { for _, mce := range testData { ras.updateCounters(&mce) } - assert.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") + require.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") for _, metricData := range ras.cpuSocketCounters { for metric, value := range metricData { if metric == levelTwoCache { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) } else { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } } @@ -117,21 +117,21 @@ func TestMissingDatabase(t *testing.T) { ras := newRas() ras.DBPath = "/nonexistent/ras.db" err := ras.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestEmptyDatabase(t *testing.T) { ras := newRas() - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") - assert.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") + require.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") for metric, value := range ras.cpuSocketCounters[0] { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index dcf26b192c651..bda0779c941b6 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -176,8 +176,8 @@ func (r *Redfish) Init() error { return nil } -func (r *Redfish) getData(url string, payload interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (r *Redfish) getData(address string, payload interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 4cbbb045302c1..04a102014490f 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -761,40 +761,42 @@ func TestInvalidDellJSON(t *testing.T) { }, } for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { - http.Error(w, "Unauthorized.", 401) - return - } + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !checkAuth(r, "test", "test") { + http.Error(w, "Unauthorized.", 401) + return + } - switch r.URL.Path { - case "/redfish/v1/Chassis/System.Embedded.1/Thermal": - http.ServeFile(w, r, tt.thermalfilename) - case 
"/redfish/v1/Chassis/System.Embedded.1/Power": - http.ServeFile(w, r, tt.powerfilename) - case "/redfish/v1/Chassis/System.Embedded.1": - http.ServeFile(w, r, tt.chassisfilename) - case "/redfish/v1/Systems/System.Embedded.1": - http.ServeFile(w, r, tt.hostnamefilename) - default: - w.WriteHeader(http.StatusNotFound) + switch r.URL.Path { + case "/redfish/v1/Chassis/System.Embedded.1/Thermal": + http.ServeFile(w, r, tt.thermalfilename) + case "/redfish/v1/Chassis/System.Embedded.1/Power": + http.ServeFile(w, r, tt.powerfilename) + case "/redfish/v1/Chassis/System.Embedded.1": + http.ServeFile(w, r, tt.chassisfilename) + case "/redfish/v1/Systems/System.Embedded.1": + http.ServeFile(w, r, tt.hostnamefilename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Redfish{ + Address: ts.URL, + Username: "test", + Password: "test", + ComputerSystemID: "System.Embedded.1", } - })) - defer ts.Close() - - plugin := &Redfish{ - Address: ts.URL, - Username: "test", - Password: "test", - ComputerSystemID: "System.Embedded.1", - } - require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Init()) - var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "error parsing input:") + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") + }) } } diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 6f8abbda6be0c..a7ca994c53f80 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/go-redis/redis" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type testClient struct { @@ -165,7 +165,7 @@ func TestRedis_ParseMetrics(t *testing.T) { "total_writes_processed": int64(17), "lazyfree_pending_objects": int64(0), "maxmemory": int64(0), - "maxmemory_policy": string("noeviction"), + "maxmemory_policy": "noeviction", "mem_aof_buffer": int64(0), "mem_clients_normal": int64(17440), "mem_clients_slaves": int64(0), @@ -202,7 +202,7 @@ func TestRedis_ParseMetrics(t *testing.T) { } } } - assert.InDelta(t, + require.InDelta(t, time.Now().Unix()-fields["rdb_last_save_time"].(int64), fields["rdb_last_save_time_elapsed"].(int64), 2) // allow for 2 seconds worth of offset diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index a0c5e4ba8ae57..2f9c90f1e9e7c 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -3,8 +3,9 @@ package rethinkdb import ( "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -36,7 +37,7 @@ func TestAddEngineStats(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) + require.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) } } @@ -67,7 +68,7 @@ func TestAddEngineStatsPartial(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range missingKeys { - assert.False(t, acc.HasInt64Field("rethinkdb", metric)) + require.False(t, acc.HasInt64Field("rethinkdb", metric)) } } @@ -107,6 +108,6 @@ func 
TestAddStorageStats(t *testing.T) { storage.AddStats(&acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb", metric)) + require.True(t, acc.HasInt64Field("rethinkdb", metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index ffb63e64106e2..553deddcb0219 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,9 +9,9 @@ import ( "strconv" "strings" - "github.com/influxdata/telegraf" - "gopkg.in/gorethink/gorethink.v3" + + "github.com/influxdata/telegraf" ) type Server struct { @@ -37,7 +37,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator) error { return fmt.Errorf("error adding member stats, %s", err.Error()) } - if err := s.addTableStats(acc); err != nil { + if err := s.addTablesStats(acc); err != nil { return fmt.Errorf("error adding table stats, %s", err.Error()) } @@ -49,7 +49,7 @@ func (s *Server) validateVersion() error { return errors.New("could not determine the RethinkDB server version: process.version key missing") } - versionRegexp := regexp.MustCompile("\\d.\\d.\\d") + versionRegexp := regexp.MustCompile(`\d.\d.\d`) versionString := versionRegexp.FindString(s.serverStatus.Process.Version) if versionString == "" { return fmt.Errorf("could not determine the RethinkDB server version: malformed version string (%v)", s.serverStatus.Process.Version) @@ -161,7 +161,7 @@ var TableTracking = []string{ "total_writes", } -func (s *Server) addTableStats(acc telegraf.Accumulator) error { +func (s *Server) addTablesStats(acc telegraf.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) if err != nil { return fmt.Errorf("table stats query error, %s", err.Error()) @@ -174,23 +174,33 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { return errors.New("could not parse table_status results") } for _, table := range tables { - cursor, err := gorethink.DB("rethinkdb").Table("stats"). - Get([]string{"table_server", table.ID, s.serverStatus.ID}). - Run(s.session) + err = s.addTableStats(acc, table) if err != nil { - return fmt.Errorf("table stats query error, %s", err.Error()) - } - defer cursor.Close() - var ts tableStats - if err := cursor.One(&ts); err != nil { - return fmt.Errorf("failure to parse table stats, %s", err.Error()) + return err } + } + return nil +} - tags := s.getDefaultTags() - tags["type"] = "data" - tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) - ts.Engine.AddEngineStats(TableTracking, acc, tags) - ts.Storage.AddStats(acc, tags) +func (s *Server) addTableStats(acc telegraf.Accumulator, table tableStatus) error { + cursor, err := gorethink.DB("rethinkdb").Table("stats"). + Get([]string{"table_server", table.ID, s.serverStatus.ID}). 
+ Run(s.session) + if err != nil { + return fmt.Errorf("table stats query error, %s", err.Error()) + } + defer cursor.Close() + + var ts tableStats + if err := cursor.One(&ts); err != nil { + return fmt.Errorf("failure to parse table stats, %s", err.Error()) } + + tags := s.getDefaultTags() + tags["type"] = "data" + tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) + ts.Engine.AddEngineStats(TableTracking, acc, tags) + ts.Storage.AddStats(acc, tags) + return nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 0119131900b61..0584dcc90c33b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -6,9 +6,9 @@ package rethinkdb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestValidateVersion(t *testing.T) { @@ -39,7 +39,7 @@ func TestAddClusterStats(t *testing.T) { require.NoError(t, err) for _, metric := range ClusterTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -50,7 +50,7 @@ func TestAddMemberStats(t *testing.T) { require.NoError(t, err) for _, metric := range MemberTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -61,7 +61,7 @@ func TestAddTableStats(t *testing.T) { require.NoError(t, err) for _, metric := range TableTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } keys := []string{ @@ -77,6 +77,6 @@ func TestAddTableStats(t *testing.T) { } for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 03b28ad2cb07f..597e2b8847714 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "fmt" "io" - "log" "net" "os" "os/signal" @@ -37,12 +36,12 @@ type RiemannSocketListener struct { wg sync.WaitGroup - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` telegraf.Accumulator } type setReadBufferer interface { - SetReadBuffer(bytes int) error + SetReadBuffer(sizeInBytes int) error } type riemannListener struct { @@ -162,13 +161,6 @@ func readMessages(r io.Reader, p []byte) error { return nil } -func checkError(err error) { - log.Println("The error is") - if err != nil { - log.Println(err.Error()) - } -} - func (rsl *riemannListener) read(conn net.Conn) { defer rsl.removeConnection(conn) defer conn.Close() @@ -187,7 +179,7 @@ func (rsl *riemannListener) read(conn net.Conn) { if err = binary.Read(conn, binary.BigEndian, &header); err != nil { if err.Error() != "EOF" { rsl.Log.Debugf("Failed to read header") - riemannReturnErrorResponse(conn, err.Error()) + rsl.riemannReturnErrorResponse(conn, err.Error()) return } return @@ -196,19 +188,19 @@ func (rsl *riemannListener) read(conn net.Conn) { if err = readMessages(conn, data); err != nil { rsl.Log.Debugf("Failed to read body: %s", err.Error()) - riemannReturnErrorResponse(conn, "Failed to read body") + rsl.riemannReturnErrorResponse(conn, "Failed to read body") return } if err = proto.Unmarshal(data, messagePb); err != nil { rsl.Log.Debugf("Failed to unmarshal: %s", err.Error()) - 
riemannReturnErrorResponse(conn, "Failed to unmarshal") + rsl.riemannReturnErrorResponse(conn, "Failed to unmarshal") return } riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events) for _, m := range riemannEvents { if m.Service == "" { - riemannReturnErrorResponse(conn, "No Service Name") + rsl.riemannReturnErrorResponse(conn, "No Service Name") return } tags := make(map[string]string) @@ -224,53 +216,52 @@ func (rsl *riemannListener) read(conn net.Conn) { singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) rsl.AddMetric(singleMetric) } - riemannReturnResponse(conn) + rsl.riemannReturnResponse(conn) } } -func riemannReturnResponse(conn net.Conn) { +func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) { t := true message := new(riemangoProto.Msg) message.Ok = &t returnData, err := proto.Marshal(message) if err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) return } b := new(bytes.Buffer) if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } // send the msg length if _, err = conn.Write(b.Bytes()); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } if _, err = conn.Write(returnData); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } } -func riemannReturnErrorResponse(conn net.Conn, errorMessage string) { +func (rsl *riemannListener) riemannReturnErrorResponse(conn net.Conn, errorMessage string) { t := false message := new(riemangoProto.Msg) message.Ok = &t message.Error = &errorMessage returnData, err := proto.Marshal(message) if err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) return } b := new(bytes.Buffer) if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } // send the msg length if _, err = conn.Write(b.Bytes()); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } if _, err = conn.Write(returnData); err != nil { - log.Println("Somethign") - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } } @@ -314,7 +305,7 @@ func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { ctx, cancelFunc := context.WithCancel(context.Background()) - go processOsSignals(cancelFunc) + go rsl.processOsSignals(cancelFunc) rsl.Accumulator = acc if rsl.ServiceAddress == "" { rsl.Log.Warnf("Using default service_address tcp://:5555") @@ -367,14 +358,13 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { } // Handle cancellations from the process -func processOsSignals(cancelFunc context.CancelFunc) { +func (rsl *RiemannSocketListener) processOsSignals(cancelFunc context.CancelFunc) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt) for { sig := <-signalChan - switch sig { - case os.Interrupt: - log.Println("Signal SIGINT is received, probably due to `Ctrl-C`, exiting ...") + if sig == os.Interrupt { + rsl.Log.Warn("Signal SIGINT is received, probably due to `Ctrl-C`, exiting...") cancelFunc() return } diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 7a995fc475cb7..3f87944610312 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -7,7 +7,6 @@ 
import ( riemanngo "github.com/riemann/riemann-go-client" "github.com/stretchr/testify/require" - "gotest.tools/assert" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" @@ -29,26 +28,26 @@ func TestSocketListener_tcp(t *testing.T) { testStats(t) testMissingService(t) } + func testStats(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() - if err != nil { - log.Println("Error") - panic(err) - } + require.NoError(t, err) defer c.Close() result, err := riemanngo.SendEvent(c, &riemanngo.Event{ Service: "hello", }) - assert.Equal(t, result.GetOk(), true) + require.NoError(t, err) + require.Equal(t, result.GetOk(), true) } + func testMissingService(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() - if err != nil { - panic(err) - } + require.NoError(t, err) defer c.Close() result, err := riemanngo.SendEvent(c, &riemanngo.Event{}) - assert.Equal(t, result.GetOk(), false) + require.Equal(t, false, result.GetOk()) + require.Equal(t, "No Service Name", result.GetError()) + require.NoError(t, err) } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index b5345248441ad..5f0bd1bb39e25 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -10,13 +10,13 @@ import ( "time" "github.com/gosnmp/gosnmp" + "github.com/influxdata/toml" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/toml" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type testSNMPConnection struct { @@ -139,11 +139,10 @@ func TestFieldInit(t *testing.T) { for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} err := f.init() - if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { - continue - } - assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) - assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + require.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) + + require.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + require.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) } } @@ -158,14 +157,14 @@ func TestTableInit(t *testing.T) { err := tbl.Init() require.NoError(t, err) - assert.Equal(t, "testTable", tbl.Name) + require.Equal(t, "testTable", tbl.Name) - assert.Len(t, tbl.Fields, 5) - assert.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true}) + require.Len(t, tbl.Fields, 
5) + require.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) + require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) + require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) + require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) + require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true}) } func TestSnmpInit(t *testing.T) { @@ -181,13 +180,13 @@ func TestSnmpInit(t *testing.T) { err := s.init() require.NoError(t, err) - assert.Len(t, s.Tables[0].Fields, 4) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true}) + require.Len(t, s.Tables[0].Fields, 4) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true}) - assert.Equal(t, Field{ + require.Equal(t, Field{ Oid: ".1.0.0.1.1", Name: "hostname", initialized: true, @@ -220,29 +219,29 @@ func TestSnmpInit_noTranslate(t *testing.T) { err := s.init() require.NoError(t, err) - assert.Equal(t, ".1.1.1.1", s.Fields[0].Oid) - assert.Equal(t, "one", s.Fields[0].Name) - assert.Equal(t, true, s.Fields[0].IsTag) + require.Equal(t, ".1.1.1.1", s.Fields[0].Oid) + require.Equal(t, "one", s.Fields[0].Name) + require.Equal(t, true, s.Fields[0].IsTag) - assert.Equal(t, ".1.1.1.2", s.Fields[1].Oid) - assert.Equal(t, "two", s.Fields[1].Name) - assert.Equal(t, false, s.Fields[1].IsTag) + require.Equal(t, ".1.1.1.2", s.Fields[1].Oid) + require.Equal(t, "two", s.Fields[1].Name) + require.Equal(t, false, s.Fields[1].IsTag) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Oid) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Name) - assert.Equal(t, false, s.Fields[2].IsTag) + require.Equal(t, ".1.1.1.3", s.Fields[2].Oid) + require.Equal(t, ".1.1.1.3", s.Fields[2].Name) + require.Equal(t, false, s.Fields[2].IsTag) - assert.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) - assert.Equal(t, "four", s.Tables[0].Fields[0].Name) - assert.Equal(t, true, s.Tables[0].Fields[0].IsTag) + require.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) + require.Equal(t, "four", s.Tables[0].Fields[0].Name) + require.Equal(t, true, s.Tables[0].Fields[0].IsTag) - assert.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid) - assert.Equal(t, "five", s.Tables[0].Fields[1].Name) - assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) + require.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid) + require.Equal(t, "five", s.Tables[0].Fields[1].Name) + require.Equal(t, false, s.Tables[0].Fields[1].IsTag) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Name) - assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) + require.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) + require.Equal(t, 
".1.1.1.6", s.Tables[0].Fields[2].Name) + require.Equal(t, false, s.Tables[0].Fields[2].IsTag) } func TestSnmpInit_noName_noOid(t *testing.T) { @@ -276,25 +275,25 @@ func TestGetSNMPConnection_v2(t *testing.T) { gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "1.2.3.4", gs.Target) - assert.EqualValues(t, 567, gs.Port) - assert.Equal(t, gosnmp.Version2c, gs.Version) - assert.Equal(t, "foo", gs.Community) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "1.2.3.4", gs.Target) + require.EqualValues(t, 567, gs.Port) + require.Equal(t, gosnmp.Version2c, gs.Version) + require.Equal(t, "foo", gs.Community) + require.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(1) require.NoError(t, err) gs = gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "1.2.3.4", gs.Target) - assert.EqualValues(t, 161, gs.Port) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "1.2.3.4", gs.Target) + require.EqualValues(t, 161, gs.Port) + require.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(2) require.NoError(t, err) gs = gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "127.0.0.1", gs.Target) - assert.EqualValues(t, 161, gs.Port) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "127.0.0.1", gs.Target) + require.EqualValues(t, 161, gs.Port) + require.Equal(t, "udp", gs.Transport) } func TestGetSNMPConnectionTCP(t *testing.T) { @@ -313,9 +312,9 @@ func TestGetSNMPConnectionTCP(t *testing.T) { gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "127.0.0.1", gs.Target) - assert.EqualValues(t, 56789, gs.Port) - assert.Equal(t, "tcp", gs.Transport) + require.Equal(t, "127.0.0.1", gs.Target) + require.EqualValues(t, 56789, gs.Port) + require.Equal(t, "tcp", gs.Transport) wg.Wait() } @@ -353,20 +352,20 @@ func TestGetSNMPConnection_v3(t *testing.T) { gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gs.Version, gosnmp.Version3) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) - assert.Equal(t, "1.2.3.4", gsc.Host()) - assert.EqualValues(t, 20, gs.MaxRepetitions) - assert.Equal(t, "mycontext", gs.ContextName) - assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) - assert.Equal(t, "myuser", sp.UserName) - assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) - assert.Equal(t, "password123", sp.AuthenticationPassphrase) - assert.Equal(t, gosnmp.DES, sp.PrivacyProtocol) - assert.Equal(t, "321drowssap", sp.PrivacyPassphrase) - assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) - assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) - assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + require.Equal(t, "1.2.3.4", gsc.Host()) + require.EqualValues(t, 20, gs.MaxRepetitions) + require.Equal(t, "mycontext", gs.ContextName) + require.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + require.Equal(t, "myuser", sp.UserName) + require.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + require.Equal(t, "password123", sp.AuthenticationPassphrase) + require.Equal(t, gosnmp.DES, sp.PrivacyProtocol) + require.Equal(t, "321drowssap", sp.PrivacyPassphrase) + require.Equal(t, "myengineid", sp.AuthoritativeEngineID) + require.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + require.EqualValues(t, 2, sp.AuthoritativeEngineTime) } func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { @@ -470,20 +469,20 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { gsc, err := 
s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gs.Version, gosnmp.Version3) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) - assert.Equal(t, "1.2.3.4", gsc.Host()) - assert.EqualValues(t, 20, gs.MaxRepetitions) - assert.Equal(t, "mycontext", gs.ContextName) - assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) - assert.Equal(t, "myuser", sp.UserName) - assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) - assert.Equal(t, "password123", sp.AuthenticationPassphrase) - assert.Equal(t, tc.Algorithm, sp.PrivacyProtocol) - assert.Equal(t, "password123", sp.PrivacyPassphrase) - assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) - assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) - assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + require.Equal(t, "1.2.3.4", gsc.Host()) + require.EqualValues(t, 20, gs.MaxRepetitions) + require.Equal(t, "mycontext", gs.ContextName) + require.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + require.Equal(t, "myuser", sp.UserName) + require.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + require.Equal(t, "password123", sp.AuthenticationPassphrase) + require.Equal(t, tc.Algorithm, sp.PrivacyProtocol) + require.Equal(t, "password123", sp.PrivacyPassphrase) + require.Equal(t, "myengineid", sp.AuthoritativeEngineID) + require.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + require.EqualValues(t, 2, sp.AuthoritativeEngineTime) }) } } @@ -502,9 +501,9 @@ func TestGetSNMPConnection_caching(t *testing.T) { require.NoError(t, err) gs4, err := s.getConnection(2) require.NoError(t, err) - assert.True(t, gs1 == gs2) - assert.False(t, gs2 == gs3) - assert.False(t, gs3 == gs4) + require.Equal(t, gs1, gs2) + require.NotEqual(t, gs2, gs3) + require.NotEqual(t, gs3, gs4) } func TestGosnmpWrapper_walk_retry(t *testing.T) { @@ -554,11 +553,11 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { GoSNMP: gs, } err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) - assert.NoError(t, srvr.Close()) + require.NoError(t, srvr.Close()) wg.Wait() - assert.Error(t, err) - assert.False(t, gs.Conn == conn) - assert.Equal(t, (gs.Retries+1)*2, reqCount) + require.Error(t, err) + require.NotEqual(t, gs.Conn, conn) + require.Equal(t, (gs.Retries+1)*2, reqCount) } func TestGosnmpWrapper_get_retry(t *testing.T) { @@ -609,9 +608,9 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { _, err = gsw.Get([]string{".1.0.0"}) require.NoError(t, srvr.Close()) wg.Wait() - assert.Error(t, err) - assert.False(t, gs.Conn == conn) - assert.Equal(t, (gs.Retries+1)*2, reqCount) + require.Error(t, err) + require.NotEqual(t, gs.Conn, conn) + require.Equal(t, (gs.Retries+1)*2, reqCount) } func TestTableBuild_walk(t *testing.T) { @@ -659,7 +658,7 @@ func TestTableBuild_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "foo", @@ -703,11 +702,11 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(9.999), }, } - assert.Len(t, tb.Rows, 4) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) - assert.Contains(t, tb.Rows, rtr4) + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) } func TestTableBuild_noWalk(t *testing.T) { @@ 
-746,8 +745,8 @@ func TestTableBuild_noWalk(t *testing.T) { Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, Fields: map[string]interface{}{"myfield2": 234}, } - assert.Len(t, tb.Rows, 1) - assert.Contains(t, tb.Rows, rtr) + require.Len(t, tb.Rows, 1) + require.Contains(t, tb.Rows, rtr) } func TestGather(t *testing.T) { @@ -796,21 +795,21 @@ func TestGather(t *testing.T) { require.Len(t, acc.Metrics, 2) m := acc.Metrics[0] - assert.Equal(t, "mytable", m.Measurement) - assert.Equal(t, "tsc", m.Tags[s.AgentHostTag]) - assert.Equal(t, "baz", m.Tags["myfield1"]) - assert.Len(t, m.Fields, 2) - assert.Equal(t, 234, m.Fields["myfield2"]) - assert.Equal(t, "baz", m.Fields["myfield3"]) - assert.True(t, !tstart.After(m.Time)) - assert.True(t, !tstop.Before(m.Time)) + require.Equal(t, "mytable", m.Measurement) + require.Equal(t, "tsc", m.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m.Tags["myfield1"]) + require.Len(t, m.Fields, 2) + require.Equal(t, 234, m.Fields["myfield2"]) + require.Equal(t, "baz", m.Fields["myfield3"]) + require.False(t, tstart.After(m.Time)) + require.False(t, tstop.Before(m.Time)) m2 := acc.Metrics[1] - assert.Equal(t, "myOtherTable", m2.Measurement) - assert.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) - assert.Equal(t, "baz", m2.Tags["myfield1"]) - assert.Len(t, m2.Fields, 1) - assert.Equal(t, 123456, m2.Fields["myOtherField"]) + require.Equal(t, "myOtherTable", m2.Measurement) + require.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m2.Tags["myfield1"]) + require.Len(t, m2.Fields, 1) + require.Equal(t, 123456, m2.Fields["myOtherField"]) } func TestGather_host(t *testing.T) { @@ -841,7 +840,7 @@ func TestGather_host(t *testing.T) { require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] - assert.Equal(t, "baz", m.Tags["host"]) + require.Equal(t, "baz", m.Tags["host"]) } func TestFieldConvert(t *testing.T) { @@ -874,7 +873,7 @@ func TestFieldConvert(t *testing.T) { {[]byte("123123123123"), "int", int64(123123123123)}, {float32(12.3), "int", int64(12)}, {float64(12.3), "int", int64(12)}, - {int(123), "int", int64(123)}, + {123, "int", int64(123)}, {int8(123), "int", int64(123)}, {int16(123), "int", int64(123)}, {int32(123), "int", int64(123)}, @@ -899,10 +898,8 @@ func TestFieldConvert(t *testing.T) { for _, tc := range testTable { act, err := fieldConvert(tc.conv, tc.input) - if !assert.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) { - continue - } - assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) } } @@ -910,14 +907,14 @@ func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) - assert.Len(t, snmpTranslateCaches, 1) + require.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) - assert.Equal(t, mibName, stc.mibName) - assert.Equal(t, oidNum, stc.oidNum) - assert.Equal(t, oidText, stc.oidText) - assert.Equal(t, conversion, stc.conversion) - assert.Equal(t, err, stc.err) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + 
require.Equal(t, conversion, stc.conversion) + require.Equal(t, err, stc.err) } func TestSnmpTranslateCache_hit(t *testing.T) { @@ -931,11 +928,11 @@ func TestSnmpTranslateCache_hit(t *testing.T) { }, } mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") - assert.Equal(t, "a", mibName) - assert.Equal(t, "b", oidNum) - assert.Equal(t, "c", oidText) - assert.Equal(t, "d", conversion) - assert.Equal(t, fmt.Errorf("e"), err) + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, "d", conversion) + require.Equal(t, fmt.Errorf("e"), err) snmpTranslateCaches = nil } @@ -943,14 +940,14 @@ func TestSnmpTableCache_miss(t *testing.T) { snmpTableCaches = nil oid := ".1.0.0.0" mibName, oidNum, oidText, fields, err := snmpTable(oid) - assert.Len(t, snmpTableCaches, 1) + require.Len(t, snmpTableCaches, 1) stc := snmpTableCaches[oid] require.NotNil(t, stc) - assert.Equal(t, mibName, stc.mibName) - assert.Equal(t, oidNum, stc.oidNum) - assert.Equal(t, oidText, stc.oidText) - assert.Equal(t, fields, stc.fields) - assert.Equal(t, err, stc.err) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + require.Equal(t, fields, stc.fields) + require.Equal(t, err, stc.err) } func TestSnmpTableCache_hit(t *testing.T) { @@ -964,11 +961,11 @@ func TestSnmpTableCache_hit(t *testing.T) { }, } mibName, oidNum, oidText, fields, err := snmpTable("foo") - assert.Equal(t, "a", mibName) - assert.Equal(t, "b", oidNum) - assert.Equal(t, "c", oidText) - assert.Equal(t, []Field{{Name: "d"}}, fields) - assert.Equal(t, fmt.Errorf("e"), err) + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, []Field{{Name: "d"}}, fields) + require.Equal(t, fmt.Errorf("e"), err) } func TestTableJoin_walk(t *testing.T) { @@ -1007,7 +1004,7 @@ func TestTableJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1041,10 +1038,10 @@ func TestTableJoin_walk(t *testing.T) { "myfield3": 3, }, } - assert.Len(t, tb.Rows, 3) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) } func TestTableOuterJoin_walk(t *testing.T) { @@ -1084,7 +1081,7 @@ func TestTableOuterJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1127,11 +1124,11 @@ func TestTableOuterJoin_walk(t *testing.T) { "myfield5": 1, }, } - assert.Len(t, tb.Rows, 4) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) - assert.Contains(t, tb.Rows, rtr4) + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) } func TestTableJoinNoIndexAsTag_walk(t *testing.T) { @@ -1170,7 +1167,7 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ 
Tags: map[string]string{ "myfield1": "instance", @@ -1204,8 +1201,8 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { "myfield3": 3, }, } - assert.Len(t, tb.Rows, 3) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 3e36838c6192a..a37d76da9b038 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -62,7 +62,7 @@ package udp_listener // // } // // listener.Stop() -// // assert.Equal(t, uint64(100000), acc.NMetrics()) +// // require.Equal(t, uint64(100000), acc.NMetrics()) // // } // func TestConnectUDP(t *testing.T) { diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 44def8c6f5141..e379608ea2673 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -3,23 +3,25 @@ package filestack import ( "encoding/json" "io" - "log" "net/http" "time" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) type FilestackWebhook struct { Path string acc telegraf.Accumulator + log telegraf.Logger } -func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(fs.Path, fs.eventHandler).Methods("POST") - log.Printf("I! Started the webhooks_filestack on %s\n", fs.Path) + fs.log = log + fs.log.Infof("Started the webhooks_filestack on %s", fs.Path) fs.acc = acc } diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 2d48cbef2e5f2..585f5daa5fdc8 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -6,10 +6,10 @@ import ( "encoding/hex" "encoding/json" "io" - "log" "net/http" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) @@ -17,11 +17,14 @@ type GithubWebhook struct { Path string Secret string acc telegraf.Accumulator + log telegraf.Logger } -func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(gh.Path, gh.eventHandler).Methods("POST") - log.Printf("I! Started the webhooks_github on %s\n", gh.Path) + + gh.log = log + gh.log.Infof("Started the webhooks_github on %s", gh.Path) gh.acc = acc } @@ -35,12 +38,12 @@ func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { } if gh.Secret != "" && !checkSignature(gh.Secret, data, r.Header.Get("X-Hub-Signature")) { - log.Printf("E! Fail to check the github webhook signature\n") + gh.log.Error("Fail to check the github webhook signature") w.WriteHeader(http.StatusBadRequest) return } - e, err := NewEvent(data, eventType) + e, err := gh.NewEvent(data, eventType) if err != nil { w.WriteHeader(http.StatusBadRequest) return @@ -69,8 +72,8 @@ func (e *newEventError) Error() string { return e.s } -func NewEvent(data []byte, name string) (Event, error) { - log.Printf("D! 
New %v event received", name) +func (gh *GithubWebhook) NewEvent(data []byte, name string) (Event, error) { + gh.log.Debugf("New %v event received", name) switch name { case "commit_comment": return generateEvent(data, &CommitCommentEvent{}) diff --git a/plugins/inputs/webhooks/github/github_webhooks_test.go b/plugins/inputs/webhooks/github/github_webhooks_test.go index 65041e4a06125..a13e568671f83 100644 --- a/plugins/inputs/webhooks/github/github_webhooks_test.go +++ b/plugins/inputs/webhooks/github/github_webhooks_test.go @@ -11,7 +11,7 @@ import ( func GithubWebhookRequest(event string, jsonString string, t *testing.T) { var acc testutil.Accumulator - gh := &GithubWebhook{Path: "/github", acc: &acc} + gh := &GithubWebhook{Path: "/github", acc: &acc, log: testutil.Logger{}} req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString)) req.Header.Add("X-Github-Event", event) w := httptest.NewRecorder() @@ -23,7 +23,7 @@ func GithubWebhookRequest(event string, jsonString string, t *testing.T) { func GithubWebhookRequestWithSignature(event string, jsonString string, t *testing.T, signature string, expectedStatus int) { var acc testutil.Accumulator - gh := &GithubWebhook{Path: "/github", Secret: "signature", acc: &acc} + gh := &GithubWebhook{Path: "/github", Secret: "signature", acc: &acc, log: testutil.Logger{}} req, _ := http.NewRequest("POST", "/github", strings.NewReader(jsonString)) req.Header.Add("X-Github-Event", event) req.Header.Add("X-Hub-Signature", signature) diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index 67ba86908d1a1..b23cc2a1dd20b 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -3,25 +3,27 @@ package mandrill import ( "encoding/json" "io" - "log" "net/http" "net/url" "time" - "github.com/gorilla/mux" "github.com/influxdata/telegraf" + + "github.com/gorilla/mux" ) type MandrillWebhook struct { Path string acc telegraf.Accumulator + log telegraf.Logger } -func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (md *MandrillWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(md.Path, md.returnOK).Methods("HEAD") router.HandleFunc(md.Path, md.eventHandler).Methods("POST") - log.Printf("I! Started the webhooks_mandrill on %s\n", md.Path) + md.log = log + md.log.Infof("Started the webhooks_mandrill on %s", md.Path) md.acc = acc } diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go index 5aa8ecaf83fc2..5fac939bbe258 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -3,22 +3,24 @@ package papertrail import ( "encoding/json" "fmt" - "log" "net/http" "time" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) type PapertrailWebhook struct { Path string acc telegraf.Accumulator + log telegraf.Logger } -func (pt *PapertrailWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (pt *PapertrailWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(pt.Path, pt.eventHandler).Methods("POST") - log.Printf("I! 
Started the papertrail_webhook on %s", pt.Path) + pt.log = log + pt.log.Infof("Started the papertrail_webhook on %s", pt.Path) pt.acc = acc } @@ -64,7 +66,7 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request } pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) } - } else if payload.Counts != nil { + } else if payload.Counts != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code // Handle count-based payload for _, c := range payload.Counts { for ts, count := range *c.TimeSeries { diff --git a/plugins/inputs/webhooks/particle/particle_webhooks.go b/plugins/inputs/webhooks/particle/particle_webhooks.go index ad93ea7c56477..4be5126860865 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks.go @@ -6,6 +6,7 @@ import ( "time" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) @@ -38,10 +39,13 @@ func (e *event) Time() (time.Time, error) { type ParticleWebhook struct { Path string acc telegraf.Accumulator + log telegraf.Logger } -func (rb *ParticleWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (rb *ParticleWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST") + rb.log = log + rb.log.Infof("Started the webhooks_particle on %s", rb.Path) rb.acc = acc } diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index d9c1323cdd608..3b8d2b02cac1f 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -4,22 +4,24 @@ import ( "encoding/json" "errors" "io" - "log" "net/http" "time" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) type RollbarWebhook struct { Path string acc telegraf.Accumulator + log telegraf.Logger } -func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { +func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) { router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST") - log.Printf("I! 
Started the webhooks_rollbar on %s\n", rb.Path) + rb.log = log + rb.log.Infof("Started the webhooks_rollbar on %s", rb.Path) rb.acc = acc } diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index a6f02beffd5d8..2156d9309abac 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -7,9 +7,9 @@ import ( "reflect" "github.com/gorilla/mux" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/webhooks/filestack" "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" "github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill" @@ -19,7 +19,7 @@ import ( ) type Webhook interface { - Register(router *mux.Router, acc telegraf.Accumulator) + Register(router *mux.Router, acc telegraf.Accumulator, log telegraf.Logger) } func init() { @@ -79,7 +79,7 @@ func (wb *Webhooks) Gather(_ telegraf.Accumulator) error { return nil } -// Looks for fields which implement Webhook interface +// AvailableWebhooks Looks for fields which implement Webhook interface func (wb *Webhooks) AvailableWebhooks() []Webhook { webhooks := make([]Webhook, 0) s := reflect.ValueOf(wb).Elem() @@ -104,7 +104,7 @@ func (wb *Webhooks) Start(acc telegraf.Accumulator) error { r := mux.NewRouter() for _, webhook := range wb.AvailableWebhooks() { - webhook.Register(r, acc) + webhook.Register(r, acc, wb.Log) } wb.srv = &http.Server{Handler: r} From 59eeddb41e85dffc5bc0df9b7ba3e7fd4c480499 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 30 Nov 2021 21:50:00 +0100 Subject: [PATCH 089/133] fix: Linter fixes for plugins/serializers/[a-z]* (#10181) Co-authored-by: Pawel Zak --- plugins/serializers/carbon2/carbon2.go | 28 +++---- plugins/serializers/carbon2/carbon2_test.go | 17 ++-- plugins/serializers/graphite/graphite_test.go | 79 +++++++++---------- plugins/serializers/influx/influx.go | 14 ++-- plugins/serializers/json/json.go | 3 +- plugins/serializers/json/json_test.go | 31 +++----- plugins/serializers/msgpack/metric.go | 44 +++++------ plugins/serializers/msgpack/metric_test.go | 42 +++++----- plugins/serializers/msgpack/msgpack_test.go | 43 +++++----- plugins/serializers/nowmetric/nowmetric.go | 15 ++-- .../serializers/nowmetric/nowmetric_test.go | 39 ++++----- plugins/serializers/prometheus/collection.go | 17 ++-- plugins/serializers/prometheus/convert.go | 9 ++- .../prometheusremotewrite.go | 17 ++-- plugins/serializers/registry.go | 2 +- .../splunkmetric/splunkmetric_test.go | 60 +++++++------- plugins/serializers/wavefront/wavefront.go | 8 +- .../serializers/wavefront/wavefront_test.go | 27 ++++--- 18 files changed, 237 insertions(+), 258 deletions(-) diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go index 4eb5798d64a69..db8c38222972e 100644 --- a/plugins/serializers/carbon2/carbon2.go +++ b/plugins/serializers/carbon2/carbon2.go @@ -65,7 +65,7 @@ func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { var batch bytes.Buffer for _, metric := range metrics { - batch.Write(s.createObject(metric)) + batch.Write(s.createObject(metric)) //nolint:revive // from buffer.go: "err is always nil" } return batch.Bytes(), nil } @@ -83,31 +83,27 @@ func (s *Serializer) createObject(metric telegraf.Metric) []byte { switch metricsFormat { case Carbon2FormatFieldSeparate: - 
m.WriteString(serializeMetricFieldSeparate( - name, fieldName, - )) + m.WriteString(serializeMetricFieldSeparate(name, fieldName)) //nolint:revive // from buffer.go: "err is always nil" case Carbon2FormatMetricIncludesField: - m.WriteString(serializeMetricIncludeField( - name, fieldName, - )) + m.WriteString(serializeMetricIncludeField(name, fieldName)) //nolint:revive // from buffer.go: "err is always nil" } for _, tag := range metric.TagList() { - m.WriteString(strings.Replace(tag.Key, " ", "_", -1)) - m.WriteString("=") + m.WriteString(strings.Replace(tag.Key, " ", "_", -1)) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString("=") //nolint:revive // from buffer.go: "err is always nil" value := tag.Value if len(value) == 0 { value = "null" } - m.WriteString(strings.Replace(value, " ", "_", -1)) - m.WriteString(" ") + m.WriteString(strings.Replace(value, " ", "_", -1)) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" } - m.WriteString(" ") - m.WriteString(formatValue(fieldValue)) - m.WriteString(" ") - m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10)) - m.WriteString("\n") + m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(formatValue(fieldValue)) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(" ") //nolint:revive // from buffer.go: "err is always nil" + m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10)) //nolint:revive // from buffer.go: "err is always nil" + m.WriteString("\n") //nolint:revive // from buffer.go: "err is always nil" } return m.Bytes() } diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go index 86f1b66db8932..babc75f6c43a2 100644 --- a/plugins/serializers/carbon2/carbon2_test.go +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -44,7 +43,7 @@ func TestSerializeMetricFloat(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -81,7 +80,7 @@ func TestSerializeMetricWithEmptyStringTag(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -118,7 +117,7 @@ func TestSerializeWithSpaces(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -155,7 +154,7 @@ func TestSerializeMetricInt(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -192,7 +191,7 @@ func TestSerializeMetricString(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -248,7 +247,7 @@ func TestSerializeMetricBool(t *testing.T) { buf, err := s.Serialize(tc.metric) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } @@ -291,7 +290,7 @@ metric=cpu_value 42 0 buf, err := s.SerializeBatch(metrics) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) 
}) } } @@ -400,7 +399,7 @@ func TestSerializeMetricIsProperlySanitized(t *testing.T) { buf, err := s.Serialize(m) require.NoError(t, err) - assert.Equal(t, tc.expected, string(buf)) + require.Equal(t, tc.expected, string(buf)) }) } } diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index f2fd3b7f150a9..4fe348e6fd365 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -55,9 +54,9 @@ func TestGraphiteTags(t *testing.T) { tags2 := buildTags(m2.Tags()) tags3 := buildTags(m3.Tags()) - assert.Equal(t, "192_168_0_1", tags1) - assert.Equal(t, "first.second.192_168_0_1", tags2) - assert.Equal(t, "first.second", tags3) + require.Equal(t, "192_168_0_1", tags1) + require.Equal(t, "first.second.192_168_0_1", tags2) + require.Equal(t, "first.second", tags3) } func TestSerializeMetricNoHost(t *testing.T) { @@ -82,7 +81,7 @@ func TestSerializeMetricNoHost(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { @@ -110,7 +109,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricHost(t *testing.T) { @@ -136,7 +135,7 @@ func TestSerializeMetricHost(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { @@ -157,8 +156,8 @@ func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { "cp* tags.measurement.host.field", "new_cpu tags.host.measurement.field", }) - assert.NoError(t, err) - assert.Equal(t, defaultTemplate, "") + require.NoError(t, err) + require.Equal(t, defaultTemplate, "") s := GraphiteSerializer{ Templates: templates, @@ -170,7 +169,7 @@ func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { buf = append(buf, buf2...) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) + require.NoError(t, err) expS := []string{ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), @@ -180,7 +179,7 @@ func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { @@ -201,8 +200,8 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { "cp* tags.measurement.host.field", "tags.host.measurement.field", }) - assert.NoError(t, err) - assert.Equal(t, defaultTemplate, "tags.host.measurement.field") + require.NoError(t, err) + require.Equal(t, defaultTemplate, "tags.host.measurement.field") s := GraphiteSerializer{ Templates: templates, @@ -215,7 +214,7 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { buf = append(buf, buf2...) 
mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.NoError(t, err) + require.NoError(t, err) expS := []string{ fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), @@ -225,7 +224,7 @@ func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricHostWithTagSupport(t *testing.T) { @@ -254,7 +253,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } // test that a field named "value" gets ignored. @@ -277,7 +276,7 @@ func TestSerializeValueField(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeValueFieldWithTagSupport(t *testing.T) { @@ -302,7 +301,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { expS := []string{ fmt.Sprintf("cpu;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } // test that a field named "value" gets ignored in middle of template. @@ -327,7 +326,7 @@ func TestSerializeValueField2(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeValueString(t *testing.T) { @@ -347,7 +346,7 @@ func TestSerializeValueString(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.Equal(t, "", mS[0]) + require.Equal(t, "", mS[0]) } func TestSerializeValueStringWithTagSupport(t *testing.T) { @@ -368,7 +367,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") - assert.Equal(t, "", mS[0]) + require.Equal(t, "", mS[0]) } func TestSerializeValueBoolean(t *testing.T) { @@ -396,7 +395,7 @@ func TestSerializeValueBoolean(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeValueBooleanWithTagSupport(t *testing.T) { @@ -425,7 +424,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeValueUnsigned(t *testing.T) { @@ -465,7 +464,7 @@ func TestSerializeFieldWithSpaces(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { @@ -490,7 +489,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } // test that tags with spaces get fixed. 
@@ -515,7 +514,7 @@ func TestSerializeTagWithSpaces(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.cpu_0.us-west-2.cpu.field_with_spaces 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { @@ -540,7 +539,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu_0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeTagWithSpacesWithTagSupportCompatibleSanitize(t *testing.T) { @@ -566,7 +565,7 @@ func TestSerializeTagWithSpacesWithTagSupportCompatibleSanitize(t *testing.T) { expS := []string{ fmt.Sprintf("cpu.field_with_spaces;cpu=cpu\\ 0;datacenter=us-west-2;host=localhost 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } // test that a field named "value" gets ignored at beginning of template. @@ -591,7 +590,7 @@ func TestSerializeValueField3(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.cpu0.us-west-2.cpu 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } // test that a field named "value" gets ignored at beginning of template. @@ -616,7 +615,7 @@ func TestSerializeValueField5(t *testing.T) { expS := []string{ fmt.Sprintf("localhost.us-west-2.cpu0.cpu 91.5 %d", now.Unix()), } - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricPrefix(t *testing.T) { @@ -642,7 +641,7 @@ func TestSerializeMetricPrefix(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { @@ -672,7 +671,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { } sort.Strings(mS) sort.Strings(expS) - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeBucketNameNoHost(t *testing.T) { @@ -689,7 +688,7 @@ func TestSerializeBucketNameNoHost(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), "", "") expS := "cpu0.us-west-2.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeBucketNameHost(t *testing.T) { @@ -702,7 +701,7 @@ func TestSerializeBucketNameHost(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), "", "") expS := "localhost.cpu0.us-west-2.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeBucketNamePrefix(t *testing.T) { @@ -715,7 +714,7 @@ func TestSerializeBucketNamePrefix(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), "", "prefix") expS := "prefix.localhost.cpu0.us-west-2.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestTemplate1(t *testing.T) { @@ -728,7 +727,7 @@ func TestTemplate1(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), template1, "") expS := "cpu0.us-west-2.localhost.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestTemplate2(t *testing.T) { @@ -741,7 +740,7 @@ func TestTemplate2(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), template2, "") expS := "localhost.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestTemplate3(t *testing.T) { @@ -754,7 +753,7 @@ func TestTemplate3(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), template3, "") expS := "localhost.cpu0.us-west-2.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func 
TestTemplate4(t *testing.T) { @@ -767,7 +766,7 @@ func TestTemplate4(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), template4, "") expS := "localhost.cpu0.us-west-2.cpu" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestTemplate6(t *testing.T) { @@ -780,7 +779,7 @@ func TestTemplate6(t *testing.T) { mS := SerializeBucketName(m.Name(), m.Tags(), template6, "") expS := "localhost.cpu0.us-west-2.cpu.FIELDNAME" - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestClean(t *testing.T) { diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index 978614376dabb..f31c8f2b683eb 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -80,8 +80,8 @@ func NewSerializer() *Serializer { return serializer } -func (s *Serializer) SetMaxLineBytes(bytes int) { - s.maxLineBytes = bytes +func (s *Serializer) SetMaxLineBytes(maxLineBytes int) { + s.maxLineBytes = maxLineBytes } func (s *Serializer) SetFieldSortOrder(order FieldSortOrder) { @@ -135,7 +135,7 @@ func (s *Serializer) writeString(w io.Writer, str string) error { return err } -func (s *Serializer) write(w io.Writer, b []byte) error { +func (s *Serializer) writeBytes(w io.Writer, b []byte) error { n, err := w.Write(b) s.bytesWritten += n return err @@ -247,7 +247,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { return s.newMetricError(NeedMoreSpace) } - err = s.write(w, s.footer) + err = s.writeBytes(w, s.footer) if err != nil { return err } @@ -262,7 +262,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { } if firstField { - err = s.write(w, s.header) + err = s.writeBytes(w, s.header) if err != nil { return err } @@ -273,7 +273,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { } } - err = s.write(w, s.pair) + err = s.writeBytes(w, s.pair) if err != nil { return err } @@ -286,7 +286,7 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { return s.newMetricError(NoFields) } - return s.write(w, s.footer) + return s.writeBytes(w, s.footer) } func (s *Serializer) newMetricError(reason string) *MetricError { diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index 6db2a43ee231a..a2f32f37a460d 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -61,8 +61,7 @@ func (s *Serializer) createObject(metric telegraf.Metric) map[string]interface{} fields := make(map[string]interface{}, len(metric.FieldList())) for _, field := range metric.FieldList() { - switch fv := field.Value.(type) { - case float64: + if fv, ok := field.Value.(float64); ok { // JSON does not support these special values if math.IsNaN(fv) || math.IsInf(fv, 0) { continue diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index be939243904eb..eaecb960360b3 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -6,20 +6,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestSerializeMetricFloat(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -33,9 +26,9 @@ func 
TestSerializeMetricFloat(t *testing.T) { s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":91.5},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") - assert.Equal(t, string(expS), string(buf)) + require.Equal(t, string(expS), string(buf)) } func TestSerialize_TimestampUnits(t *testing.T) { @@ -112,10 +105,10 @@ func TestSerializeMetricInt(t *testing.T) { s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") - assert.Equal(t, string(expS), string(buf)) + require.Equal(t, string(expS), string(buf)) } func TestSerializeMetricString(t *testing.T) { @@ -131,10 +124,10 @@ func TestSerializeMetricString(t *testing.T) { s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":"foobar"},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") - assert.Equal(t, string(expS), string(buf)) + require.Equal(t, string(expS), string(buf)) } func TestSerializeMultiFields(t *testing.T) { @@ -151,10 +144,10 @@ func TestSerializeMultiFields(t *testing.T) { s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"usage_idle":90,"usage_total":8559615},"name":"cpu","tags":{"cpu":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") - assert.Equal(t, string(expS), string(buf)) + require.Equal(t, string(expS), string(buf)) } func TestSerializeMetricWithEscapes(t *testing.T) { @@ -169,10 +162,10 @@ func TestSerializeMetricWithEscapes(t *testing.T) { s, _ := NewSerializer(0, "") buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := []byte(fmt.Sprintf(`{"fields":{"U,age=Idle":90},"name":"My CPU","tags":{"cpu tag":"cpu0"},"timestamp":%d}`, now.Unix()) + "\n") - assert.Equal(t, string(expS), string(buf)) + require.Equal(t, string(expS), string(buf)) } func TestSerializeBatch(t *testing.T) { diff --git a/plugins/serializers/msgpack/metric.go b/plugins/serializers/msgpack/metric.go index 6b8a00878b6a8..12d898cfb7f1a 100644 --- a/plugins/serializers/msgpack/metric.go +++ b/plugins/serializers/msgpack/metric.go @@ -42,9 +42,9 @@ func (*MessagePackTime) ExtensionType() int8 { // 32bits: [1970-01-01 00:00:00 UTC, 2106-02-07 06:28:16 UTC) range. If the nanoseconds part is 0 // 64bits: [1970-01-01 00:00:00.000000000 UTC, 2514-05-30 01:53:04.000000000 UTC) range. // 96bits: [-584554047284-02-23 16:59:44 UTC, 584554051223-11-09 07:00:16.000000000 UTC) range. 
-func (t *MessagePackTime) Len() int { - sec := t.time.Unix() - nsec := t.time.Nanosecond() +func (z *MessagePackTime) Len() int { + sec := z.time.Unix() + nsec := z.time.Nanosecond() if sec < 0 || sec >= (1<<34) { // 96 bits encoding return 12 @@ -56,21 +56,21 @@ func (t *MessagePackTime) Len() int { } // MarshalBinaryTo implements the Extension interface -func (t *MessagePackTime) MarshalBinaryTo(buf []byte) error { - len := t.Len() +func (z *MessagePackTime) MarshalBinaryTo(buf []byte) error { + length := z.Len() - if len == 4 { - sec := t.time.Unix() + if length == 4 { + sec := z.time.Unix() binary.BigEndian.PutUint32(buf, uint32(sec)) - } else if len == 8 { - sec := t.time.Unix() - nsec := t.time.Nanosecond() + } else if length == 8 { + sec := z.time.Unix() + nsec := z.time.Nanosecond() data := uint64(nsec)<<34 | (uint64(sec) & 0x03_ffff_ffff) binary.BigEndian.PutUint64(buf, data) - } else if len == 12 { - sec := t.time.Unix() - nsec := t.time.Nanosecond() + } else if length == 12 { + sec := z.time.Unix() + nsec := z.time.Nanosecond() binary.BigEndian.PutUint32(buf, uint32(nsec)) binary.BigEndian.PutUint64(buf[4:], uint64(sec)) @@ -80,24 +80,24 @@ func (t *MessagePackTime) MarshalBinaryTo(buf []byte) error { } // UnmarshalBinary implements the Extension interface -func (t *MessagePackTime) UnmarshalBinary(buf []byte) error { - len := len(buf) +func (z *MessagePackTime) UnmarshalBinary(buf []byte) error { + length := len(buf) - if len == 4 { + if length == 4 { sec := binary.BigEndian.Uint32(buf) - t.time = time.Unix(int64(sec), 0) - } else if len == 8 { + z.time = time.Unix(int64(sec), 0) + } else if length == 8 { data := binary.BigEndian.Uint64(buf) nsec := (data & 0xfffffffc_00000000) >> 34 - sec := (data & 0x00000003_ffffffff) + sec := data & 0x00000003_ffffffff - t.time = time.Unix(int64(sec), int64(nsec)) - } else if len == 12 { + z.time = time.Unix(int64(sec), int64(nsec)) + } else if length == 12 { nsec := binary.BigEndian.Uint32(buf) sec := binary.BigEndian.Uint64(buf[4:]) - t.time = time.Unix(int64(sec), int64(nsec)) + z.time = time.Unix(int64(sec), int64(nsec)) } return nil diff --git a/plugins/serializers/msgpack/metric_test.go b/plugins/serializers/msgpack/metric_test.go index e0ea25ebc88a7..e85fe4a020feb 100644 --- a/plugins/serializers/msgpack/metric_test.go +++ b/plugins/serializers/msgpack/metric_test.go @@ -6,24 +6,25 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMsgPackTime32(t *testing.T) { // Maximum of 4 bytes encodable time var sec int64 = 0xFFFFFFFF - var nsec int64 = 0 + var nsec int64 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - assert.Equal(t, t1.Len(), 4) + require.Equal(t, t1.Len(), 4) buf := make([]byte, t1.Len()) - assert.NoError(t, t1.MarshalBinaryTo(buf)) + require.NoError(t, t1.MarshalBinaryTo(buf)) t2 := new(MessagePackTime) - t2.UnmarshalBinary(buf) + err := t2.UnmarshalBinary(buf) + require.NoError(t, err) - assert.Equal(t, t1.time, t2.time) + require.Equal(t, t1.time, t2.time) } func TestMsgPackTime64(t *testing.T) { @@ -32,15 +33,16 @@ func TestMsgPackTime64(t *testing.T) { var nsec int64 = 999999999 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - assert.Equal(t, t1.Len(), 8) + require.Equal(t, t1.Len(), 8) buf := make([]byte, t1.Len()) - assert.NoError(t, t1.MarshalBinaryTo(buf)) + require.NoError(t, t1.MarshalBinaryTo(buf)) t2 := new(MessagePackTime) - t2.UnmarshalBinary(buf) + err := t2.UnmarshalBinary(buf) + require.NoError(t, err) - assert.Equal(t, 
t1.time, t2.time) + require.Equal(t, t1.time, t2.time) } func TestMsgPackTime96(t *testing.T) { @@ -49,26 +51,28 @@ func TestMsgPackTime96(t *testing.T) { var nsec int64 = 111111111 t1 := MessagePackTime{time: time.Unix(sec, nsec)} - assert.Equal(t, t1.Len(), 12) + require.Equal(t, t1.Len(), 12) buf := make([]byte, t1.Len()) - assert.NoError(t, t1.MarshalBinaryTo(buf)) + require.NoError(t, t1.MarshalBinaryTo(buf)) t2 := new(MessagePackTime) - t2.UnmarshalBinary(buf) + err := t2.UnmarshalBinary(buf) + require.NoError(t, err) - assert.True(t, t1.time.Equal(t2.time)) + require.True(t, t1.time.Equal(t2.time)) // Testing the default value: 0001-01-01T00:00:00Z t1 = MessagePackTime{} - assert.Equal(t, t1.Len(), 12) - assert.NoError(t, t1.MarshalBinaryTo(buf)) + require.Equal(t, t1.Len(), 12) + require.NoError(t, t1.MarshalBinaryTo(buf)) t2 = new(MessagePackTime) - t2.UnmarshalBinary(buf) + err = t2.UnmarshalBinary(buf) + require.NoError(t, err) - assert.True(t, t1.time.Equal(t2.time)) + require.True(t, t1.time.Equal(t2.time)) } func TestMsgPackTimeEdgeCases(t *testing.T) { @@ -138,6 +142,6 @@ func TestMsgPackTimeEdgeCases(t *testing.T) { buf = buf[:0] buf, _ = m.MarshalMsg(buf) - assert.Equal(t, expected[i], buf[12:len(buf)-14]) + require.Equal(t, expected[i], buf[12:len(buf)-14]) } } diff --git a/plugins/serializers/msgpack/msgpack_test.go b/plugins/serializers/msgpack/msgpack_test.go index 36cc66ea52c59..dc0ecf9432699 100644 --- a/plugins/serializers/msgpack/msgpack_test.go +++ b/plugins/serializers/msgpack/msgpack_test.go @@ -3,10 +3,11 @@ package msgpack import ( "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func toTelegrafMetric(m Metric) telegraf.Metric { @@ -20,13 +21,13 @@ func TestSerializeMetricInt(t *testing.T) { s := Serializer{} var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) m2 := &Metric{} left, err := m2.UnmarshalMsg(buf) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, len(left), 0) + require.Equal(t, len(left), 0) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) } @@ -37,61 +38,61 @@ func TestSerializeMetricString(t *testing.T) { s := Serializer{} var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) m2 := &Metric{} left, err := m2.UnmarshalMsg(buf) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, len(left), 0) + require.Equal(t, len(left), 0) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) } func TestSerializeMultiFields(t *testing.T) { - m := testutil.TestMetric(int(90)) + m := testutil.TestMetric(90) m.AddField("value2", 8559615) s := Serializer{} var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) m2 := &Metric{} left, err := m2.UnmarshalMsg(buf) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, len(left), 0) + require.Equal(t, len(left), 0) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) } func TestSerializeMetricWithEscapes(t *testing.T) { - m := testutil.TestMetric(int(90)) + m := testutil.TestMetric(90) m.AddField("U,age=Idle", int64(90)) m.AddTag("cpu tag", "cpu0") s := Serializer{} var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) m2 := &Metric{} left, err := m2.UnmarshalMsg(buf) - assert.NoError(t, err) + require.NoError(t, err) - 
assert.Equal(t, len(left), 0) + require.Equal(t, len(left), 0) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*m2)) } func TestSerializeMultipleMetric(t *testing.T) { - m := testutil.TestMetric(int(90)) + m := testutil.TestMetric(90) s := Serializer{} encoded, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) // Multiple metrics in continous bytes stream var buf []byte @@ -105,27 +106,27 @@ func TestSerializeMultipleMetric(t *testing.T) { decodeM := &Metric{} left, err = decodeM.UnmarshalMsg(left) - assert.NoError(t, err) + require.NoError(t, err) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) } } func TestSerializeBatch(t *testing.T) { - m := testutil.TestMetric(int(90)) + m := testutil.TestMetric(90) metrics := []telegraf.Metric{m, m, m, m} s := Serializer{} buf, err := s.SerializeBatch(metrics) - assert.NoError(t, err) + require.NoError(t, err) left := buf for len(left) > 0 { decodeM := &Metric{} left, err = decodeM.UnmarshalMsg(left) - assert.NoError(t, err) + require.NoError(t, err) testutil.RequireMetricEqual(t, m, toTelegrafMetric(*decodeM)) } } diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go index b1960bb7a9f57..fe0c4dc492c64 100644 --- a/plugins/serializers/nowmetric/nowmetric.go +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -46,9 +46,9 @@ func NewSerializer() (*serializer, error) { func (s *serializer) Serialize(metric telegraf.Metric) (out []byte, err error) { serialized, err := s.createObject(metric) if err != nil { - return []byte{}, nil + return []byte{}, err } - return serialized, err + return serialized, nil } func (s *serializer) SerializeBatch(metrics []telegraf.Metric) (out []byte, err error) { @@ -56,7 +56,7 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) (out []byte, err for _, metric := range metrics { m, err := s.createObject(metric) if err != nil { - return nil, fmt.Errorf("D! [serializer.nowmetric] Dropping invalid metric: %s", metric.Name()) + return nil, fmt.Errorf("dropping invalid metric: %s", metric.Name()) } else if m != nil { objects = append(objects, m...) 
} @@ -97,7 +97,7 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { } // Format timestamp to UNIX epoch - oimetric.Timestamp = (metric.Time().UnixNano() / int64(time.Millisecond)) + oimetric.Timestamp = metric.Time().UnixNano() / int64(time.Millisecond) // Loop of fields value pair and build datapoint for each of them for _, field := range metric.FieldList() { @@ -129,9 +129,6 @@ func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { } func verifyValue(v interface{}) bool { - switch v.(type) { - case string: - return false - } - return true + _, ok := v.(string) + return !ok } diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go index b9e7914a6adbf..05f68a23115a7 100644 --- a/plugins/serializers/nowmetric/nowmetric_test.go +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -6,19 +6,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestSerializeMetricFloat(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -32,9 +25,9 @@ func TestSerializeMetricFloat(t *testing.T) { s, _ := NewSerializer() var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) - expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) - assert.Equal(t, string(expS), string(buf)) + require.NoError(t, err) + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, now.UnixNano()/int64(time.Millisecond))) + require.Equal(t, string(expS), string(buf)) } func TestSerialize_TimestampUnits(t *testing.T) { @@ -95,10 +88,10 @@ func TestSerializeMetricInt(t *testing.T) { s, _ := NewSerializer() var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) - expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) - assert.Equal(t, string(expS), string(buf)) + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, now.UnixNano()/int64(time.Millisecond))) + require.Equal(t, string(expS), string(buf)) } func TestSerializeMetricString(t *testing.T) { @@ -114,9 +107,9 @@ func TestSerializeMetricString(t *testing.T) { s, _ := NewSerializer() var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, "null", string(buf)) + require.Equal(t, "null", string(buf)) } func TestSerializeMultiFields(t *testing.T) { @@ -138,10 +131,10 @@ func TestSerializeMultiFields(t *testing.T) { s, _ := NewSerializer() var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) - expS := 
[]byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)), (now.UnixNano() / int64(time.Millisecond)))) - assert.Equal(t, string(expS), string(buf)) + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, now.UnixNano()/int64(time.Millisecond), now.UnixNano()/int64(time.Millisecond))) + require.Equal(t, string(expS), string(buf)) } func TestSerializeMetricWithEscapes(t *testing.T) { @@ -156,10 +149,10 @@ func TestSerializeMetricWithEscapes(t *testing.T) { s, _ := NewSerializer() buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) - expS := []byte(fmt.Sprintf(`[{"metric_type":"U,age=Idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) - assert.Equal(t, string(expS), string(buf)) + expS := []byte(fmt.Sprintf(`[{"metric_type":"U,age=Idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, now.UnixNano()/int64(time.Millisecond))) + require.Equal(t, string(expS), string(buf)) } func TestSerializeBatch(t *testing.T) { diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index e160107101ab7..9f23544d66cb0 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -7,9 +7,10 @@ import ( "strings" "time" - "github.com/influxdata/telegraf" dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" + + "github.com/influxdata/telegraf" ) const helpString = "Telegraf collected metric" @@ -86,10 +87,10 @@ type MetricKey uint64 func MakeMetricKey(labels []LabelPair) MetricKey { h := fnv.New64a() for _, label := range labels { - h.Write([]byte(label.Name)) - h.Write([]byte("\x00")) - h.Write([]byte(label.Value)) - h.Write([]byte("\x00")) + h.Write([]byte(label.Name)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\x00")) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(label.Value)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\x00")) //nolint:revive // from hash.go: "It never returns an error" } return MetricKey(h.Sum64()) } @@ -357,8 +358,7 @@ func (c *Collection) GetEntries(order MetricSortOrder) []Entry { entries = append(entries, entry) } - switch order { - case SortMetrics: + if order == SortMetrics { sort.Slice(entries, func(i, j int) bool { lhs := entries[i].Family rhs := entries[j].Family @@ -378,8 +378,7 @@ func (c *Collection) GetMetrics(entry Entry, order MetricSortOrder) []*Metric { metrics = append(metrics, metric) } - switch order { - case SortMetrics: + if order == SortMetrics { sort.Slice(metrics, func(i, j int) bool { lhs := metrics[i].Labels rhs := metrics[j].Labels diff --git a/plugins/serializers/prometheus/convert.go b/plugins/serializers/prometheus/convert.go index 131ac31b8036c..dc5e3b9622dee 100644 --- a/plugins/serializers/prometheus/convert.go +++ b/plugins/serializers/prometheus/convert.go @@ -4,8 +4,9 @@ import ( "strings" 
"unicode" - "github.com/influxdata/telegraf" dto "github.com/prometheus/client_model/go" + + "github.com/influxdata/telegraf" ) type Table struct { @@ -89,13 +90,13 @@ func sanitize(name string, table Table) (string, bool) { switch { case i == 0: if unicode.In(r, table.First) { - b.WriteRune(r) + b.WriteRune(r) //nolint:revive // from builder.go: "It returns the length of r and a nil error." } default: if unicode.In(r, table.Rest) { - b.WriteRune(r) + b.WriteRune(r) //nolint:revive // from builder.go: "It returns the length of r and a nil error." } else { - b.WriteString("_") + b.WriteString("_") //nolint:revive // from builder.go: "It returns the length of s and a nil error." } } } diff --git a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go index b6dd180dba30b..1396d033ae448 100644 --- a/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go +++ b/plugins/serializers/prometheusremotewrite/prometheusremotewrite.go @@ -10,10 +10,10 @@ import ( "time" "github.com/golang/snappy" - "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/prometheus/prometheus/prompb" "github.com/influxdata/telegraf" - "github.com/prometheus/prometheus/prompb" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" ) type MetricKey uint64 @@ -210,8 +210,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { i++ } - switch s.config.MetricSortOrder { - case SortMetrics: + if s.config.MetricSortOrder == SortMetrics { sort.Slice(promTS, func(i, j int) bool { lhs := promTS[i].Labels rhs := promTS[j].Labels @@ -241,7 +240,7 @@ func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return nil, fmt.Errorf("unable to marshal protobuf: %v", err) } encoded := snappy.Encode(nil, data) - buf.Write(encoded) + buf.Write(encoded) //nolint:revive // from buffer.go: "err is always nil" return buf.Bytes(), nil } @@ -320,10 +319,10 @@ func (s *Serializer) createLabels(metric telegraf.Metric) []prompb.Label { func MakeMetricKey(labels []prompb.Label) MetricKey { h := fnv.New64a() for _, label := range labels { - h.Write([]byte(label.Name)) - h.Write([]byte("\x00")) - h.Write([]byte(label.Value)) - h.Write([]byte("\x00")) + h.Write([]byte(label.Name)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\x00")) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte(label.Value)) //nolint:revive // from hash.go: "It never returns an error" + h.Write([]byte("\x00")) //nolint:revive // from hash.go: "It never returns an error" } return MetricKey(h.Sum64()) } diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index b17364e66f0a6..144be0c379156 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -142,7 +142,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "msgpack": serializer, err = NewMsgpackSerializer() default: - err = fmt.Errorf("Invalid data format: %s", config.DataFormat) + err = fmt.Errorf("invalid data format: %s", config.DataFormat) } return serializer, err } diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go index c088d99f7f1a4..7b60598cd6402 100644 --- a/plugins/serializers/splunkmetric/splunkmetric_test.go +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -4,18 +4,12 @@ import ( "testing" "time" + 
"github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" ) -func MustMetric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestSerializeMetricFloat(t *testing.T) { // Test sub-second time now := time.Unix(1529875740, 819000000) @@ -30,9 +24,9 @@ func TestSerializeMetricFloat(t *testing.T) { s, _ := NewSerializer(false, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricFloatHec(t *testing.T) { @@ -49,9 +43,9 @@ func TestSerializeMetricFloatHec(t *testing.T) { s, _ := NewSerializer(true, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"time":1529875740.819,"fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricInt(t *testing.T) { @@ -67,10 +61,10 @@ func TestSerializeMetricInt(t *testing.T) { s, _ := NewSerializer(false, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricIntHec(t *testing.T) { @@ -86,10 +80,10 @@ func TestSerializeMetricIntHec(t *testing.T) { s, _ := NewSerializer(true, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"time":0,"fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricBool(t *testing.T) { @@ -98,17 +92,17 @@ func TestSerializeMetricBool(t *testing.T) { "container-name": "telegraf-test", } fields := map[string]interface{}{ - "oomkiller": bool(true), + "oomkiller": true, } m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(false, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricBoolHec(t *testing.T) { @@ -117,17 +111,17 @@ func TestSerializeMetricBoolHec(t *testing.T) { "container-name": "telegraf-test", } fields := map[string]interface{}{ - "oomkiller": bool(false), + "oomkiller": false, } m := metric.New("docker", tags, fields, now) s, _ := NewSerializer(true, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"time":0,"fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMetricString(t *testing.T) { @@ -144,11 +138,11 @@ func TestSerializeMetricString(t *testing.T) { s, _ := NewSerializer(false, false) var buf []byte buf, err := s.Serialize(m) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` - assert.Equal(t, expS, string(buf)) - assert.NoError(t, err) + 
require.Equal(t, expS, string(buf)) + require.NoError(t, err) } func TestSerializeBatch(t *testing.T) { @@ -173,10 +167,10 @@ func TestSerializeBatch(t *testing.T) { metrics := []telegraf.Metric{m, n} s, _ := NewSerializer(false, false) buf, err := s.SerializeBatch(metrics) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMulti(t *testing.T) { @@ -193,10 +187,10 @@ func TestSerializeMulti(t *testing.T) { metrics := []telegraf.Metric{m} s, _ := NewSerializer(false, true) buf, err := s.SerializeBatch(metrics) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeBatchHec(t *testing.T) { @@ -219,10 +213,10 @@ func TestSerializeBatchHec(t *testing.T) { metrics := []telegraf.Metric{m, n} s, _ := NewSerializer(true, false) buf, err := s.SerializeBatch(metrics) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"time":0,"fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"fields":{"_value":92,"metric_name":"cpu.value"}}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } func TestSerializeMultiHec(t *testing.T) { @@ -239,8 +233,8 @@ func TestSerializeMultiHec(t *testing.T) { metrics := []telegraf.Metric{m} s, _ := NewSerializer(true, true) buf, err := s.SerializeBatch(metrics) - assert.NoError(t, err) + require.NoError(t, err) expS := `{"time":0,"fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` - assert.Equal(t, expS, string(buf)) + require.Equal(t, expS, string(buf)) } diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go index 0abcf799d2a0f..2bb01266eaefe 100755 --- a/plugins/serializers/wavefront/wavefront.go +++ b/plugins/serializers/wavefront/wavefront.go @@ -49,7 +49,7 @@ func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*Wav return s, nil } -func (s *WavefrontSerializer) serialize(m telegraf.Metric) { +func (s *WavefrontSerializer) serializeMetric(m telegraf.Metric) { const metricSeparator = "." for fieldName, value := range m.Fields() { @@ -90,7 +90,7 @@ func (s *WavefrontSerializer) serialize(m telegraf.Metric) { func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { s.mu.Lock() s.scratch.Reset() - s.serialize(m) + s.serializeMetric(m) out := s.scratch.Copy() s.mu.Unlock() return out, nil @@ -100,7 +100,7 @@ func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, s.mu.Lock() s.scratch.Reset() for _, m := range metrics { - s.serialize(m) + s.serializeMetric(m) } out := s.scratch.Copy() s.mu.Unlock() @@ -200,7 +200,7 @@ func (b *buffer) WriteString(s string) { *b = append(*b, s...) } -// This is named WriteChar instead of WriteByte because the 'stdmethods' check +// WriteChar has this name instead of WriteByte because the 'stdmethods' check // of 'go vet' wants WriteByte to have the signature: // // func (b *buffer) WriteByte(c byte) error { ... 
} diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go index ee653c62b4072..fce637c61da58 100755 --- a/plugins/serializers/wavefront/wavefront_test.go +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/wavefront" - "github.com/stretchr/testify/assert" ) func TestBuildTags(t *testing.T) { @@ -185,7 +186,7 @@ func TestSerializeMetricFloat(t *testing.T) { mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.500000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricInt(t *testing.T) { @@ -204,7 +205,7 @@ func TestSerializeMetricInt(t *testing.T) { mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricBoolTrue(t *testing.T) { @@ -223,7 +224,7 @@ func TestSerializeMetricBoolTrue(t *testing.T) { mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 1.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricBoolFalse(t *testing.T) { @@ -242,7 +243,7 @@ func TestSerializeMetricBoolFalse(t *testing.T) { mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 0.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricFieldValue(t *testing.T) { @@ -257,11 +258,12 @@ func TestSerializeMetricFieldValue(t *testing.T) { m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{} - buf, _ := s.Serialize(m) + buf, err := s.Serialize(m) + require.NoError(t, err) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"cpu\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func TestSerializeMetricPrefix(t *testing.T) { @@ -276,11 +278,12 @@ func TestSerializeMetricPrefix(t *testing.T) { m := metric.New("cpu", tags, fields, now) s := WavefrontSerializer{Prefix: "telegraf."} - buf, _ := s.Serialize(m) + buf, err := s.Serialize(m) + require.NoError(t, err) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} - assert.Equal(t, expS, mS) + require.Equal(t, expS, mS) } func benchmarkMetrics(b *testing.B) [4]telegraf.Metric { @@ -310,7 +313,8 @@ func BenchmarkSerialize(b *testing.B) { metrics := benchmarkMetrics(b) b.ResetTimer() for i := 0; i < b.N; i++ { - s.Serialize(metrics[i%len(metrics)]) + _, err := s.Serialize(metrics[i%len(metrics)]) + require.NoError(b, err) } } @@ -320,6 +324,7 @@ func BenchmarkSerializeBatch(b *testing.B) { metrics := m[:] b.ResetTimer() for i := 0; i < b.N; i++ { - s.SerializeBatch(metrics) + _, err := s.SerializeBatch(metrics) + require.NoError(b, err) } } From 
d9eb4d06c55aa704c745690b0eecdce95a09c314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 30 Nov 2021 22:18:33 +0100 Subject: [PATCH 090/133] fix: Linter fixes for plugins/aggregators/[a-z]* (#10182) Co-authored-by: Pawel Zak --- plugins/aggregators/basicstats/basicstats.go | 6 ++-- .../aggregators/basicstats/basicstats_test.go | 17 ++++----- .../aggregators/derivative/derivative_test.go | 35 ++++++++++++------ .../aggregators/histogram/histogram_test.go | 36 ++++++------------- plugins/aggregators/merge/merge_test.go | 6 ++-- plugins/aggregators/quantile/algorithms.go | 6 ++-- 6 files changed, 53 insertions(+), 53 deletions(-) diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 4ad6c77056314..458a9b9c99560 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -129,7 +129,7 @@ func (b *BasicStats) Add(in telegraf.Metric) { //variable initialization x := fv mean := tmp.mean - M2 := tmp.M2 + m2 := tmp.M2 //counter compute n := tmp.count + 1 tmp.count = n @@ -138,8 +138,8 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean = mean + delta/n tmp.mean = mean //variance/stdev compute - M2 = M2 + delta*(x-mean) - tmp.M2 = M2 + m2 = m2 + delta*(x-mean) + tmp.M2 = m2 //max/min compute if fv < tmp.min { tmp.min = fv diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 51ecd5c992442..3f08624978446 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var m1 = metric.New("m1", @@ -697,11 +698,11 @@ func TestBasicStatsWithDefaultStats(t *testing.T) { acc := testutil.Accumulator{} aggregator.Push(&acc) - assert.True(t, acc.HasField("m1", "a_count")) - assert.True(t, acc.HasField("m1", "a_min")) - assert.True(t, acc.HasField("m1", "a_max")) - assert.True(t, acc.HasField("m1", "a_mean")) - assert.True(t, acc.HasField("m1", "a_stdev")) - assert.True(t, acc.HasField("m1", "a_s2")) - assert.False(t, acc.HasField("m1", "a_sum")) + require.True(t, acc.HasField("m1", "a_count")) + require.True(t, acc.HasField("m1", "a_min")) + require.True(t, acc.HasField("m1", "a_max")) + require.True(t, acc.HasField("m1", "a_mean")) + require.True(t, acc.HasField("m1", "a_stdev")) + require.True(t, acc.HasField("m1", "a_s2")) + require.False(t, acc.HasField("m1", "a_sum")) } diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go index fb84dae6ff54a..e0c91767018ef 100644 --- a/plugins/aggregators/derivative/derivative_test.go +++ b/plugins/aggregators/derivative/derivative_test.go @@ -4,6 +4,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -40,7 +42,8 @@ func TestTwoFullEventsWithParameter(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Add(finish) @@ -66,7 +69,8 @@ func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - 
derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(finish) derivative.Add(start) @@ -88,7 +92,8 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) { acc := testutil.Accumulator{} derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() duration, _ := time.ParseDuration("2s") @@ -130,7 +135,8 @@ func TestTwoFullEventsInSeperatePushes(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -163,7 +169,8 @@ func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -195,7 +202,8 @@ func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) // This test relies on RunningAggregator always callining Reset after Push @@ -220,7 +228,8 @@ func TestIgnoresMissingVariable(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) noParameter := metric.New("TestMetric", map[string]string{"state": "no_parameter"}, @@ -260,7 +269,8 @@ func TestMergesDifferenMetricsWithSameHash(t *testing.T) { acc := testutil.Accumulator{} derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() duration, _ := time.ParseDuration("2s") @@ -309,7 +319,8 @@ func TestDropsAggregatesOnMaxRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -332,7 +343,8 @@ func TestAddMetricsResetsRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), Log: testutil.Logger{}, } - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -356,7 +368,8 @@ func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { period, _ := time.ParseDuration("10s") derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() first := metric.New("One Field", diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index c2a05cc283c3d..ad24d5b338528 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) type fields map[string]interface{} @@ -82,9 +83,7 @@ func TestHistogram(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") 
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"}) @@ -106,9 +105,7 @@ func TestHistogramNonCumulative(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"}) @@ -130,9 +127,7 @@ func TestHistogramWithReset(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"}) @@ -155,10 +150,7 @@ func TestHistogramWithAllFields(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) @@ -188,10 +180,7 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"}) @@ -241,7 +230,7 @@ func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { func TestWrongBucketsOrder(t *testing.T) { defer func() { if r := recover(); r != nil { - assert.Equal( + require.Equal( t, "histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a", fmt.Sprint(r), @@ -291,12 +280,9 @@ func assertContainsTaggedField(t *testing.T, 
acc *testutil.Accumulator, metricNa } // check fields with their counts - if assert.Equal(t, fields, checkedMetric.Fields) { - return - } - - assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName)) + require.Equal(t, fields, checkedMetric.Fields) + return } - assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) + require.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) } diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 94e54590b586f..53a55f0853e1a 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -229,7 +229,8 @@ var m2 = metric.New( func BenchmarkMergeOne(b *testing.B) { var merger Merge - merger.Init() + err := merger.Init() + require.NoError(b, err) var acc testutil.NopAccumulator for n := 0; n < b.N; n++ { @@ -241,7 +242,8 @@ func BenchmarkMergeOne(b *testing.B) { func BenchmarkMergeTwo(b *testing.B) { var merger Merge - merger.Init() + err := merger.Init() + require.NoError(b, err) var acc testutil.NopAccumulator for n := 0; n < b.N; n++ { diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go index 641844f3f4e77..e6d73507a1155 100644 --- a/plugins/aggregators/quantile/algorithms.go +++ b/plugins/aggregators/quantile/algorithms.go @@ -49,8 +49,7 @@ func (e *exactAlgorithmR7) Quantile(q float64) float64 { // Get the quantile index and the fraction to the neighbor // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7 // Same as Excel and Numpy. - N := float64(size) - n := q * (N - 1) + n := q * (float64(size) - 1) i, gamma := math.Modf(n) j := int(i) if j < 0 { @@ -95,8 +94,7 @@ func (e *exactAlgorithmR8) Quantile(q float64) float64 { // Get the quantile index and the fraction to the neighbor // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8 - N := float64(size) - n := q*(N+1.0/3.0) - (2.0 / 3.0) // Indices are zero-base here but one-based in the paper + n := q*(float64(size)+1.0/3.0) - (2.0 / 3.0) // Indices are zero-base here but one-based in the paper i, gamma := math.Modf(n) j := int(i) if j < 0 { From 27dea9bd8f2b9516712904bb9214a26aadba1d24 Mon Sep 17 00:00:00 2001 From: Vladislav Date: Wed, 1 Dec 2021 00:25:21 +0300 Subject: [PATCH 091/133] feat: add new groundwork output plugin (#9891) --- docs/LICENSE_OF_DEPENDENCIES.md | 1 + go.mod | 3 +- go.sum | 2 + plugins/outputs/all/all.go | 1 + plugins/outputs/groundwork/README.md | 38 +++ plugins/outputs/groundwork/groundwork.go | 289 ++++++++++++++++++ plugins/outputs/groundwork/groundwork_test.go | 96 ++++++ 7 files changed, 429 insertions(+), 1 deletion(-) create mode 100644 plugins/outputs/groundwork/README.md create mode 100644 plugins/outputs/groundwork/groundwork.go create mode 100644 plugins/outputs/groundwork/groundwork_test.go diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index e1caaf320a0f9..f9e46d94c04a4 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -115,6 +115,7 @@ following works: - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) - 
github.com/grid-x/serial [MIT License](https://github.com/grid-x/serial/blob/master/LICENSE) +- github.com/gwos/tcg/sdk [MIT License](https://github.com/gwos/tcg/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) diff --git a/go.mod b/go.mod index 2caf8e9aede81..1429f31fded52 100644 --- a/go.mod +++ b/go.mod @@ -133,6 +133,7 @@ require ( github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec github.com/hashicorp/consul/api v1.9.1 @@ -141,7 +142,7 @@ require ( github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 diff --git a/go.sum b/go.sum index 727dc6e6d2611..afda01794a65c 100644 --- a/go.sum +++ b/go.sum @@ -1145,6 +1145,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BM github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf h1:xSjgqa6SiBaSC4sTC4HniWRLww2vbl3u0KyMUYeryJI= +github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o= diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index ff3f2251a9994..ff23a060b51cc 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -21,6 +21,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/file" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/graylog" + _ "github.com/influxdata/telegraf/plugins/outputs/groundwork" _ "github.com/influxdata/telegraf/plugins/outputs/health" _ "github.com/influxdata/telegraf/plugins/outputs/http" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" diff --git a/plugins/outputs/groundwork/README.md b/plugins/outputs/groundwork/README.md new file mode 100644 index 0000000000000..ea0fc92fc8248 --- /dev/null +++ 
b/plugins/outputs/groundwork/README.md @@ -0,0 +1,38 @@ +# GroundWork Output Plugin + +This plugin writes to a [GroundWork Monitor][1] instance. Plugin only supports GW8+ + +[1]: https://www.gwos.com/product/groundwork-monitor/ + +## Configuration + +```toml +[[outputs.groundwork]] + ## URL of your groundwork instance. + url = "https://groundwork.example.com" + + ## Agent uuid for GroundWork API Server. + agent_id = "" + + ## Username and password to access GroundWork API. + username = "" + password = "" + + ## Default display name for the host with services(metrics). + # default_host = "telegraf" + + ## Default service state. + # default_service_state = "SERVICE_OK" + + ## The name of the tag that contains the hostname. + # resource_tag = "host" +``` + +## List of tags used by the plugin + +* service - to define the name of the service you want to monitor. +* status - to define the status of the service. +* message - to provide any message you want. +* unitType - to use in monitoring contexts(subset of The Unified Code for Units of Measure standard). Supported types: "1", "%cpu", "KB", "GB", "MB". +* warning - to define warning threshold value. +* critical - to define critical threshold value. diff --git a/plugins/outputs/groundwork/groundwork.go b/plugins/outputs/groundwork/groundwork.go new file mode 100644 index 0000000000000..ec11439b8cc45 --- /dev/null +++ b/plugins/outputs/groundwork/groundwork.go @@ -0,0 +1,289 @@ +package groundwork + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + + "github.com/gwos/tcg/sdk/clients" + "github.com/gwos/tcg/sdk/transit" + "github.com/hashicorp/go-uuid" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const sampleConfig = ` + ## URL of your groundwork instance. + url = "https://groundwork.example.com" + + ## Agent uuid for GroundWork API Server. + agent_id = "" + + ## Username and password to access GroundWork API. + username = "" + password = "" + + ## Default display name for the host with services(metrics). + # default_host = "telegraf" + + ## Default service state. + # default_service_state = "SERVICE_OK" + + ## The name of the tag that contains the hostname. 
+ # resource_tag = "host" +` + +type Groundwork struct { + Server string `toml:"url"` + AgentID string `toml:"agent_id"` + Username string `toml:"username"` + Password string `toml:"password"` + DefaultHost string `toml:"default_host"` + DefaultServiceState string `toml:"default_service_state"` + ResourceTag string `toml:"resource_tag"` + Log telegraf.Logger `toml:"-"` + client clients.GWClient +} + +func (g *Groundwork) SampleConfig() string { + return sampleConfig +} + +func (g *Groundwork) Init() error { + if g.Server == "" { + return errors.New("no 'url' provided") + } + if g.AgentID == "" { + return errors.New("no 'agent_id' provided") + } + if g.Username == "" { + return errors.New("no 'username' provided") + } + if g.Password == "" { + return errors.New("no 'password' provided") + } + if g.DefaultHost == "" { + return errors.New("no 'default_host' provided") + } + if g.ResourceTag == "" { + return errors.New("no 'resource_tag' provided") + } + if !validStatus(g.DefaultServiceState) { + return errors.New("invalid 'default_service_state' provided") + } + + g.client = clients.GWClient{ + AppName: "telegraf", + AppType: "TELEGRAF", + GWConnection: &clients.GWConnection{ + HostName: g.Server, + UserName: g.Username, + Password: g.Password, + IsDynamicInventory: true, + }, + } + return nil +} + +func (g *Groundwork) Connect() error { + err := g.client.Connect() + if err != nil { + return fmt.Errorf("could not log in: %v", err) + } + return nil +} + +func (g *Groundwork) Close() error { + err := g.client.Disconnect() + if err != nil { + return fmt.Errorf("could not log out: %v", err) + } + return nil +} + +func (g *Groundwork) Write(metrics []telegraf.Metric) error { + resourceToServicesMap := make(map[string][]transit.DynamicMonitoredService) + for _, metric := range metrics { + resource, service, err := g.parseMetric(metric) + if err != nil { + g.Log.Errorf("%v", err) + continue + } + resourceToServicesMap[resource] = append(resourceToServicesMap[resource], *service) + } + + var resources []transit.DynamicMonitoredResource + for resourceName, services := range resourceToServicesMap { + resources = append(resources, transit.DynamicMonitoredResource{ + BaseResource: transit.BaseResource{ + BaseTransitData: transit.BaseTransitData{ + Name: resourceName, + Type: transit.Host, + }, + }, + Status: transit.HostUp, + LastCheckTime: transit.NewTimestamp(), + Services: services, + }) + } + + traceToken, err := uuid.GenerateUUID() + if err != nil { + return err + } + requestJSON, err := json.Marshal(transit.DynamicResourcesWithServicesRequest{ + Context: &transit.TracerContext{ + AppType: "TELEGRAF", + AgentID: g.AgentID, + TraceToken: traceToken, + TimeStamp: transit.NewTimestamp(), + Version: transit.ModelVersion, + }, + Resources: resources, + Groups: nil, + }) + + if err != nil { + return err + } + + _, err = g.client.SendResourcesWithMetrics(context.Background(), requestJSON) + if err != nil { + return fmt.Errorf("error while sending: %v", err) + } + + return nil +} + +func (g *Groundwork) Description() string { + return "Send telegraf metrics to GroundWork Monitor" +} + +func init() { + outputs.Add("groundwork", func() telegraf.Output { + return &Groundwork{ + ResourceTag: "host", + DefaultHost: "telegraf", + DefaultServiceState: string(transit.ServiceOk), + } + }) +} + +func (g *Groundwork) parseMetric(metric telegraf.Metric) (string, *transit.DynamicMonitoredService, error) { + resource := g.DefaultHost + if value, present := metric.GetTag(g.ResourceTag); present { + resource = value + } + 
+ service := metric.Name() + if value, present := metric.GetTag("service"); present { + service = value + } + + status := g.DefaultServiceState + value, statusPresent := metric.GetTag("status") + if validStatus(value) { + status = value + } + + message, _ := metric.GetTag("message") + + unitType := string(transit.UnitCounter) + if value, present := metric.GetTag("unitType"); present { + unitType = value + } + + var critical float64 + value, criticalPresent := metric.GetTag("critical") + if criticalPresent { + if s, err := strconv.ParseFloat(value, 64); err == nil { + critical = s + } + } + + var warning float64 + value, warningPresent := metric.GetTag("warning") + if warningPresent { + if s, err := strconv.ParseFloat(value, 64); err == nil { + warning = s + } + } + + lastCheckTime := transit.NewTimestamp() + lastCheckTime.Time = metric.Time() + serviceObject := transit.DynamicMonitoredService{ + BaseTransitData: transit.BaseTransitData{ + Name: service, + Type: transit.Service, + Owner: resource, + }, + Status: transit.MonitorStatus(status), + LastCheckTime: lastCheckTime, + LastPlugInOutput: message, + Metrics: nil, + } + + for _, value := range metric.FieldList() { + var thresholds []transit.ThresholdValue + if warningPresent { + thresholds = append(thresholds, transit.ThresholdValue{ + SampleType: transit.Warning, + Label: value.Key + "_wn", + Value: &transit.TypedValue{ + ValueType: transit.DoubleType, + DoubleValue: warning, + }, + }) + } + if criticalPresent { + thresholds = append(thresholds, transit.ThresholdValue{ + SampleType: transit.Critical, + Label: value.Key + "_cr", + Value: &transit.TypedValue{ + ValueType: transit.DoubleType, + DoubleValue: critical, + }, + }) + } + + typedValue := new(transit.TypedValue) + err := typedValue.FromInterface(value.Value) + if err != nil { + return "", nil, err + } + + serviceObject.Metrics = append(serviceObject.Metrics, transit.TimeSeries{ + MetricName: value.Key, + SampleType: transit.Value, + Interval: &transit.TimeInterval{ + EndTime: lastCheckTime, + }, + Value: typedValue, + Unit: transit.UnitType(unitType), + Thresholds: &thresholds, + }) + } + + if !statusPresent { + serviceStatus, err := transit.CalculateServiceStatus(&serviceObject.Metrics) + if err != nil { + g.Log.Infof("could not calculate service status, reverting to default_service_state: %v", err) + serviceObject.Status = transit.MonitorStatus(g.DefaultServiceState) + } + serviceObject.Status = serviceStatus + } + + return resource, &serviceObject, nil +} + +func validStatus(status string) bool { + switch transit.MonitorStatus(status) { + case transit.ServiceOk, transit.ServiceWarning, transit.ServicePending, transit.ServiceScheduledCritical, + transit.ServiceUnscheduledCritical, transit.ServiceUnknown: + return true + } + return false +} diff --git a/plugins/outputs/groundwork/groundwork_test.go b/plugins/outputs/groundwork/groundwork_test.go new file mode 100644 index 0000000000000..16ae1f057501f --- /dev/null +++ b/plugins/outputs/groundwork/groundwork_test.go @@ -0,0 +1,96 @@ +package groundwork + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gwos/tcg/sdk/clients" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +const ( + defaultTestAgentID = "ec1676cc-583d-48ee-b035-7fb5ed0fcf88" + defaultHost = "telegraf" +) + +func TestWrite(t *testing.T) { + // Generate test metric with default name to test Write logic + 
floatMetric := testutil.TestMetric(1.0, "Float") + stringMetric := testutil.TestMetric("Test", "String") + + // Simulate Groundwork server that should receive custom metrics + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + + // Decode body to use in assertations below + var obj groundworkObject + err = json.Unmarshal(body, &obj) + require.NoError(t, err) + + // Check if server gets valid metrics object + require.Equal(t, obj.Context.AgentID, defaultTestAgentID) + require.Equal(t, obj.Resources[0].Name, defaultHost) + require.Equal( + t, + obj.Resources[0].Services[0].Name, + "Float", + ) + require.Equal( + t, + obj.Resources[0].Services[0].Metrics[0].Value.DoubleValue, + 1.0, + ) + require.Equal( + t, + obj.Resources[0].Services[1].Metrics[0].Value.StringValue, + "Test", + ) + + _, err = fmt.Fprintln(w, `OK`) + require.NoError(t, err) + })) + + i := Groundwork{ + Server: server.URL, + AgentID: defaultTestAgentID, + DefaultHost: "telegraf", + client: clients.GWClient{ + AppName: "telegraf", + AppType: "TELEGRAF", + GWConnection: &clients.GWConnection{ + HostName: server.URL, + }, + }, + } + + err := i.Write([]telegraf.Metric{floatMetric, stringMetric}) + require.NoError(t, err) + + defer server.Close() +} + +type groundworkObject struct { + Context struct { + AgentID string `json:"agentId"` + } `json:"context"` + Resources []struct { + Name string `json:"name"` + Services []struct { + Name string `json:"name"` + Metrics []struct { + Value struct { + StringValue string `json:"stringValue"` + DoubleValue float64 `json:"doubleValue"` + } `json:"value"` + } + } `json:"services"` + } `json:"resources"` +} From 3627961add0118777c14d5e49140f75946c9c287 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 30 Nov 2021 22:46:46 +0100 Subject: [PATCH 092/133] fix: Linter fixes for plugins/common/[a-z]* (#10189) Co-authored-by: Pawel Zak --- plugins/common/kafka/scram_client.go | 3 +-- plugins/common/logrus/hook.go | 2 +- plugins/common/proxy/proxy.go | 4 ++-- plugins/common/shim/config_test.go | 9 ++++++--- plugins/common/shim/goshim.go | 11 +++++++---- plugins/common/shim/goshim_test.go | 12 ++++++++---- plugins/common/shim/input_test.go | 9 ++++++--- plugins/common/shim/logger.go | 7 +++---- plugins/common/shim/processor_test.go | 7 ++++--- plugins/common/starlark/builtins.go | 11 ++++++----- plugins/common/starlark/field_dict.go | 17 +++++++++-------- plugins/common/starlark/metric.go | 21 +++++++++++---------- plugins/common/starlark/tag_dict.go | 17 +++++++++-------- plugins/common/tls/config_test.go | 5 ++++- plugins/common/tls/utils.go | 6 +++--- 15 files changed, 80 insertions(+), 61 deletions(-) diff --git a/plugins/common/kafka/scram_client.go b/plugins/common/kafka/scram_client.go index f6aa9d6c4e285..765e76e96f7e1 100644 --- a/plugins/common/kafka/scram_client.go +++ b/plugins/common/kafka/scram_client.go @@ -27,8 +27,7 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { - response, err = x.ClientConversation.Step(challenge) - return + return x.ClientConversation.Step(challenge) } func (x *XDGSCRAMClient) Done() bool { diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index 7451639a75423..7596fbbed3194 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -14,7 +14,7 @@ var once sync.Once type 
LogHook struct { } -// Install a logging hook into the logrus standard logger, diverting all logs +// InstallHook installs a logging hook into the logrus standard logger, diverting all logs // through the Telegraf logger at debug level. This is useful for libraries // that directly log to the logrus system without providing an override method. func InstallHook() { diff --git a/plugins/common/proxy/proxy.go b/plugins/common/proxy/proxy.go index 4ef97f1eb52e8..00efbb7ae7b0f 100644 --- a/plugins/common/proxy/proxy.go +++ b/plugins/common/proxy/proxy.go @@ -14,11 +14,11 @@ type proxyFunc func(req *http.Request) (*url.URL, error) func (p *HTTPProxy) Proxy() (proxyFunc, error) { if len(p.HTTPProxyURL) > 0 { - url, err := url.Parse(p.HTTPProxyURL) + address, err := url.Parse(p.HTTPProxyURL) if err != nil { return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) } - return http.ProxyURL(url), nil + return http.ProxyURL(address), nil } return http.ProxyFromEnvironment, nil } diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 762ca5dd283b2..ffe58a1d5de0a 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -5,16 +5,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" tgConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/processors" - "github.com/stretchr/testify/require" ) func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + err := os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") + require.NoError(t, err) + err = os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, err) inputs.Add("test", func() telegraf.Input { return &serviceInput{} diff --git a/plugins/common/shim/goshim.go b/plugins/common/shim/goshim.go index 7be139194520f..ad03cff22d79e 100644 --- a/plugins/common/shim/goshim.go +++ b/plugins/common/shim/goshim.go @@ -84,13 +84,13 @@ func (s *Shim) Run(pollInterval time.Duration) error { if err != nil { return fmt.Errorf("RunProcessor error: %w", err) } - } else if s.Output != nil { + } else if s.Output != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code err := s.RunOutput() if err != nil { return fmt.Errorf("RunOutput error: %w", err) } } else { - return fmt.Errorf("Nothing to run") + return fmt.Errorf("nothing to run") } return nil @@ -102,7 +102,7 @@ func hasQuit(ctx context.Context) bool { func (s *Shim) writeProcessedMetrics() error { serializer := influx.NewSerializer() - for { + for { //nolint:gosimple // for-select used on purpose select { case m, open := <-s.metricCh: if !open { @@ -113,7 +113,10 @@ func (s *Shim) writeProcessedMetrics() error { return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + _, err = fmt.Fprint(s.stdout, string(b)) + if err != nil { + return fmt.Errorf("failed to write metric: %s", err) + } } } } diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go index bbd1a0b703cc5..0f2bd4c7d3bb9 100644 --- a/plugins/common/shim/goshim_test.go +++ b/plugins/common/shim/goshim_test.go @@ -8,8 +8,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" ) func TestShimSetsUpLogger(t *testing.T) { @@ -18,7 +19,8 @@ func 
TestShimSetsUpLogger(t *testing.T) { runErroringInputPlugin(t, 40*time.Second, stdinReader, nil, stderrWriter) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) // <-metricProcessed @@ -27,7 +29,8 @@ func TestShimSetsUpLogger(t *testing.T) { require.NoError(t, err) require.Contains(t, out, "Error in plugin: intentional") - stdinWriter.Close() + err = stdinWriter.Close() + require.NoError(t, err) } func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) { @@ -46,7 +49,8 @@ func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reade shim.stderr = stderr log.SetOutput(stderr) } - shim.AddInput(inp) + err := shim.AddInput(inp) + require.NoError(t, err) go func() { err := shim.Run(interval) require.NoError(t, err) diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 9a0423261ac14..26d164e54c3e7 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -34,7 +34,8 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) <-metricProcessed @@ -43,7 +44,8 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + err = stdinWriter.Close() + require.NoError(t, err) go func() { _, _ = io.ReadAll(r) }() @@ -68,7 +70,8 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou if stderr != nil { shim.stderr = stderr } - shim.AddInput(inp) + err := shim.AddInput(inp) + require.NoError(t, err) go func() { err := shim.Run(interval) require.NoError(t, err) diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go index c8a6ee12ba350..3a6dcb0868d0d 100644 --- a/plugins/common/shim/logger.go +++ b/plugins/common/shim/logger.go @@ -66,7 +66,7 @@ func (l *Logger) Info(args ...interface{}) { // setLoggerOnPlugin injects the logger into the plugin, // if it defines Log telegraf.Logger. 
This is sort of like SetLogger but using // reflection instead of forcing the plugin author to define the function for it -func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { +func setLoggerOnPlugin(i interface{}, logger telegraf.Logger) { valI := reflect.ValueOf(i) if valI.Type().Kind() != reflect.Ptr { @@ -78,10 +78,9 @@ func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { return } - switch field.Type().String() { - case "telegraf.Logger": + if field.Type().String() == "telegraf.Logger" { if field.CanSet() { - field.Set(reflect.ValueOf(log)) + field.Set(reflect.ValueOf(logger)) } } } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index bc00fb70d1bba..072367a98dcf9 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -8,11 +8,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/stretchr/testify/require" ) func TestProcessorShim(t *testing.T) { @@ -95,8 +96,8 @@ type testProcessor struct { } func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - metric.AddTag(p.tagName, p.tagValue) + for _, m := range in { + m.AddTag(p.tagName, p.tagValue) } return in } diff --git a/plugins/common/starlark/builtins.go b/plugins/common/starlark/builtins.go index 7adcd115d13ff..9bca11af77837 100644 --- a/plugins/common/starlark/builtins.go +++ b/plugins/common/starlark/builtins.go @@ -5,8 +5,9 @@ import ( "sort" "time" - "github.com/influxdata/telegraf/metric" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf/metric" ) func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { @@ -210,11 +211,11 @@ func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) } defer iter2.Done() - len := starlark.Len(pair) - if len < 0 { + length := starlark.Len(pair) + if length < 0 { return nil, fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) + } else if length != 2 { + return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, length) } var k, v starlark.Value iter2.Next(&k) diff --git a/plugins/common/starlark/field_dict.go b/plugins/common/starlark/field_dict.go index 08f6249023e17..8b09a045be8e9 100644 --- a/plugins/common/starlark/field_dict.go +++ b/plugins/common/starlark/field_dict.go @@ -6,8 +6,9 @@ import ( "reflect" "strings" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) // FieldDict is a starlark.Value for the metric fields. It is heavily based on the @@ -18,17 +19,17 @@ type FieldDict struct { func (d FieldDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." 
sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -181,7 +182,7 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d FieldDict) Iterate() starlark.Iterator { d.fieldIterCount++ return &FieldIterator{Metric: d.Metric, fields: d.metric.FieldList()} diff --git a/plugins/common/starlark/metric.go b/plugins/common/starlark/metric.go index 031d24ad69635..989c345765cff 100644 --- a/plugins/common/starlark/metric.go +++ b/plugins/common/starlark/metric.go @@ -6,8 +6,9 @@ import ( "strings" "time" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) type Metric struct { @@ -36,15 +37,15 @@ func (m *Metric) Unwrap() telegraf.Metric { // it behaves more like the repr function would in Python. func (m *Metric) String() string { buf := new(strings.Builder) - buf.WriteString("Metric(") - buf.WriteString(m.Name().String()) - buf.WriteString(", tags=") - buf.WriteString(m.Tags().String()) - buf.WriteString(", fields=") - buf.WriteString(m.Fields().String()) - buf.WriteString(", time=") - buf.WriteString(m.Time().String()) - buf.WriteString(")") + buf.WriteString("Metric(") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Name().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", tags=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Tags().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", fields=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Fields().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", time=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Time().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(")") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } diff --git a/plugins/common/starlark/tag_dict.go b/plugins/common/starlark/tag_dict.go index 999f8736575db..56ee0f6551d81 100644 --- a/plugins/common/starlark/tag_dict.go +++ b/plugins/common/starlark/tag_dict.go @@ -5,8 +5,9 @@ import ( "fmt" "strings" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) // TagDict is a starlark.Value for the metric tags. 
It is heavily based on the @@ -17,17 +18,17 @@ type TagDict struct { func (d TagDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -168,7 +169,7 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d TagDict) Iterate() starlark.Iterator { d.tagIterCount++ return &TagIterator{Metric: d.Metric, tags: d.metric.TagList()} diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index 5fee4a5e08214..123523bb54f05 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") @@ -344,6 +345,8 @@ func TestConnect(t *testing.T) { resp, err := client.Get(ts.URL) require.NoError(t, err) + + defer resp.Body.Close() require.Equal(t, 200, resp.StatusCode) } diff --git a/plugins/common/tls/utils.go b/plugins/common/tls/utils.go index ddc12d2c1e5e3..65388640f7dd8 100644 --- a/plugins/common/tls/utils.go +++ b/plugins/common/tls/utils.go @@ -10,11 +10,11 @@ func ParseCiphers(ciphers []string) ([]uint16, error) { suites := []uint16{} for _, cipher := range ciphers { - if v, ok := tlsCipherMap[cipher]; ok { - suites = append(suites, v) - } else { + v, ok := tlsCipherMap[cipher] + if !ok { return nil, fmt.Errorf("unsupported cipher %q", cipher) } + suites = append(suites, v) } return suites, nil From c875e454222d488fa2dac4e7a5210c74635832de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20=C5=BBak?= Date: Tue, 30 Nov 2021 22:59:24 +0100 Subject: [PATCH 093/133] fix: Linter fixes for plugins/inputs/[a-o]* (leftovers) (#10192) Co-authored-by: Pawel Zak --- plugins/inputs/bind/bind_test.go | 23 ++++---- plugins/inputs/cassandra/cassandra_test.go | 27 ++++----- plugins/inputs/cloud_pubsub/pubsub_test.go | 21 +++---- plugins/inputs/csgo/csgo_test.go | 27 +++++---- plugins/inputs/dcos/client.go | 8 +-- plugins/inputs/dcos/dcos.go | 6 +- plugins/inputs/dns_query/dns_query_test.go | 59 +++++++++---------- plugins/inputs/ecs/client_test.go | 18 +++--- plugins/inputs/ethtool/ethtool_test.go | 18 +++--- plugins/inputs/exec/exec_test.go | 14 ++--- plugins/inputs/execd/shim/goshim_posix.go | 7 +-- plugins/inputs/execd/shim/shim_test.go | 4 +- plugins/inputs/graylog/graylog_test.go | 16 
++--- plugins/inputs/hddtemp/hddtemp_test.go | 3 +- .../http_response/http_response_test.go | 12 ++-- plugins/inputs/httpjson/httpjson_test.go | 52 ++++++++-------- plugins/inputs/jolokia/jolokia_test.go | 28 ++++----- plugins/inputs/jolokia2/gatherer_test.go | 6 +- plugins/inputs/jolokia2/jolokia_test.go | 22 +++---- .../inputs/kafka_consumer/kafka_consumer.go | 26 ++++---- .../kafka_consumer/kafka_consumer_test.go | 4 +- .../inputs/minecraft/internal/rcon/rcon.go | 25 ++++---- plugins/inputs/opcua/opcua_client.go | 3 +- plugins/inputs/opcua/opcua_util.go | 20 ++++--- 24 files changed, 221 insertions(+), 228 deletions(-) diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index f7849e1735255..db2358239cc17 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -7,8 +7,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestBindJsonStats(t *testing.T) { @@ -29,7 +30,7 @@ func TestBindJsonStats(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -179,8 +180,8 @@ func TestBindJsonStats(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -202,7 +203,7 @@ func TestBindXmlStatsV2(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -384,8 +385,8 @@ func TestBindXmlStatsV2(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -407,7 +408,7 @@ func TestBindXmlStatsV3(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -611,8 +612,8 @@ func TestBindXmlStatsV3(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -623,5 +624,5 @@ func TestBindUnparseableURL(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.Contains(t, err.Error(), "unable to parse address") + require.Contains(t, err.Error(), "unable to parse address") } diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index f167f50e7187f..35551cf847970 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ 
b/plugins/inputs/cassandra/cassandra_test.go @@ -1,15 +1,14 @@ package cassandra import ( - _ "fmt" "io" "net/http" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validJavaMultiValueJSON = ` @@ -138,8 +137,8 @@ func TestHttpJsonJavaMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "HeapMemoryUsage_init": 67108864.0, @@ -167,8 +166,8 @@ func TestHttpJsonJavaMultiType(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "CollectionCount": 1.0, @@ -188,9 +187,9 @@ func TestHttp404(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected for class=Cassandra @@ -200,8 +199,8 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "ReadLatency_999thPercentile": 20.0, @@ -232,8 +231,8 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields1 := map[string]interface{}{ "ReadLatency_999thPercentile": 1.0, diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index d07dfe34f2290..e27c1e8104bcf 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -5,9 +5,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) const ( @@ -53,7 +54,7 @@ func TestRunParse(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -98,7 +99,7 @@ func TestRunBase64(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -145,7 +146,7 @@ func TestRunInvalidMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunOverlongMessages(t *testing.T) { @@ -192,7 +193,7 @@ func TestRunOverlongMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. 
testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunErrorInSubscriber(t *testing.T) { @@ -228,12 +229,12 @@ func TestRunErrorInSubscriber(t *testing.T) { t.Fatal("expected plugin subscription to be non-nil") } acc.WaitError(1) - assert.Regexp(t, fakeErrStr, acc.Errors[0]) + require.Regexp(t, fakeErrStr, acc.Errors[0]) } func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { - assert.Equal(t, "cpu_load_short", m.Measurement) - assert.Equal(t, "server01", m.Tags["host"]) - assert.Equal(t, 23422.0, m.Fields["value"]) - assert.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) + require.Equal(t, "cpu_load_short", m.Measurement) + require.Equal(t, "server01", m.Tags["host"]) + require.Equal(t, 23422.0, m.Fields["value"]) + require.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) } diff --git a/plugins/inputs/csgo/csgo_test.go b/plugins/inputs/csgo/csgo_test.go index b1d1c9b693814..ca849819842ed 100644 --- a/plugins/inputs/csgo/csgo_test.go +++ b/plugins/inputs/csgo/csgo_test.go @@ -1,10 +1,11 @@ package csgo import ( - "github.com/influxdata/telegraf/testutil" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const testInput = `CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick @@ -28,17 +29,17 @@ func TestCPUStats(t *testing.T) { t.Errorf("acc.HasMeasurement: expected csgo") } - assert.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) - assert.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) - assert.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) - assert.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) - assert.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) - assert.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) - assert.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) - assert.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) - assert.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) - assert.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) - assert.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) + require.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) + require.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) + require.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) + require.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) + require.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) + require.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) + require.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) + require.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) + require.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) + require.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) + require.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) } func requestMock(_ string, _ string) (string, error) { diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 08943d13db0f9..34ab30ea52274 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v4" ) const ( @@ -100,7 +100,7 @@ type ClusterClient struct { type claims struct { UID 
string `json:"uid"` - jwt.StandardClaims + jwt.RegisteredClaims } func (e APIError) Error() string { @@ -327,9 +327,9 @@ func (c *ClusterClient) toURL(path string) string { func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{ UID: sa.AccountID, - StandardClaims: jwt.StandardClaims{ + RegisteredClaims: jwt.RegisteredClaims{ // How long we have to login with this token - ExpiresAt: time.Now().Add(time.Minute * 5).Unix(), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute * 5)), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index dd8f22f7292f5..5712afcfa3ca6 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -9,7 +9,7 @@ import ( "sync" "time" - jwt "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -237,9 +237,7 @@ func (d *DCOS) createPoints(m *Metrics) []*point { fieldKey = fieldKey + "_bytes" } - if strings.HasPrefix(fieldKey, "dcos_metrics_module_") { - fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") - } + fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") tagset := make([]string, 0, len(tags)) for k, v := range tags { diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index c1dd7abf06121..2e57e2f7b07ba 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -4,11 +4,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/miekg/dns" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var servers = []string{"8.8.8.8"} @@ -25,12 +24,12 @@ func TestGathering(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + require.NotEqual(t, 0, queryTime) } func TestGatheringMxRecord(t *testing.T) { @@ -45,12 +44,12 @@ func TestGatheringMxRecord(t *testing.T) { dnsConfig.RecordType = "MX" err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + require.NotEqual(t, 0, queryTime) } func TestGatheringRootDomain(t *testing.T) { @@ -71,12 +70,12 @@ func TestGatheringRootDomain(t *testing.T) { "result": "success", } fields := map[string]interface{}{ - "rcode_value": int(0), + "rcode_value": 0, "result_code": uint64(0), } err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -102,12 +101,12 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { "result": "success", } fields := map[string]interface{}{ - "rcode_value": int(0), + "rcode_value": 0, "result_code": uint64(0), } err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -134,9 +133,9 @@ 
func TestGatheringTimeout(t *testing.T) { }() select { case err := <-channel: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(time.Second * 2): - assert.Fail(t, "DNS query did not timeout") + require.Fail(t, "DNS query did not timeout") } } @@ -145,16 +144,16 @@ func TestSettingDefaultValues(t *testing.T) { dnsConfig.setDefaultValues() - assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") - assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") - assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") - assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") + require.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") + require.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") dnsConfig = DNSQuery{Domains: []string{"."}} dnsConfig.setDefaultValues() - assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") } func TestRecordTypeParser(t *testing.T) { @@ -163,47 +162,47 @@ func TestRecordTypeParser(t *testing.T) { dnsConfig.RecordType = "A" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeA, recordType) + require.Equal(t, dns.TypeA, recordType) dnsConfig.RecordType = "AAAA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeAAAA, recordType) + require.Equal(t, dns.TypeAAAA, recordType) dnsConfig.RecordType = "ANY" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeANY, recordType) + require.Equal(t, dns.TypeANY, recordType) dnsConfig.RecordType = "CNAME" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeCNAME, recordType) + require.Equal(t, dns.TypeCNAME, recordType) dnsConfig.RecordType = "MX" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeMX, recordType) + require.Equal(t, dns.TypeMX, recordType) dnsConfig.RecordType = "NS" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeNS, recordType) + require.Equal(t, dns.TypeNS, recordType) dnsConfig.RecordType = "PTR" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypePTR, recordType) + require.Equal(t, dns.TypePTR, recordType) dnsConfig.RecordType = "SOA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSOA, recordType) + require.Equal(t, dns.TypeSOA, recordType) dnsConfig.RecordType = "SPF" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSPF, recordType) + require.Equal(t, dns.TypeSPF, recordType) dnsConfig.RecordType = "SRV" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSRV, recordType) + require.Equal(t, dns.TypeSRV, recordType) dnsConfig.RecordType = "TXT" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeTXT, recordType) + require.Equal(t, dns.TypeTXT, recordType) } func TestRecordTypeParserError(t *testing.T) { @@ -212,5 +211,5 @@ func TestRecordTypeParserError(t *testing.T) { dnsConfig.RecordType = "nil" _, err = dnsConfig.parseRecordType() - assert.Error(t, err) + require.Error(t, err) } diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 7e9d7e393346f..14b32c6851db7 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -10,7 +10,7 @@ 
import ( "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type pollMock struct { @@ -80,8 +80,8 @@ func TestEcsClient_PollSync(t *testing.T) { t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) - assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) + require.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) + require.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) }) } } @@ -160,7 +160,7 @@ func TestEcsClient_Task(t *testing.T) { t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) }) } } @@ -234,7 +234,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) }) } } @@ -268,10 +268,10 @@ func TestResolveTaskURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveTaskURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } @@ -305,10 +305,10 @@ func TestResolveStatsURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveStatsURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index f9573ee054429..e348427d05366 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" ) @@ -310,8 +310,8 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) expectedTagsEth1 := map[string]string{ @@ -334,8 +334,8 @@ func TestGatherIncludeInterfaces(t *testing.T) { command.InterfaceInclude = append(command.InterfaceInclude, "eth1") err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -361,8 +361,8 @@ func TestGatherIgnoreInterfaces(t *testing.T) { command.InterfaceExclude = append(command.InterfaceExclude, "eth1") err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should not contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -489,8 +489,8 @@ func TestNormalizedKeys(t *testing.T) { var acc 
testutil.Accumulator err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) acc.AssertContainsFields(t, pluginName, toStringMapInterface(c.expectedFields)) acc.AssertContainsTaggedFields(t, pluginName, toStringMapInterface(c.expectedFields), expectedTags) diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d0647476c77ae..22465318bbe71 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -13,10 +13,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const validJSON = ` @@ -94,7 +94,7 @@ func TestExec(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(e.Gather) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") + require.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), @@ -123,7 +123,7 @@ func TestExecMalformed(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestCommandError(t *testing.T) { @@ -140,7 +140,7 @@ func TestCommandError(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestExecCommandWithGlob(t *testing.T) { @@ -263,14 +263,14 @@ func TestRemoveCarriageReturns(t *testing.T) { for _, test := range crTests { b := bytes.NewBuffer(test.input) out := removeWindowsCarriageReturns(*b) - assert.True(t, bytes.Equal(test.output, out.Bytes())) + require.True(t, bytes.Equal(test.output, out.Bytes())) } } else { // Test that the buffer is returned unaltered for _, test := range crTests { b := bytes.NewBuffer(test.input) out := removeWindowsCarriageReturns(*b) - assert.True(t, bytes.Equal(test.input, out.Bytes())) + require.True(t, bytes.Equal(test.input, out.Bytes())) } } } diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 8d7faa2268878..c1a3d0ea24d84 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -15,10 +15,7 @@ func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt ch signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2) go func() { - select { - case <-ctx.Done(): - // context done. 
stop to signals to avoid pushing messages to a closed channel - signal.Stop(collectMetricsPrompt) - } + <-ctx.Done() + signal.Stop(collectMetricsPrompt) }() } diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 396928ff44036..1059bc2b7f2db 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -121,10 +121,10 @@ func TestLoadConfig(t *testing.T) { }) c := "./testdata/plugin.conf" - inputs, err := LoadConfig(&c) + loadedInputs, err := LoadConfig(&c) require.NoError(t, err) - inp := inputs[0].(*serviceInput) + inp := loadedInputs[0].(*serviceInput) require.Equal(t, "awesome name", inp.ServiceName) require.Equal(t, "xxxxxxxxxx", inp.SecretToken) diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index 5739969e3df01..108d3bc28dad6 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -172,8 +172,8 @@ func TestHttpJson500(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -183,8 +183,8 @@ func TestHttpJsonBadJson(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response objectgT @@ -194,6 +194,6 @@ func TestHttpJsonEmptyResponse(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 769022049d17a..44be91bb28bf9 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,7 +3,6 @@ package hddtemp import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" @@ -44,7 +43,7 @@ func TestFetch(t *testing.T) { err := hddTemp.Gather(acc) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) var tests = []struct { fields map[string]interface{} diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5d109d0a35439..0d537f5358433 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -16,12 +16,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Receives a list with fields that are expected to be absent @@ -168,8 +168,8 @@ func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[stri func TestHeaders(t *testing.T) { ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cHeader := r.Header.Get("Content-Type") - assert.Equal(t, "Hello", r.Host) - assert.Equal(t, "application/json", cHeader) + require.Equal(t, "Hello", r.Host) + require.Equal(t, "application/json", cHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() @@ -1100,7 +1100,7 @@ func TestRedirect(t *testing.T) { func TestBasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { aHeader := r.Header.Get("Authorization") - assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) + require.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() @@ -1277,7 +1277,7 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { func TestSNI(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) + require.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) w.WriteHeader(http.StatusOK) })) defer ts.Close() diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index b203238a94037..c522ebe9978d2 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -8,9 +8,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -212,7 +212,7 @@ func TestHttpJson200(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(service.Gather) require.NoError(t, err) - assert.Equal(t, 12, acc.NFields()) + require.Equal(t, 12, acc.NFields()) // Set responsetime for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 @@ -231,7 +231,7 @@ func TestHttpJson200(t *testing.T) { func TestHttpJsonGET_URL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -304,7 +304,7 @@ func TestHttpJsonGET(t *testing.T) { } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -378,8 +378,8 @@ func TestHttpJsonPOST(t *testing.T) { } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) - assert.NoError(t, err) - assert.Equal(t, "api_key=mykey", string(body)) + require.NoError(t, err) + require.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) _, err = fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -453,8 +453,8 @@ func TestHttpJson500(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to HTTP 405 @@ -465,8 +465,8 @@ func TestHttpJsonBadMethod(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 
0, acc.NFields()) } // Test response to malformed JSON @@ -476,8 +476,8 @@ func TestHttpJsonBadJson(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response object @@ -486,7 +486,7 @@ func TestHttpJsonEmptyResponse(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.NoError(t, err) + require.NoError(t, err) } // Test that the proper values are ignored or collected @@ -502,7 +502,7 @@ func TestHttpJson200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 4, acc.NFields()) + require.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv, "role": "master", "build": "123"} fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} @@ -540,22 +540,22 @@ func TestHttpJsonArray200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 8, acc.NFields()) - assert.Equal(t, uint64(4), acc.NMetrics()) + require.Equal(t, 8, acc.NFields()) + require.Equal(t, uint64(4), acc.NMetrics()) for _, m := range acc.Metrics { if m.Tags["role"] == "master" { - assert.Equal(t, "123", m.Tags["build"]) - assert.Equal(t, float64(15), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "123", m.Tags["build"]) + require.Equal(t, float64(15), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else if m.Tags["role"] == "slave" { - assert.Equal(t, "456", m.Tags["build"]) - assert.Equal(t, float64(17), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "456", m.Tags["build"]) + require.Equal(t, float64(17), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else { - assert.FailNow(t, "unknown metric") + require.FailNow(t, "unknown metric") } } } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index e91e9a1087fda..084a84577fdc9 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validThreeLevelMultiValueJSON = ` @@ -143,8 +143,8 @@ func TestHttpJsonMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -167,8 +167,8 @@ func TestHttpJsonBulkResponse(t *testing.T) { var acc testutil.Accumulator err := jolokia.Gather(&acc) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -195,8 +195,8 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { var acc 
testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_java.lang:type=Memory_ObjectPendingFinalizationCount": 0.0, @@ -228,9 +228,9 @@ func TestHttp404(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected @@ -241,7 +241,7 @@ func TestHttpInvalidJson(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "error decoding JSON response") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "error decoding JSON response") } diff --git a/plugins/inputs/jolokia2/gatherer_test.go b/plugins/inputs/jolokia2/gatherer_test.go index 4ba4b586ad5f4..e01c603addaeb 100644 --- a/plugins/inputs/jolokia2/gatherer_test.go +++ b/plugins/inputs/jolokia2/gatherer_test.go @@ -3,7 +3,7 @@ package jolokia2 import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJolokia2_makeReadRequests(t *testing.T) { @@ -96,9 +96,9 @@ func TestJolokia2_makeReadRequests(t *testing.T) { for _, c := range cases { payload := makeReadRequests([]Metric{c.metric}) - assert.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) + require.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) for _, actual := range payload { - assert.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) + require.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) } } } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 01750bf002ff5..af22a27358b32 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" @@ -80,7 +80,7 @@ func TestJolokia2_ScalarValues(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "scalar_without_attribute", map[string]interface{}{ "value": 123.0, @@ -240,7 +240,7 @@ func TestJolokia2_ObjectValues(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "object_without_attribute", map[string]interface{}{ "biz": 123.0, @@ -328,7 +328,7 @@ func TestJolokia2_StatusCodes(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "ok", map[string]interface{}{ "value": 1.0, @@ -378,7 +378,7 @@ func TestJolokia2_TagRenaming(t *testing.T) { plugin := setupPlugin(t, 
fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_tag_prefix", map[string]interface{}{ "value": 123.0, @@ -471,7 +471,7 @@ func TestJolokia2_FieldRenaming(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_field_modifiers", map[string]interface{}{ "DEFAULT_PREFIX_hello_DEFAULT_SEPARATOR_world": 123.0, @@ -579,7 +579,7 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "mbean_name_and_object_keys", map[string]interface{}{ "value": 123.0, @@ -672,7 +672,7 @@ func TestJolokia2_MetricCompaction(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "compact_metric", map[string]interface{}{ "value": 123.0, @@ -733,7 +733,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "hello", map[string]interface{}{ "value": 123.0, @@ -755,11 +755,11 @@ func TestFillFields(t *testing.T) { results := map[string]interface{}{} newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complexPoint, results) - assert.Equal(t, map[string]interface{}{}, results) + require.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalarPoint, results) - assert.Equal(t, map[string]interface{}{}, results) + require.Equal(t, map[string]interface{}{}, results) } func setupServer(resp string) *httptest.Server { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 1aff773a5d8cf..777d7261dd175 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -159,13 +159,13 @@ type ConsumerGroup interface { } type ConsumerGroupCreator interface { - Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) + Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) } type SaramaCreator struct{} -func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { - return sarama.NewConsumerGroup(brokers, group, config) +func (*SaramaCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, cfg) } func (k *KafkaConsumer) SampleConfig() string { @@ -191,31 +191,31 @@ func (k *KafkaConsumer) Init() error { k.ConsumerGroup = defaultConsumerGroup } - config := sarama.NewConfig() + cfg := sarama.NewConfig() // Kafka version 0.10.2.0 is required for consumer groups. 
- config.Version = sarama.V0_10_2_0 + cfg.Version = sarama.V0_10_2_0 - if err := k.SetConfig(config); err != nil { + if err := k.SetConfig(cfg); err != nil { return err } switch strings.ToLower(k.Offset) { case "oldest", "": - config.Consumer.Offsets.Initial = sarama.OffsetOldest + cfg.Consumer.Offsets.Initial = sarama.OffsetOldest case "newest": - config.Consumer.Offsets.Initial = sarama.OffsetNewest + cfg.Consumer.Offsets.Initial = sarama.OffsetNewest default: return fmt.Errorf("invalid offset %q", k.Offset) } switch strings.ToLower(k.BalanceStrategy) { case "range", "": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange case "roundrobin": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin case "sticky": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky default: return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) } @@ -224,9 +224,9 @@ func (k *KafkaConsumer) Init() error { k.ConsumerCreator = &SaramaCreator{} } - config.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) + cfg.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) - k.config = config + k.config = cfg return nil } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 7d31dad92549d..55769a72404df 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -43,10 +43,10 @@ type FakeCreator struct { ConsumerGroup *FakeConsumerGroup } -func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { +func (c *FakeCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { c.ConsumerGroup.brokers = brokers c.ConsumerGroup.group = group - c.ConsumerGroup.config = config + c.ConsumerGroup.config = cfg return c.ConsumerGroup, nil } diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index ccc020edb4fb6..6efce2ba5c4b1 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -67,17 +67,17 @@ func (p Packet) Compile() (payload []byte, err error) { var padding [PacketPaddingSize]byte if err = binary.Write(&buffer, binary.LittleEndian, &size); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Challenge); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Type); nil != err { - return + return nil, err } - if _, err := buffer.WriteString(p.Body); err != nil { + if _, err = buffer.WriteString(p.Body); err != nil { return nil, err } - if _, err := buffer.Write(padding[:]); err != nil { + if _, err = buffer.Write(padding[:]); err != nil { return nil, err } @@ -95,16 +95,13 @@ func NewPacket(challenge, typ int32, body string) (packet *Packet) { // or a potential error. 
func (c *Client) Authorize(password string) (response *Packet, err error) { if response, err = c.Send(Auth, password); nil == err { - if response.Header.Type == AuthResponse { - c.Authorized = true - } else { - err = ErrFailedAuthorization - response = nil - return + if response.Header.Type != AuthResponse { + return nil, ErrFailedAuthorization } + c.Authorized = true } - return + return response, err } // Execute calls Send with the appropriate command type and the provided @@ -114,7 +111,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) { return c.Send(Exec, command) } -// Sends accepts the commands type and its string to execute to the clients server, +// Send accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, // and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned @@ -213,5 +210,5 @@ func NewClient(host string, port int) (client *Client, err error) { client.Host = host client.Port = port client.Connection, err = net.Dial("tcp", fmt.Sprintf("%v:%v", client.Host, client.Port)) - return + return client, err } diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 97bfa3709c113..14315e5fe0e2d 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -52,7 +52,6 @@ type OpcUA struct { opts []opcua.Option } -// OPCTag type type NodeSettings struct { FieldName string `toml:"name"` Namespace string `toml:"namespace"` @@ -476,7 +475,7 @@ func (o *OpcUA) setupOptions() error { } } - o.opts, err = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) + o.opts, err = o.generateClientOpts(endpoints) return err } diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index e1304fa304fc6..0afe07115e197 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "log" "math/big" "net" "net/url" @@ -146,7 +145,7 @@ func pemBlockForKey(priv interface{}) (*pem.Block, error) { } //revive:disable-next-line -func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) ([]opcua.Option, error) { +func (o *OpcUA) generateClientOpts(endpoints []*ua.EndpointDescription) ([]opcua.Option, error) { opts := []opcua.Option{} appuri := "urn:telegraf:gopcua:client" appname := "Telegraf" @@ -154,13 +153,16 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, // ApplicationURI is automatically read from the cert so is not required if a cert if provided opts = append(opts, opcua.ApplicationURI(appuri)) opts = append(opts, opcua.ApplicationName(appname)) + opts = append(opts, opcua.RequestTimeout(time.Duration(o.RequestTimeout))) - opts = append(opts, opcua.RequestTimeout(requestTimeout)) - + certFile := o.Certificate + keyFile := o.PrivateKey + policy := o.SecurityPolicy + mode := o.SecurityMode var err error if certFile == "" && keyFile == "" { if policy != "None" || mode != "None" { - certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, 365*24*time.Hour) 
if err != nil { return nil, err } @@ -172,7 +174,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, debug.Printf("Loading cert/key from %s/%s", certFile, keyFile) c, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { - log.Printf("Failed to load certificate: %s", err) + o.Log.Warnf("Failed to load certificate: %s", err) } else { pk, ok := c.PrivateKey.(*rsa.PrivateKey) if !ok { @@ -198,7 +200,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } // Select the most appropriate authentication mode from server capabilities and user input - authMode, authOption, err := generateAuth(auth, cert, username, password) + authMode, authOption, err := o.generateAuth(o.AuthMethod, cert, o.Username, o.Password) if err != nil { return nil, err } @@ -276,7 +278,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, return opts, nil } -func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { +func (o *OpcUA) generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { var err error var authMode ua.UserTokenType @@ -313,7 +315,7 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua authOption = opcua.AuthIssuedToken([]byte(nil)) default: - log.Printf("unknown auth-mode, defaulting to Anonymous") + o.Log.Warnf("unknown auth-mode, defaulting to Anonymous") authMode = ua.UserTokenTypeAnonymous authOption = opcua.AuthAnonymous() } From 7675ce6d1932a36851558cb655da2961c66c3652 Mon Sep 17 00:00:00 2001 From: Mya Date: Tue, 30 Nov 2021 15:47:50 -0700 Subject: [PATCH 094/133] refactor: snmp to use gosmi (#9518) --- internal/snmp/config.go | 2 + internal/snmp/translate.go | 188 ++ plugins/inputs/snmp/README.md | 24 +- plugins/inputs/snmp/snmp.go | 174 +- plugins/inputs/snmp/snmp_mocks_generate.go | 103 - plugins/inputs/snmp/snmp_mocks_test.go | 93 - plugins/inputs/snmp/snmp_test.go | 283 +- plugins/inputs/snmp/testdata/bridgeMib | 1467 +++++++++ plugins/inputs/snmp/testdata/bridgeMibImports | 554 ++++ plugins/inputs/snmp/testdata/foo | 30 + plugins/inputs/snmp/testdata/fooImports | 169 ++ plugins/inputs/snmp/testdata/ifPhysAddress | 84 + .../inputs/snmp/testdata/ifPhysAddressImports | 254 ++ plugins/inputs/snmp/testdata/server | 57 + plugins/inputs/snmp/testdata/serverImports | 174 ++ plugins/inputs/snmp/testdata/snmpd.conf | 17 - plugins/inputs/snmp/testdata/tableBuild | 57 + plugins/inputs/snmp/testdata/tableMib | 2613 +++++++++++++++++ plugins/inputs/snmp/testdata/tableMibImports | 119 + plugins/inputs/snmp/testdata/tcpMib | 786 +++++ plugins/inputs/snmp/testdata/tcpMibImports | 639 ++++ plugins/inputs/snmp/testdata/test.mib | 97 - plugins/inputs/snmp_trap/README.md | 5 + plugins/inputs/snmp_trap/snmp_trap.go | 88 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 305 +- 25 files changed, 7574 insertions(+), 808 deletions(-) create mode 100644 internal/snmp/translate.go delete mode 100644 plugins/inputs/snmp/snmp_mocks_generate.go delete mode 100644 plugins/inputs/snmp/snmp_mocks_test.go create mode 100644 plugins/inputs/snmp/testdata/bridgeMib create mode 100644 plugins/inputs/snmp/testdata/bridgeMibImports create mode 100644 plugins/inputs/snmp/testdata/foo create mode 100644 plugins/inputs/snmp/testdata/fooImports create mode 100644 plugins/inputs/snmp/testdata/ifPhysAddress create mode 100644 plugins/inputs/snmp/testdata/ifPhysAddressImports create mode 100644 plugins/inputs/snmp/testdata/server create 
mode 100644 plugins/inputs/snmp/testdata/serverImports delete mode 100644 plugins/inputs/snmp/testdata/snmpd.conf create mode 100644 plugins/inputs/snmp/testdata/tableBuild create mode 100644 plugins/inputs/snmp/testdata/tableMib create mode 100644 plugins/inputs/snmp/testdata/tableMibImports create mode 100644 plugins/inputs/snmp/testdata/tcpMib create mode 100644 plugins/inputs/snmp/testdata/tcpMibImports delete mode 100644 plugins/inputs/snmp/testdata/test.mib diff --git a/internal/snmp/config.go b/internal/snmp/config.go index 0a200b7067787..4ad1d3a0cd3e3 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -10,6 +10,8 @@ type ClientConfig struct { Retries int `toml:"retries"` // Values: 1, 2, 3 Version uint8 `toml:"version"` + // Path to mib files + Path []string `toml:"path"` // Parameters for Version 1 & 2 Community string `toml:"community"` diff --git a/internal/snmp/translate.go b/internal/snmp/translate.go new file mode 100644 index 0000000000000..ebb905112d0cb --- /dev/null +++ b/internal/snmp/translate.go @@ -0,0 +1,188 @@ +package snmp + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" +) + +// must init, append path for each directory, load module for every file +// or gosmi will fail without saying why +var m sync.Mutex + +func LoadMibsFromPath(paths []string, log telegraf.Logger) error { + m.Lock() + defer m.Unlock() + gosmi.Init() + var folders []string + for _, mibPath := range paths { + gosmi.AppendPath(mibPath) + folders = append(folders, mibPath) + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + // symlinks are files so we need to double check if any of them are folders + // Will check file vs directory later on + if info.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(path) + if err != nil { + log.Warnf("Bad symbolic link %v", link) + } + folders = append(folders, link) + } + return nil + }) + if err != nil { + return fmt.Errorf("Filepath could not be walked %v", err) + } + for _, folder := range folders { + err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { + // checks if file or directory + if info.IsDir() { + gosmi.AppendPath(path) + } else if info.Mode()&os.ModeSymlink == 0 { + _, err := gosmi.LoadModule(info.Name()) + if err != nil { + log.Warnf("Module could not be loaded %v", err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("Filepath could not be walked %v", err) + } + } + folders = []string{} + } + return nil +} + +// The following is for snmp_trap +type MibEntry struct { + MibName string + OidText string +} + +func TrapLookup(oid string) (e MibEntry, err error) { + var node gosmi.SmiNode + node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + + // ensure modules are loaded or node will be empty (might not error) + if err != nil { + return e, err + } + + e.OidText = node.RenderQualified() + + i := strings.Index(e.OidText, "::") + if i == -1 { + return e, fmt.Errorf("not found") + } + e.MibName = e.OidText[:i] + e.OidText = e.OidText[i+2:] + return e, nil +} + +// The following is for snmp + +func GetIndex(oidNum string, mibPrefix string) (col []string, tagOids map[string]struct{}, err error) { + // first attempt to get the table's tags + tagOids = map[string]struct{}{} + + // mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName + node, err := 
gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + + if err != nil { + return []string{}, map[string]struct{}{}, fmt.Errorf("getting submask: %w", err) + } + + for _, index := range node.GetIndex() { + //nolint:staticcheck //assaignment to nil map to keep backwards compatibilty + tagOids[mibPrefix+index.Name] = struct{}{} + } + + // grabs all columns from the table + // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName + col = node.GetRow().AsTable().ColumnOrder + + return col, tagOids, nil +} + +//nolint:revive //Too many return variable but necessary +func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + var out gosmi.SmiNode + var end string + if strings.ContainsAny(oid, "::") { + // split given oid + // for example RFC1213-MIB::sysUpTime.0 + s := strings.Split(oid, "::") + // node becomes sysUpTime.0 + node := s[1] + if strings.ContainsAny(node, ".") { + s = strings.Split(node, ".") + // node becomes sysUpTime + node = s[0] + end = "." + s[1] + } + + out, err = gosmi.GetNode(node) + if err != nil { + return oid, oid, oid, oid, err + } + + oidNum = "." + out.RenderNumeric() + end + } else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") { + //handle mixed oid ex. .iso.2.3 + s := strings.Split(oid, ".") + for i := range s { + if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { + out, err = gosmi.GetNode(s[i]) + if err != nil { + return oid, oid, oid, oid, err + } + s[i] = out.RenderNumeric() + } + } + oidNum = strings.Join(s, ".") + out, _ = gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + } else { + out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + oidNum = oid + // ensure modules are loaded or node will be empty (might not error) + // do not return the err as the oid is numeric and telegraf can continue + //nolint:nilerr + if err != nil || out.Name == "iso" { + return oid, oid, oid, oid, nil + } + } + + tc := out.GetSubtree() + + for i := range tc { + // case where the mib doesn't have a conversion so Type struct will be nil + // prevents seg fault + if tc[i].Type == nil { + break + } + switch tc[i].Type.Name { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": + conversion = "ipaddr" + } + } + + oidText = out.RenderQualified() + i := strings.Index(oidText, "::") + if i == -1 { + return "", oid, oid, oid, fmt.Errorf("not found") + } + mibName = oidText[:i] + oidText = oidText[i+2:] + end + + return mibName, oidNum, oidText, conversion, nil +} diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index b9cb69a5fedf5..27158133efe6b 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -4,19 +4,10 @@ The `snmp` input plugin uses polling to gather metrics from SNMP agents. Support for gathering individual OIDs as well as complete SNMP tables is included. -## Prerequisites +## Note about Paths -This plugin uses the `snmptable` and `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. - -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. 
The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. +Path is a global variable, separate snmp instances will append the specified +path onto the global path variable ## Configuration @@ -38,6 +29,9 @@ information. ## SNMP version; can be 1, 2, or 3. # version = 2 + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## SNMP community string. # community = "public" @@ -260,7 +254,7 @@ oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" Partial result (removed agent_host and host columns from all following outputs in this section): -```shell +```text > ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000 > ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000 > ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000 @@ -313,7 +307,7 @@ is_tag = true Result: -```shell +```text > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 @@ -357,7 +351,5 @@ interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdmin interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 ``` -[net-snmp]: http://www.net-snmp.org/ -[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK [metric filtering]: /docs/CONFIGURATION.md#metric-filtering [metric]: /docs/METRICS.md diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index c4a2b80b28174..193332959dbfa 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -1,15 +1,11 @@ package snmp import ( - "bufio" - "bytes" "encoding/binary" "errors" "fmt" - "log" "math" "net" - "os/exec" "strconv" "strings" "sync" @@ -21,7 +17,6 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/wlog" ) const description = `Retrieves SNMP values from remote agents` @@ -42,6 +37,9 @@ const sampleConfig = ` ## SNMP version; can be 1, 2, or 3. # version = 2 + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## Agent host tag; the tag used to reference the source host # agent_host_tag = "agent_host" @@ -70,36 +68,12 @@ const sampleConfig = ` # priv_protocol = "" ## Privacy password used for encrypted messages. # priv_password = "" - + ## Add fields and tables defining the variables you wish to collect. This ## example collects the system uptime and interface variables. Reference the ## full plugin documentation for configuration details. ` -// execCommand is so tests can mock out exec.Command usage. -var execCommand = exec.Command - -// execCmd executes the specified command, returning the STDOUT content. 
-// If command exits with error status, the output is captured into the returned error. -func execCmd(arg0 string, args ...string) ([]byte, error) { - if wlog.LogLevel() == wlog.DEBUG { - quoted := make([]string, 0, len(args)) - for _, arg := range args { - quoted = append(quoted, fmt.Sprintf("%q", arg)) - } - log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) - } - - out, err := execCommand(arg0, args...).Output() - if err != nil { - if err, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err) - } - return nil, err - } - return out, nil -} - // Snmp holds the configuration for the plugin. type Snmp struct { // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. @@ -120,12 +94,14 @@ type Snmp struct { Fields []Field `toml:"field"` connectionCache []snmpConnection - initialized bool + + Log telegraf.Logger `toml:"-"` } -func (s *Snmp) init() error { - if s.initialized { - return nil +func (s *Snmp) Init() error { + err := snmp.LoadMibsFromPath(s.Path, s.Log) + if err != nil { + return err } s.connectionCache = make([]snmpConnection, len(s.Agents)) @@ -146,7 +122,6 @@ func (s *Snmp) init() error { s.AgentHostTag = "agent_host" } - s.initialized = true return nil } @@ -352,6 +327,7 @@ func init() { MaxRepetitions: 10, Timeout: config.Duration(5 * time.Second), Version: 2, + Path: []string{"/usr/share/snmp/mibs"}, Community: "public", }, } @@ -372,10 +348,6 @@ func (s *Snmp) Description() string { // Any error encountered does not halt the process. The errors are accumulated // and returned at the end. func (s *Snmp) Gather(acc telegraf.Accumulator) error { - if err := s.init(); err != nil { - return err - } - var wg sync.WaitGroup for i, agent := range s.Agents { wg.Add(1) @@ -835,6 +807,7 @@ var snmpTableCachesLock sync.Mutex // snmpTable resolves the given OID as a table, providing information about the // table and fields within. +//nolint:revive //Too many return variable but necessary func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { snmpTableCachesLock.Lock() if snmpTableCaches == nil { @@ -852,6 +825,7 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err } +//nolint:revive //Too many return variable but necessary func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { mibName, oidNum, oidText, _, err = SnmpTranslate(oid) if err != nil { @@ -859,53 +833,12 @@ func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, f } mibPrefix := mibName + "::" - oidFullName := mibPrefix + oidText - - // first attempt to get the table's tags - tagOids := map[string]struct{}{} - // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. 
- if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - for scanner.Scan() { - line := scanner.Text() - - if !strings.HasPrefix(line, " INDEX") { - continue - } - i := strings.Index(line, "{ ") - if i == -1 { // parse error - continue - } - line = line[i+2:] - i = strings.Index(line, " }") - if i == -1 { // parse error - continue - } - line = line[:i] - for _, col := range strings.Split(line, ", ") { - tagOids[mibPrefix+col] = struct{}{} - } - } - } + col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix) - // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. - out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) - if err != nil { - return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) - } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - scanner.Scan() - cols := scanner.Text() - if len(cols) == 0 { - return "", "", "", nil, fmt.Errorf("could not find any columns in table") - } - for _, col := range strings.Split(cols, " ") { - if len(col) == 0 { - continue - } - _, isTag := tagOids[mibPrefix+col] - fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) + for _, c := range col { + _, isTag := tagOids[mibPrefix+c] + fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag}) } return mibName, oidNum, oidText, fields, err @@ -923,6 +856,7 @@ var snmpTranslateCachesLock sync.Mutex var snmpTranslateCaches map[string]snmpTranslateCache // snmpTranslate resolves the given OID. +//nolint:revive //Too many return variable but necessary func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { snmpTranslateCachesLock.Lock() if snmpTranslateCaches == nil { @@ -940,7 +874,7 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // is worth it. Especially when it would slam the system pretty hard if lots // of lookups are being performed. - stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmp.SnmpTranslateCall(oid) snmpTranslateCaches[oid] = stc } @@ -948,73 +882,3 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } - -func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - var out []byte - if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) - } else { - out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) - if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound { - // Silently discard error if snmptranslate not found and we have a numeric OID. - // Meaning we can get by without the lookup. - return "", oid, oid, "", nil - } - } - if err != nil { - return "", "", "", "", err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if !ok && scanner.Err() != nil { - return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err()) - } - - oidText = scanner.Text() - - i := strings.Index(oidText, "::") - if i == -1 { - // was not found in MIB. - if bytes.Contains(out, []byte("[TRUNCATED]")) { - return "", oid, oid, "", nil - } - // not truncated, but not fully found. 
We still need to parse out numeric OID, so keep going - oidText = oid - } else { - mibName = oidText[:i] - oidText = oidText[i+2:] - } - - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") { - tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ") - switch tc { - case "MacAddress", "PhysAddress": - conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": - conversion = "ipaddr" - } - } else if strings.HasPrefix(line, "::= { ") { - objs := strings.TrimPrefix(line, "::= { ") - objs = strings.TrimSuffix(objs, " }") - - for _, obj := range strings.Split(objs, " ") { - if len(obj) == 0 { - continue - } - if i := strings.Index(obj, "("); i != -1 { - obj = obj[i+1:] - oidNum += "." + obj[:strings.Index(obj, ")")] - } else { - oidNum += "." + obj - } - } - break - } - } - - return mibName, oidNum, oidText, conversion, nil -} diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go deleted file mode 100644 index f87f9029b0d06..0000000000000 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ /dev/null @@ -1,103 +0,0 @@ -//go:build generate -// +build generate - -package main - -import ( - "bufio" - "bytes" - "fmt" - "os" - "os/exec" - "strings" -) - -// This file is a generator used to generate the mocks for the commands used by the tests. - -// These are the commands to be mocked. -var mockedCommands = [][]string{ - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, - {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server.0"}, - {"snmptranslate", "-Td", "-Ob", "TEST::testTable"}, - {"snmptranslate", "-Td", "-Ob", "TEST::connections"}, - {"snmptranslate", "-Td", "-Ob", "TEST::latency"}, - {"snmptranslate", "-Td", "-Ob", "TEST::description"}, - {"snmptranslate", "-Td", "-Ob", "TEST::hostname"}, - {"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"}, - {"snmptranslate", "-Td", "TEST::testTable.1"}, - {"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"}, -} - -type mockedCommandResult struct { - stdout string - stderr string - exitError bool -} - -func main() { - if err := generate(); err != nil { - fmt.Fprintf(os.Stderr, "error: %s\n", err) - os.Exit(1) - } -} - -func generate() error { - f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644) - if err != nil { - return err - } - br := bufio.NewReader(f) - var i int64 - for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') { - i += int64(len(l)) - if l == "// BEGIN GO GENERATE CONTENT\n" { - break - } - } - f.Truncate(i) - f.Seek(i, 0) - - fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n") - - for _, cmd := range mockedCommands { - ec := 
exec.Command(cmd[0], cmd[1:]...) - out := bytes.NewBuffer(nil) - err := bytes.NewBuffer(nil) - ec.Stdout = out - ec.Stderr = err - ec.Env = []string{ - "MIBDIRS=+./testdata", - } - - var mcr mockedCommandResult - if err := ec.Run(); err != nil { - if err, ok := err.(*exec.ExitError); !ok { - mcr.exitError = true - } else { - return fmt.Errorf("executing %v: %s", cmd, err) - } - } - mcr.stdout = string(out.Bytes()) - mcr.stderr = string(err.Bytes()) - cmd0 := strings.Join(cmd, "\000") - mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix - fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv) - } - f.Write([]byte("}\n")) - f.Close() - - return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run() -} diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go deleted file mode 100644 index 850f6b83830bc..0000000000000 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package snmp - -import ( - "fmt" - "os" - "os/exec" - "strings" - "testing" -) - -type mockedCommandResult struct { - stdout string - stderr string - exitError bool -} - -func mockExecCommand(arg0 string, args ...string) *exec.Cmd { - args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...) - cmd := exec.Command(os.Args[0], args...) - cmd.Stderr = os.Stderr // so the test output shows errors - return cmd -} - -// This is not a real test. This is just a way of mocking out commands. -// -// Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 -func TestMockExecCommand(_ *testing.T) { - var cmd []string - for _, arg := range os.Args { - if arg == "--" { - cmd = []string{} - continue - } - if cmd == nil { - continue - } - cmd = append(cmd, arg) - } - if cmd == nil { - return - } - - cmd0 := strings.Join(cmd, "\000") - mcr, ok := mockedCommandResults[cmd0] - if !ok { - cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix - //nolint:errcheck,revive - fmt.Fprintf(os.Stderr, "Unmocked command. 
Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) - //nolint:revive // error code is important for this "test" - os.Exit(1) - } - //nolint:errcheck,revive - fmt.Printf("%s", mcr.stdout) - //nolint:errcheck,revive - fmt.Fprintf(os.Stderr, "%s", mcr.stderr) - if mcr.exitError { - //nolint:revive // error code is important for this "test" - os.Exit(1) - } - //nolint:revive // error code is important for this "test" - os.Exit(0) -} - -func init() { - execCommand = mockExecCommand -} - -// BEGIN GO GENERATE CONTENT -var mockedCommandResults = map[string]mockedCommandResult{ - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {stdout: "TEST::server.0\nserver 
OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::description": {stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00TEST::testTable.1": {stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, - "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, -} diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 5f0bd1bb39e25..7962bede41278 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,10 +1,9 @@ -//go:generate go run -tags generate snmp_mocks_generate.go package snmp import ( "fmt" "net" - "os/exec" + "path/filepath" "sync" "testing" "time" @@ -63,33 +62,42 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { var tsc = &testSNMPConnection{ host: "tsc", values: map[string]interface{}{ - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.1.2": []byte(""), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.2.2": 0, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "0.000", - ".1.0.0.0.1.3.3": "9.999", - ".1.0.0.0.1.5.0": 123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), - ".1.0.0.2.1.5.0.9.9": 11, - ".1.0.0.2.1.5.1.9.9": 22, - ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", - ".1.0.0.3.1.1.10": "instance", - ".1.0.0.3.1.1.11": "instance2", - ".1.0.0.3.1.1.12": "instance3", - ".1.0.0.3.1.2.10": 10, - ".1.0.0.3.1.2.11": 20, - ".1.0.0.3.1.2.12": 20, - ".1.0.0.3.1.3.10": 1, - ".1.0.0.3.1.3.11": 2, - ".1.0.0.3.1.3.12": 3, + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + ".1.3.6.1.2.1.3.1.1.5.0": 123456, + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -104,6 +112,7 @@ func TestSampleConfig(t *testing.T) { ClientConfig: snmp.ClientConfig{ Timeout: config.Duration(5 * time.Second), Version: 2, + Path: 
[]string{"/usr/share/snmp/mibs"}, Community: "public", MaxRepetitions: 10, Retries: 3, @@ -114,6 +123,17 @@ func TestSampleConfig(t *testing.T) { } func TestFieldInit(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + } + + err = s.Init() + require.NoError(t, err) + translations := []struct { inputOid string inputName string @@ -125,8 +145,6 @@ func TestFieldInit(t *testing.T) { {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, - {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, - {".999", "", "", ".999", ".999", ""}, {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, @@ -134,6 +152,7 @@ func TestFieldInit(t *testing.T) { {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, + {".999", "", "", ".999", ".999", ""}, } for _, txl := range translations { @@ -147,100 +166,111 @@ func TestFieldInit(t *testing.T) { } func TestTableInit(t *testing.T) { - tbl := Table{ - Oid: ".1.0.0.0", - Fields: []Field{ - {Oid: ".999", Name: "foo"}, - {Oid: "TEST::description", Name: "description", IsTag: true}, + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + Tables: []Table{ + {Oid: ".1.3.6.1.2.1.3.1", + Fields: []Field{ + {Oid: ".999", Name: "foo"}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true}, + {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"}, + }}, }, } - err := tbl.Init() + err = s.Init() require.NoError(t, err) - require.Equal(t, "testTable", tbl.Name) + require.Equal(t, "atTable", s.Tables[0].Name) - require.Len(t, tbl.Fields, 5) - require.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - require.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true}) + require.Len(t, s.Tables[0].Fields, 5) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".999", Name: "foo", initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true}) } func TestSnmpInit(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ Tables: []Table{ - {Oid: "TEST::testTable"}, + {Oid: "RFC1213-MIB::atTable"}, }, Fields: []Field{ - {Oid: "TEST::hostname"}, + {Oid: "RFC1213-MIB::atPhysAddress"}, + }, 
+ ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, }, } - err := s.init() + err = s.Init() require.NoError(t, err) - require.Len(t, s.Tables[0].Fields, 4) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true}) + require.Len(t, s.Tables[0].Fields, 3) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true, initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", IsTag: true, initialized: true}) require.Equal(t, Field{ - Oid: ".1.0.0.1.1", - Name: "hostname", + Oid: ".1.3.6.1.2.1.3.1.1.2", + Name: "atPhysAddress", + Conversion: "hwaddr", initialized: true, }, s.Fields[0]) } func TestSnmpInit_noTranslate(t *testing.T) { - // override execCommand so it returns exec.ErrNotFound - defer func(ec func(string, ...string) *exec.Cmd) { execCommand = ec }(execCommand) - execCommand = func(_ string, _ ...string) *exec.Cmd { - return exec.Command("snmptranslateExecErrNotFound") - } - s := &Snmp{ Fields: []Field{ - {Oid: ".1.1.1.1", Name: "one", IsTag: true}, - {Oid: ".1.1.1.2", Name: "two"}, - {Oid: ".1.1.1.3"}, + {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, + {Oid: ".9.1.1.1.2", Name: "two"}, + {Oid: ".9.1.1.1.3"}, }, Tables: []Table{ {Name: "testing", Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, + {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".9.1.1.1.5", Name: "five"}, + {Oid: ".9.1.1.1.6"}, }}, }, + ClientConfig: snmp.ClientConfig{ + Path: []string{}, + }, } - err := s.init() + err := s.Init() require.NoError(t, err) - require.Equal(t, ".1.1.1.1", s.Fields[0].Oid) + require.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) require.Equal(t, "one", s.Fields[0].Name) require.Equal(t, true, s.Fields[0].IsTag) - require.Equal(t, ".1.1.1.2", s.Fields[1].Oid) + require.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) require.Equal(t, "two", s.Fields[1].Name) require.Equal(t, false, s.Fields[1].IsTag) - require.Equal(t, ".1.1.1.3", s.Fields[2].Oid) - require.Equal(t, ".1.1.1.3", s.Fields[2].Name) + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) require.Equal(t, false, s.Fields[2].IsTag) - require.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) + require.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) require.Equal(t, "four", s.Tables[0].Fields[0].Name) require.Equal(t, true, s.Tables[0].Fields[0].IsTag) - require.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid) + require.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) require.Equal(t, "five", s.Tables[0].Fields[1].Name) require.Equal(t, false, s.Tables[0].Fields[1].IsTag) - require.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - require.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Name) + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) require.Equal(t, false, s.Tables[0].Fields[2].IsTag) } @@ -255,7 +285,7 @@ func 
TestSnmpInit_noName_noOid(t *testing.T) { }, } - err := s.init() + err := s.Init() require.Error(t, err) } @@ -269,7 +299,7 @@ func TestGetSNMPConnection_v2(t *testing.T) { Community: "foo", }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) @@ -305,7 +335,7 @@ func TestGetSNMPConnectionTCP(t *testing.T) { s := &Snmp{ Agents: []string{"tcp://127.0.0.1:56789"}, } - err := s.init() + err := s.Init() require.NoError(t, err) wg.Add(1) @@ -346,7 +376,7 @@ func TestGetSNMPConnection_v3(t *testing.T) { EngineTime: 2, }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) @@ -463,7 +493,7 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { s := tc.Config - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) @@ -491,7 +521,7 @@ func TestGetSNMPConnection_caching(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, } - err := s.init() + err := s.Init() require.NoError(t, err) gs1, err := s.getConnection(0) require.NoError(t, err) @@ -613,7 +643,7 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { require.Equal(t, (gs.Retries+1)*2, reqCount) } -func TestTableBuild_walk(t *testing.T) { +func TestTableBuild_walk_noTranslate(t *testing.T) { tbl := Table{ Name: "mytable", IndexAsTag: true, @@ -642,22 +672,11 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.2.1.5", OidIndexLength: 1, }, - { - Name: "myfield6", - Oid: ".1.0.0.0.1.6", - Translate: true, - }, - { - Name: "myfield7", - Oid: ".1.0.0.0.1.6", - Translate: false, - }, }, } tb, err := tbl.Build(tsc, true) require.NoError(t, err) - require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ @@ -669,8 +688,6 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(0.123), "myfield4": 11, "myfield5": 11, - "myfield6": "testTableEntry.7", - "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ @@ -709,6 +726,80 @@ func TestTableBuild_walk(t *testing.T) { require.Contains(t, tb.Rows, rtr4) } +func TestTableBuild_walk_Translate(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + } + err = s.Init() + require.NoError(t, err) + + tbl := Table{ + Name: "atTable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "ifIndex", + Oid: "1.3.6.1.2.1.3.1.1.1", + IsTag: true, + }, + { + Name: "atPhysAddress", + Oid: "1.3.6.1.2.1.3.1.1.2", + Translate: false, + }, + { + Name: "atNetAddress", + Oid: "1.3.6.1.2.1.3.1.1.3", + Translate: true, + }, + }, + } + + err = tbl.Init() + require.NoError(t, err) + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, tb.Name, "atTable") + + rtr1 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "foo", + "index": "0", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 1, + "atNetAddress": "atNetAddress", + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "bar", + "index": "1", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 2, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "index": "2", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 0, + }, + } + + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) +} + func TestTableBuild_noWalk(t *testing.T) { tbl := Table{ Name: "mytable", @@ 
-784,7 +875,6 @@ func TestGather(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} @@ -831,7 +921,6 @@ func TestGather_host(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} diff --git a/plugins/inputs/snmp/testdata/bridgeMib b/plugins/inputs/snmp/testdata/bridgeMib new file mode 100644 index 0000000000000..96f562732fd6a --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMib @@ -0,0 +1,1467 @@ +BRIDGE-MIB DEFINITIONS ::= BEGIN + +-- ---------------------------------------------------------- -- +-- MIB for IEEE 802.1D devices +-- ---------------------------------------------------------- -- +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, NOTIFICATION-TYPE, + Counter32, Integer32, TimeTicks, mib-2, TEXTUAL-CONVENTION, MacAddress, + MODULE-COMPLIANCE, NOTIFICATION-GROUP, OBJECT-GROUP, InterfaceIndex + FROM bridgeMibImports; + +dot1dBridge MODULE-IDENTITY + LAST-UPDATED "200509190000Z" + ORGANIZATION "IETF Bridge MIB Working Group" + CONTACT-INFO + "Email: bridge-mib@ietf.org + + K.C. Norseth (Editor) + L-3 Communications + Tel: +1 801-594-2809 + Email: kenyon.c.norseth@L-3com.com + Postal: 640 N. 2200 West. + Salt Lake City, Utah 84116-0850 + + Les Bell (Editor) + 3Com Europe Limited + Phone: +44 1442 438025 + Email: elbell@ntlworld.com + Postal: 3Com Centre, Boundary Way + Hemel Hempstead + Herts. HP2 7YU + UK + + Send comments to " + DESCRIPTION + "The Bridge MIB module for managing devices that support + IEEE 802.1D. + + Copyright (C) The Internet Society (2005). This version of + this MIB module is part of RFC 4188; see the RFC itself for + full legal notices." + REVISION "200509190000Z" + DESCRIPTION + "Third revision, published as part of RFC 4188. + + The MIB module has been converted to SMIv2 format. + Conformance statements have been added and some + description and reference clauses have been updated. + + The object dot1dStpPortPathCost32 was added to + support IEEE 802.1t and the permissible values of + dot1dStpPriority and dot1dStpPortPriority have been + clarified for bridges supporting IEEE 802.1t or + IEEE 802.1w. + + The interpretation of dot1dStpTimeSinceTopologyChange + has been clarified for bridges supporting the Rapid + Spanning Tree Protocol (RSTP)." + REVISION "199307310000Z" + DESCRIPTION + "Second revision, published as part of RFC 1493." + REVISION "199112310000Z" + DESCRIPTION + "Initial revision, published as part of RFC 1286." + ::= { mib-2 17 } + +-- ---------------------------------------------------------- -- +-- Textual Conventions +-- ---------------------------------------------------------- -- + +BridgeId ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "The Bridge-Identifier, as used in the Spanning Tree + Protocol, to uniquely identify a bridge. Its first two + octets (in network byte order) contain a priority value, + and its last 6 octets contain the MAC address used to + refer to a bridge in a unique fashion (typically, the + numerically smallest MAC address of all ports on the + bridge)." + SYNTAX OCTET STRING (SIZE (8)) + +Timeout ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A Spanning Tree Protocol (STP) timer in units of 1/100 + seconds. Several objects in this MIB module represent + values of timers used by the Spanning Tree Protocol. + In this MIB, these timers have values in units of + hundredths of a second (i.e., 1/100 secs). 
+ + These timers, when stored in a Spanning Tree Protocol's + BPDU, are in units of 1/256 seconds. Note, however, that + 802.1D-1998 specifies a settable granularity of no more + than one second for these timers. To avoid ambiguity, + a conversion algorithm is defined below for converting + between the different units, which ensures a timer's + value is not distorted by multiple conversions. + + To convert a Timeout value into a value in units of + 1/256 seconds, the following algorithm should be used: + + b = floor( (n * 256) / 100) + + where: + floor = quotient [ignore remainder] + n is the value in 1/100 second units + b is the value in 1/256 second units + + To convert the value from 1/256 second units back to + 1/100 seconds, the following algorithm should be used: + + n = ceiling( (b * 100) / 256) + + where: + ceiling = quotient [if remainder is 0], or + quotient + 1 [if remainder is nonzero] + n is the value in 1/100 second units + + b is the value in 1/256 second units + + Note: it is important that the arithmetic operations are + done in the order specified (i.e., multiply first, + divide second)." + SYNTAX Integer32 + +-- ---------------------------------------------------------- -- +-- subtrees in the Bridge MIB +-- ---------------------------------------------------------- -- + +dot1dNotifications OBJECT IDENTIFIER ::= { dot1dBridge 0 } + +dot1dBase OBJECT IDENTIFIER ::= { dot1dBridge 1 } +dot1dStp OBJECT IDENTIFIER ::= { dot1dBridge 2 } + +dot1dSr OBJECT IDENTIFIER ::= { dot1dBridge 3 } +-- documented in RFC 1525 + +dot1dTp OBJECT IDENTIFIER ::= { dot1dBridge 4 } +dot1dStatic OBJECT IDENTIFIER ::= { dot1dBridge 5 } + +-- Subtrees used by Bridge MIB Extensions: +-- pBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 6 } +-- qBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 7 } +-- Note that the practice of registering related MIB modules +-- below dot1dBridge has been discouraged since there is no +-- robust mechanism to track such registrations. + +dot1dConformance OBJECT IDENTIFIER ::= { dot1dBridge 8 } + +-- ---------------------------------------------------------- -- +-- the dot1dBase subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dBase subtree is mandatory for all +-- bridges. +-- ---------------------------------------------------------- -- + +dot1dBaseBridgeAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The MAC address used by this bridge when it must be + referred to in a unique fashion. It is recommended + that this be the numerically smallest MAC address of + all ports that belong to this bridge. However, it is only + + required to be unique. When concatenated with + dot1dStpPriority, a unique BridgeIdentifier is formed, + which is used in the Spanning Tree Protocol." + REFERENCE + "IEEE 802.1D-1998: clauses 14.4.1.1.3 and 7.12.5" + ::= { dot1dBase 1 } + +dot1dBaseNumPorts OBJECT-TYPE + SYNTAX Integer32 + UNITS "ports" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of ports controlled by this bridging + entity." + REFERENCE + "IEEE 802.1D-1998: clause 14.4.1.1.3" + ::= { dot1dBase 2 } + +dot1dBaseType OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + transparent-only(2), + sourceroute-only(3), + srt(4) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Indicates what type of bridging this bridge can + perform. 
If a bridge is actually performing a + certain type of bridging, this will be indicated by + entries in the port table for the given type." + ::= { dot1dBase 3 } + +-- ---------------------------------------------------------- -- +-- The Generic Bridge Port Table +-- ---------------------------------------------------------- -- +dot1dBasePortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains generic information about every + port that is associated with this bridge. Transparent, + source-route, and srt ports are included." + ::= { dot1dBase 4 } + +dot1dBasePortEntry OBJECT-TYPE + SYNTAX Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information for each port of the bridge." + REFERENCE + "IEEE 802.1D-1998: clause 14.4.2, 14.6.1" + INDEX { dot1dBasePort } + ::= { dot1dBasePortTable 1 } + +Dot1dBasePortEntry ::= + SEQUENCE { + dot1dBasePort + Integer32, + dot1dBasePortIfIndex + InterfaceIndex, + dot1dBasePortCircuit + OBJECT IDENTIFIER, + dot1dBasePortDelayExceededDiscards + Counter32, + dot1dBasePortMtuExceededDiscards + Counter32 + } + +dot1dBasePort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains bridge management information." + ::= { dot1dBasePortEntry 1 } + +dot1dBasePortIfIndex OBJECT-TYPE + SYNTAX InterfaceIndex + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of the instance of the ifIndex object, + defined in IF-MIB, for the interface corresponding + to this port." + ::= { dot1dBasePortEntry 2 } + +dot1dBasePortCircuit OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "For a port that (potentially) has the same value of + dot1dBasePortIfIndex as another port on the same bridge. + This object contains the name of an object instance + unique to this port. For example, in the case where + multiple ports correspond one-to-one with multiple X.25 + virtual circuits, this value might identify an (e.g., + the first) object instance associated with the X.25 + virtual circuit corresponding to this port. + + For a port which has a unique value of + dot1dBasePortIfIndex, this object can have the value + { 0 0 }." + ::= { dot1dBasePortEntry 3 } + +dot1dBasePortDelayExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to excessive transit delay through the bridge. It + is incremented by both transparent and source + route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 4 } + +dot1dBasePortMtuExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to an excessive size. It is incremented by both + transparent and source route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 5 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dStp subtree is optional. It is +-- implemented by those bridges that support the Spanning Tree +-- Protocol. 
+-- ---------------------------------------------------------- -- + +dot1dStpProtocolSpecification OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + decLb100(2), + ieee8021d(3) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An indication of what version of the Spanning Tree + Protocol is being run. The value 'decLb100(2)' + indicates the DEC LANbridge 100 Spanning Tree protocol. + IEEE 802.1D implementations will return 'ieee8021d(3)'. + If future versions of the IEEE Spanning Tree Protocol + that are incompatible with the current version + are released a new value will be defined." + ::= { dot1dStp 1 } + +dot1dStpPriority OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the write-able portion of the Bridge ID + (i.e., the first two octets of the (8 octet long) Bridge + ID). The other (last) 6 octets of the Bridge ID are + given by the value of dot1dBaseBridgeAddress. + On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-61440, in steps of 4096." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStp 2 } + +dot1dStpTimeSinceTopologyChange OBJECT-TYPE + SYNTAX TimeTicks + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The time (in hundredths of a second) since the + last time a topology change was detected by the + bridge entity. + For RSTP, this reports the time since the tcWhile + timer for any port on this Bridge was nonzero." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1., + IEEE 802.1w clause 14.8.1.1." + ::= { dot1dStp 3 } + +dot1dStpTopChanges OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of topology changes detected by + this bridge since the management entity was last + reset or initialized." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1." + ::= { dot1dStp 4 } + +dot1dStpDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The bridge identifier of the root of the spanning + tree, as determined by the Spanning Tree Protocol, + as executed by this node. This value is used as + the Root Identifier parameter in all Configuration + Bridge PDUs originated by this node." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.1" + ::= { dot1dStp 5 } + +dot1dStpRootCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The cost of the path to the root as seen from + this bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.2" + ::= { dot1dStp 6 } + +dot1dStpRootPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port that offers the lowest + cost path from this bridge to the root bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.3" + ::= { dot1dStp 7 } + +dot1dStpMaxAge OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum age of Spanning Tree Protocol information + learned from the network on any port before it is + discarded, in units of hundredths of a second. This is + the actual value that this bridge is currently using." 
+ REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.4" + ::= { dot1dStp 8 } + +dot1dStpHelloTime OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The amount of time between the transmission of + Configuration bridge PDUs by this node on any port when + it is the root of the spanning tree, or trying to become + so, in units of hundredths of a second. This is the + actual value that this bridge is currently using." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.5" + ::= { dot1dStp 9 } + +dot1dStpHoldTime OBJECT-TYPE + SYNTAX Integer32 + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value determines the interval length + during which no more than two Configuration bridge + PDUs shall be transmitted by this node, in units + of hundredths of a second." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.14" + ::= { dot1dStp 10 } + +dot1dStpForwardDelay OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value, measured in units of hundredths of a + second, controls how fast a port changes its spanning + state when moving towards the Forwarding state. The + value determines how long the port stays in each of the + Listening and Learning states, which precede the + Forwarding state. This value is also used when a + topology change has been detected and is underway, to + age all dynamic entries in the Forwarding Database. + [Note that this value is the one that this bridge is + currently using, in contrast to + dot1dStpBridgeForwardDelay, which is the value that this + bridge and all others would start using if/when this + bridge were to become the root.]" + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.6" + ::= { dot1dStp 11 } + +dot1dStpBridgeMaxAge OBJECT-TYPE + SYNTAX Timeout (600..4000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for MaxAge when this + bridge is acting as the root. Note that 802.1D-1998 + specifies that the range for this parameter is related + to the value of dot1dStpBridgeHelloTime. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.8" + ::= { dot1dStp 12 } + +dot1dStpBridgeHelloTime OBJECT-TYPE + SYNTAX Timeout (100..1000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for HelloTime when this + bridge is acting as the root. The granularity of this + timer is specified by 802.1D-1998 to be 1 second. An + agent may return a badValue error if a set is attempted + + to a value that is not a whole number of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.9" + ::= { dot1dStp 13 } + +dot1dStpBridgeForwardDelay OBJECT-TYPE + SYNTAX Timeout (400..3000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for ForwardDelay when + this bridge is acting as the root. Note that + 802.1D-1998 specifies that the range for this parameter + is related to the value of dot1dStpBridgeMaxAge. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." 
+ REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.10" + ::= { dot1dStp 14 } + +-- ---------------------------------------------------------- -- +-- The Spanning Tree Port Table +-- ---------------------------------------------------------- -- + +dot1dStpPortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains port-specific information + for the Spanning Tree Protocol." + ::= { dot1dStp 15 } + +dot1dStpPortEntry OBJECT-TYPE + SYNTAX Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information maintained by every port about + the Spanning Tree Protocol state for that port." + INDEX { dot1dStpPort } + ::= { dot1dStpPortTable 1 } + +Dot1dStpPortEntry ::= + SEQUENCE { + + dot1dStpPort + Integer32, + dot1dStpPortPriority + Integer32, + dot1dStpPortState + INTEGER, + dot1dStpPortEnable + INTEGER, + dot1dStpPortPathCost + Integer32, + dot1dStpPortDesignatedRoot + BridgeId, + dot1dStpPortDesignatedCost + Integer32, + dot1dStpPortDesignatedBridge + BridgeId, + dot1dStpPortDesignatedPort + OCTET STRING, + dot1dStpPortForwardTransitions + Counter32, + dot1dStpPortPathCost32 + Integer32 + } + +dot1dStpPort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains Spanning Tree Protocol management information." + REFERENCE + "IEEE 802.1D-1998: clause 14.8.2.1.2" + ::= { dot1dStpPortEntry 1 } + +dot1dStpPortPriority OBJECT-TYPE + SYNTAX Integer32 (0..255) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the priority field that is contained in + the first (in network byte order) octet of the (2 octet + long) Port ID. The other octet of the Port ID is given + by the value of dot1dStpPort. + On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-240, in steps of 16." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStpPortEntry 2 } + +dot1dStpPortState OBJECT-TYPE + SYNTAX INTEGER { + disabled(1), + blocking(2), + listening(3), + learning(4), + forwarding(5), + broken(6) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port's current state, as defined by application of + the Spanning Tree Protocol. This state controls what + action a port takes on reception of a frame. If the + bridge has detected a port that is malfunctioning, it + will place that port into the broken(6) state. For + ports that are disabled (see dot1dStpPortEnable), this + object will have a value of disabled(1)." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 3 } + +dot1dStpPortEnable OBJECT-TYPE + SYNTAX INTEGER { + enabled(1), + disabled(2) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The enabled/disabled status of the port." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 4 } + +dot1dStpPortPathCost OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + + the speed of the attached LAN. + + New implementations should support dot1dStpPortPathCost32. 
+ If the port path costs exceeds the maximum value of this + object then this object should report the maximum value, + namely 65535. Applications should try to read the + dot1dStpPortPathCost32 object if this object reports + the maximum value." + REFERENCE "IEEE 802.1D-1998: clause 8.5.5.3" + ::= { dot1dStpPortEntry 5 } + +dot1dStpPortDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The unique Bridge Identifier of the Bridge + recorded as the Root in the Configuration BPDUs + transmitted by the Designated Bridge for the + segment to which the port is attached." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.4" + ::= { dot1dStpPortEntry 6 } + +dot1dStpPortDesignatedCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The path cost of the Designated Port of the segment + connected to this port. This value is compared to the + Root Path Cost field in received bridge PDUs." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.5" + ::= { dot1dStpPortEntry 7 } + +dot1dStpPortDesignatedBridge OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Bridge Identifier of the bridge that this + port considers to be the Designated Bridge for + this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.6" + ::= { dot1dStpPortEntry 8 } + +dot1dStpPortDesignatedPort OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (2)) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Port Identifier of the port on the Designated + Bridge for this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.7" + ::= { dot1dStpPortEntry 9 } + +dot1dStpPortForwardTransitions OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times this port has transitioned + from the Learning state to the Forwarding state." + ::= { dot1dStpPortEntry 10 } + +dot1dStpPortPathCost32 OBJECT-TYPE + SYNTAX Integer32 (1..200000000) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + the speed of the attached LAN. + + This object replaces dot1dStpPortPathCost to support + IEEE 802.1t." + REFERENCE + "IEEE 802.1t clause 8.10.2, Table 8-5." + ::= { dot1dStpPortEntry 11 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dTp subtree is optional. It is +-- implemented by those bridges that support the transparent +-- bridging mode. A transparent or SRT bridge will implement +-- this subtree. +-- ---------------------------------------------------------- -- + +dot1dTpLearnedEntryDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of Forwarding Database entries that + have been or would have been learned, but have been + discarded due to a lack of storage space in the + Forwarding Database. If this counter is increasing, it + indicates that the Forwarding Database is regularly + becoming full (a condition that has unpleasant + performance effects on the subnetwork). If this counter + has a significant value but is not presently increasing, + it indicates that the problem has been occurring but is + not persistent." 
+ REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 1 } + +dot1dTpAgingTime OBJECT-TYPE + SYNTAX Integer32 (10..1000000) + UNITS "seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The timeout period in seconds for aging out + dynamically-learned forwarding information. + 802.1D-1998 recommends a default of 300 seconds." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 2 } + +-- ---------------------------------------------------------- -- +-- The Forwarding Database for Transparent Bridges +-- ---------------------------------------------------------- -- + +dot1dTpFdbTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains information about unicast + entries for which the bridge has forwarding and/or + filtering information. This information is used + by the transparent bridging function in + determining how to propagate a received frame." + ::= { dot1dTp 3 } + +dot1dTpFdbEntry OBJECT-TYPE + SYNTAX Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Information about a specific unicast MAC address + for which the bridge has some forwarding and/or + filtering information." + INDEX { dot1dTpFdbAddress } + ::= { dot1dTpFdbTable 1 } + +Dot1dTpFdbEntry ::= + SEQUENCE { + dot1dTpFdbAddress + MacAddress, + dot1dTpFdbPort + Integer32, + dot1dTpFdbStatus + INTEGER + } + +dot1dTpFdbAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "A unicast MAC address for which the bridge has + forwarding and/or filtering information." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dTpFdbEntry 1 } + +dot1dTpFdbPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port on + which a frame having a source address equal to the value + of the corresponding instance of dot1dTpFdbAddress has + been seen. A value of '0' indicates that the port + number has not been learned, but that the bridge does + have some forwarding/filtering information about this + address (e.g., in the dot1dStaticTable). Implementors + are encouraged to assign the port value to this object + whenever it is learned, even for addresses for which the + corresponding value of dot1dTpFdbStatus is not + learned(3)." + ::= { dot1dTpFdbEntry 2 } + +dot1dTpFdbStatus OBJECT-TYPE + SYNTAX INTEGER { + other(1), + invalid(2), + learned(3), + self(4), + mgmt(5) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The status of this entry. The meanings of the + values are: + other(1) - none of the following. This would + include the case where some other MIB object + (not the corresponding instance of + dot1dTpFdbPort, nor an entry in the + dot1dStaticTable) is being used to determine if + and how frames addressed to the value of the + corresponding instance of dot1dTpFdbAddress are + being forwarded. + invalid(2) - this entry is no longer valid (e.g., + it was learned but has since aged out), but has + not yet been flushed from the table. + learned(3) - the value of the corresponding instance + of dot1dTpFdbPort was learned, and is being + used. + self(4) - the value of the corresponding instance of + dot1dTpFdbAddress represents one of the bridge's + addresses. The corresponding instance of + dot1dTpFdbPort indicates which of the bridge's + ports has this address. 
+ mgmt(5) - the value of the corresponding instance of + dot1dTpFdbAddress is also the value of an + existing instance of dot1dStaticAddress." + ::= { dot1dTpFdbEntry 3 } + +-- ---------------------------------------------------------- -- +-- Port Table for Transparent Bridges +-- ---------------------------------------------------------- -- + +dot1dTpPortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dTpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains information about every port that + is associated with this transparent bridge." + ::= { dot1dTp 4 } + +dot1dTpPortEntry OBJECT-TYPE + SYNTAX Dot1dTpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information for each port of a transparent + bridge." + INDEX { dot1dTpPort } + ::= { dot1dTpPortTable 1 } + +Dot1dTpPortEntry ::= + SEQUENCE { + dot1dTpPort + Integer32, + dot1dTpPortMaxInfo + Integer32, + dot1dTpPortInFrames + Counter32, + dot1dTpPortOutFrames + Counter32, + dot1dTpPortInDiscards + Counter32 + } + +dot1dTpPort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains Transparent bridging management information." + ::= { dot1dTpPortEntry 1 } + +-- It would be nice if we could use ifMtu as the size of the +-- largest INFO field, but we can't because ifMtu is defined +-- to be the size that the (inter-)network layer can use, which +-- can differ from the MAC layer (especially if several layers +-- of encapsulation are used). + +dot1dTpPortMaxInfo OBJECT-TYPE + SYNTAX Integer32 + UNITS "bytes" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum size of the INFO (non-MAC) field that + + this port will receive or transmit." + ::= { dot1dTpPortEntry 2 } + +dot1dTpPortInFrames OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames that have been received by this + port from its segment. Note that a frame received on the + interface corresponding to this port is only counted by + this object if and only if it is for a protocol being + processed by the local bridging function, including + bridge management frames." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 3 } + +dot1dTpPortOutFrames OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames that have been transmitted by this + port to its segment. Note that a frame transmitted on + the interface corresponding to this port is only counted + by this object if and only if it is for a protocol being + processed by the local bridging function, including + bridge management frames." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 4 } + +dot1dTpPortInDiscards OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Count of received valid frames that were discarded + (i.e., filtered) by the Forwarding Process." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 5 } + +-- ---------------------------------------------------------- -- + +-- The Static (Destination-Address Filtering) Database +-- ---------------------------------------------------------- -- +-- Implementation of this subtree is optional. 
+-- ---------------------------------------------------------- -- + +dot1dStaticTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing filtering information configured + into the bridge by (local or network) management + specifying the set of ports to which frames received + from specific ports and containing specific destination + addresses are allowed to be forwarded. The value of + zero in this table, as the port number from which frames + with a specific destination address are received, is + used to specify all ports for which there is no specific + entry in this table for that particular destination + address. Entries are valid for unicast and for + group/broadcast addresses." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + ::= { dot1dStatic 1 } + +dot1dStaticEntry OBJECT-TYPE + SYNTAX Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Filtering information configured into the bridge by + (local or network) management specifying the set of + ports to which frames received from a specific port and + containing a specific destination address are allowed to + be forwarded." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + INDEX { dot1dStaticAddress, dot1dStaticReceivePort } + ::= { dot1dStaticTable 1 } + +Dot1dStaticEntry ::= + SEQUENCE { + dot1dStaticAddress MacAddress, + dot1dStaticReceivePort Integer32, + dot1dStaticAllowedToGoTo OCTET STRING, + dot1dStaticStatus INTEGER + } + +dot1dStaticAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The destination MAC address in a frame to which this + entry's filtering information applies. This object can + take the value of a unicast address, a group address, or + the broadcast address." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dStaticEntry 1 } + +dot1dStaticReceivePort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port + from which a frame must be received in order for this + entry's filtering information to apply. A value of zero + indicates that this entry applies on all ports of the + bridge for which there is no other applicable entry." + ::= { dot1dStaticEntry 2 } + +dot1dStaticAllowedToGoTo OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..512)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The set of ports to which frames received from a + specific port and destined for a specific MAC address, + are allowed to be forwarded. Each octet within the + value of this object specifies a set of eight ports, + with the first octet specifying ports 1 through 8, the + second octet specifying ports 9 through 16, etc. Within + each octet, the most significant bit represents the + lowest numbered port, and the least significant bit + represents the highest numbered port. Thus, each port + of the bridge is represented by a single bit within the + value of this object. If that bit has a value of '1', + then that port is included in the set of ports; the port + is not included if its bit has a value of '0'. (Note + that the setting of the bit corresponding to the port + from which a frame is received is irrelevant.) The + default value of this object is a string of ones of + appropriate length. 
+ + The value of this object may exceed the required minimum + maximum message size of some SNMP transport (484 bytes, + in the case of SNMP over UDP, see RFC 3417, section 3.2). + SNMP engines on bridges supporting a large number of + ports must support appropriate maximum message sizes." + ::= { dot1dStaticEntry 3 } + +dot1dStaticStatus OBJECT-TYPE + SYNTAX INTEGER { + other(1), + invalid(2), + permanent(3), + deleteOnReset(4), + deleteOnTimeout(5) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object indicates the status of this entry. + The default value is permanent(3). + other(1) - this entry is currently in use but the + conditions under which it will remain so are + different from each of the following values. + invalid(2) - writing this value to the object + removes the corresponding entry. + permanent(3) - this entry is currently in use and + will remain so after the next reset of the + bridge. + deleteOnReset(4) - this entry is currently in use + and will remain so until the next reset of the + bridge. + deleteOnTimeout(5) - this entry is currently in use + and will remain so until it is aged out." + ::= { dot1dStaticEntry 4 } + +-- ---------------------------------------------------------- -- +-- Notifications for use by Bridges +-- ---------------------------------------------------------- -- +-- Notifications for the Spanning Tree Protocol +-- ---------------------------------------------------------- -- + +newRoot NOTIFICATION-TYPE + -- OBJECTS { } + STATUS current + DESCRIPTION + "The newRoot trap indicates that the sending agent has + become the new root of the Spanning Tree; the trap is + sent by a bridge soon after its election as the new + + root, e.g., upon expiration of the Topology Change Timer, + immediately subsequent to its election. Implementation + of this trap is optional." + ::= { dot1dNotifications 1 } + +topologyChange NOTIFICATION-TYPE + -- OBJECTS { } + STATUS current + DESCRIPTION + "A topologyChange trap is sent by a bridge when any of + its configured ports transitions from the Learning state + to the Forwarding state, or from the Forwarding state to + the Blocking state. The trap is not sent if a newRoot + trap is sent for the same transition. Implementation of + this trap is optional." + ::= { dot1dNotifications 2 } + +-- ---------------------------------------------------------- -- +-- IEEE 802.1D MIB - Conformance Information +-- ---------------------------------------------------------- -- + +dot1dGroups OBJECT IDENTIFIER ::= { dot1dConformance 1 } +dot1dCompliances OBJECT IDENTIFIER ::= { dot1dConformance 2 } + +-- ---------------------------------------------------------- -- +-- units of conformance +-- ---------------------------------------------------------- -- + +-- ---------------------------------------------------------- -- +-- the dot1dBase group +-- ---------------------------------------------------------- -- + +dot1dBaseBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dBaseBridgeAddress, + dot1dBaseNumPorts, + dot1dBaseType + } + STATUS current + DESCRIPTION + "Bridge level information for this device." + ::= { dot1dGroups 1 } + +dot1dBasePortGroup OBJECT-GROUP + OBJECTS { + dot1dBasePort, + dot1dBasePortIfIndex, + dot1dBasePortCircuit, + dot1dBasePortDelayExceededDiscards, + dot1dBasePortMtuExceededDiscards + } + STATUS current + DESCRIPTION + "Information for each port on this device." 
+ ::= { dot1dGroups 2 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp group +-- ---------------------------------------------------------- -- + +dot1dStpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dStpProtocolSpecification, + dot1dStpPriority, + dot1dStpTimeSinceTopologyChange, + dot1dStpTopChanges, + dot1dStpDesignatedRoot, + dot1dStpRootCost, + dot1dStpRootPort, + dot1dStpMaxAge, + dot1dStpHelloTime, + dot1dStpHoldTime, + dot1dStpForwardDelay, + dot1dStpBridgeMaxAge, + dot1dStpBridgeHelloTime, + dot1dStpBridgeForwardDelay + } + STATUS current + DESCRIPTION + "Bridge level Spanning Tree data for this device." + ::= { dot1dGroups 3 } + +dot1dStpPortGroup OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortPathCost, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 4 } + +dot1dStpPortGroup2 OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions, + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 5 } + +dot1dStpPortGroup3 OBJECT-GROUP + OBJECTS { + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for devices supporting 32-bit + path costs." + ::= { dot1dGroups 6 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp group +-- ---------------------------------------------------------- -- + +dot1dTpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dTpLearnedEntryDiscards, + dot1dTpAgingTime + } + STATUS current + DESCRIPTION + "Bridge level Transparent Bridging data." + ::= { dot1dGroups 7 } + +dot1dTpFdbGroup OBJECT-GROUP + OBJECTS { + + dot1dTpFdbAddress, + dot1dTpFdbPort, + dot1dTpFdbStatus + } + STATUS current + DESCRIPTION + "Filtering Database information for the Bridge." + ::= { dot1dGroups 8 } + +dot1dTpGroup OBJECT-GROUP + OBJECTS { + dot1dTpPort, + dot1dTpPortMaxInfo, + dot1dTpPortInFrames, + dot1dTpPortOutFrames, + dot1dTpPortInDiscards + } + STATUS current + DESCRIPTION + "Dynamic Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 9 } + +-- ---------------------------------------------------------- -- +-- The Static (Destination-Address Filtering) Database +-- ---------------------------------------------------------- -- + +dot1dStaticGroup OBJECT-GROUP + OBJECTS { + dot1dStaticAddress, + dot1dStaticReceivePort, + dot1dStaticAllowedToGoTo, + dot1dStaticStatus + } + STATUS current + DESCRIPTION + "Static Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 10 } + +-- ---------------------------------------------------------- -- +-- The Trap Notification Group +-- ---------------------------------------------------------- -- + +dot1dNotificationGroup NOTIFICATION-GROUP + NOTIFICATIONS { + newRoot, + topologyChange + } + STATUS current + DESCRIPTION + "Group of objects describing notifications (traps)." 
+ ::= { dot1dGroups 11 } + +-- ---------------------------------------------------------- -- +-- compliance statements +-- ---------------------------------------------------------- -- + +bridgeCompliance1493 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for device support of bridging + services, as per RFC1493." + + MODULE + MANDATORY-GROUPS { + dot1dBaseBridgeGroup, + dot1dBasePortGroup + } + + GROUP dot1dStpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol." + + GROUP dot1dStpPortGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol." + + GROUP dot1dTpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dTpFdbGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dTpGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dStaticGroup + DESCRIPTION + "Implementation of this group is optional." + + GROUP dot1dNotificationGroup + DESCRIPTION + "Implementation of this group is optional." + ::= { dot1dCompliances 1 } + +bridgeCompliance4188 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for device support of bridging + services. This supports 32-bit Path Cost values and the + more restricted bridge and port priorities, as per IEEE + 802.1t. + + Full support for the 802.1D management objects requires that + the SNMPv2-MIB [RFC3418] objects sysDescr, and sysUpTime, as + well as the IF-MIB [RFC2863] objects ifIndex, ifType, + ifDescr, ifPhysAddress, and ifLastChange are implemented." + + MODULE + MANDATORY-GROUPS { + dot1dBaseBridgeGroup, + dot1dBasePortGroup + } + + GROUP dot1dStpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the Spanning Tree Protocol." + + OBJECT dot1dStpPriority + SYNTAX Integer32 (0|4096|8192|12288|16384|20480|24576 + |28672|32768|36864|40960|45056|49152 + |53248|57344|61440) + DESCRIPTION + "The possible values defined by IEEE 802.1t." + + GROUP dot1dStpPortGroup2 + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the Spanning Tree Protocol." + + GROUP dot1dStpPortGroup3 + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol and 32-bit path + costs. In particular, this includes devices supporting + IEEE 802.1t and IEEE 802.1w." + + OBJECT dot1dStpPortPriority + SYNTAX Integer32 (0|16|32|48|64|80|96|112|128 + |144|160|176|192|208|224|240) + DESCRIPTION + "The possible values defined by IEEE 802.1t." + + GROUP dot1dTpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." + + GROUP dot1dTpFdbGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." 
+ + GROUP dot1dTpGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." + + GROUP dot1dStaticGroup + DESCRIPTION + "Implementation of this group is optional." + + GROUP dot1dNotificationGroup + DESCRIPTION + "Implementation of this group is optional." + ::= { dot1dCompliances 2 } + +END diff --git a/plugins/inputs/snmp/testdata/bridgeMibImports b/plugins/inputs/snmp/testdata/bridgeMibImports new file mode 100644 index 0000000000000..8f6a52bd36058 --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMibImports @@ -0,0 +1,554 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. + +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) 
+ +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + 
value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." + ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as 
defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InterfaceIndex ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A unique value, greater than zero, for each interface or + interface sub-layer in the managed system. It is + recommended that values are assigned contiguously starting + from 1. The value for each interface sub-layer must remain + constant at least from one re-initialization of the entity's + network management system to the next re-initialization." + SYNTAX Integer32 (1..2147483647) + + + +MacAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents an 802 MAC address represented in the + `canonical' order defined by IEEE 802.1a, i.e., as if it + were transmitted least significant bit first, even though + 802.5 (in contrast to other 802.x protocols) requires MAC + addresses to be transmitted most significant bit first." + SYNTAX OCTET STRING (SIZE (6)) + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/foo b/plugins/inputs/snmp/testdata/foo new file mode 100644 index 0000000000000..4e9bf7f9d16f9 --- /dev/null +++ b/plugins/inputs/snmp/testdata/foo @@ -0,0 +1,30 @@ +FOOTEST-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +fooTestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +fooMIBObjects OBJECT IDENTIFIER ::= { iso 2 } +fooOne OBJECT IDENTIFIER ::= { iso 1 } +six OBJECT IDENTIFIER ::= { fooOne 1 } +three OBJECT IDENTIFIER ::= { six 3 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { fooMIBObjects 3 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/fooImports b/plugins/inputs/snmp/testdata/fooImports new file mode 100644 index 0000000000000..6cbed24de4b95 --- /dev/null +++ b/plugins/inputs/snmp/testdata/fooImports @@ -0,0 +1,169 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 2 } +internet OBJECT IDENTIFIER ::= { dod 3 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/ifPhysAddress b/plugins/inputs/snmp/testdata/ifPhysAddress new file mode 100644 index 0000000000000..8ac5b5a2e9489 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddress @@ -0,0 +1,84 @@ +IF-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, mib-2, + PhysAddress FROM ifPhysAddressImports; + +ifMIB MODULE-IDENTITY + LAST-UPDATED "200006140000Z" + ORGANIZATION "IETF Interfaces MIB Working Group" + CONTACT-INFO + " Keith McCloghrie + Cisco Systems, Inc. + 170 West Tasman Drive + San Jose, CA 95134-1706 + US + + 408-526-5260 + kzm@cisco.com" + DESCRIPTION + "The MIB module to describe generic objects for network + interface sub-layers. This MIB is an updated version of + MIB-II's ifTable, and incorporates the extensions defined in + RFC 1229." + + REVISION "200006140000Z" + DESCRIPTION + "Clarifications agreed upon by the Interfaces MIB WG, and + published as RFC 2863." 
+ REVISION "199602282155Z" + DESCRIPTION + "Revisions made by the Interfaces MIB WG, and published in + RFC 2233." + REVISION "199311082155Z" + DESCRIPTION + "Initial revision, published as part of RFC 1573." + ::= { mib-2 31 } + +ifMIBObjects OBJECT IDENTIFIER ::= { ifMIB 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of interface entries. The number of entries is + given by the value of ifNumber." + ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An entry containing management information applicable to a + particular interface." + INDEX { ifIndex } + ::= { ifTable 1 } + + + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. For interfaces which do not have + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { ifEntry 9 } + +END diff --git a/plugins/inputs/snmp/testdata/ifPhysAddressImports b/plugins/inputs/snmp/testdata/ifPhysAddressImports new file mode 100644 index 0000000000000..316f665b4f916 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddressImports @@ -0,0 +1,254 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + 
| empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +PhysAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents media- or physical-level addresses." + SYNTAX OCTET STRING + + +END diff --git a/plugins/inputs/snmp/testdata/server b/plugins/inputs/snmp/testdata/server new file mode 100644 index 0000000000000..4f97618d62ef3 --- /dev/null +++ b/plugins/inputs/snmp/testdata/server @@ -0,0 +1,57 @@ +TEST DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +server OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +connections OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +latency OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +description OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/serverImports b/plugins/inputs/snmp/testdata/serverImports new file mode 100644 index 0000000000000..6bfb238234f07 --- /dev/null +++ b/plugins/inputs/snmp/testdata/serverImports @@ -0,0 +1,174 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 1 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 1 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/snmpd.conf b/plugins/inputs/snmp/testdata/snmpd.conf deleted file mode 100644 index 3f3151a6550c0..0000000000000 --- a/plugins/inputs/snmp/testdata/snmpd.conf +++ /dev/null @@ -1,17 +0,0 @@ -# This config provides the data represented in the plugin documentation -# Requires net-snmp >= 5.7 - -#agentaddress UDP:127.0.0.1:1161 -rocommunity public - -override .1.0.0.0.1.1.0 octet_str "foo" -override .1.0.0.0.1.1.1 octet_str "bar" -override .1.0.0.0.1.102 octet_str "bad" -override .1.0.0.0.1.2.0 integer 1 -override .1.0.0.0.1.2.1 integer 2 -override .1.0.0.0.1.3.0 octet_str "0.123" -override .1.0.0.0.1.3.1 octet_str "0.456" -override .1.0.0.0.1.3.2 octet_str "9.999" -override .1.0.0.1.1 octet_str "baz" -override .1.0.0.1.2 uinteger 54321 -override .1.0.0.1.3 uinteger 234 diff --git a/plugins/inputs/snmp/testdata/tableBuild b/plugins/inputs/snmp/testdata/tableBuild new file mode 100644 index 0000000000000..0551bfd6dd1d4 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableBuild @@ -0,0 +1,57 @@ +TEST DEFINITIONS 
::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +myfield1 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +myfield2 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +myfield3 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +myfield4 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMib b/plugins/inputs/snmp/testdata/tableMib new file mode 100644 index 0000000000000..be13c1c1cc510 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMib @@ -0,0 +1,2613 @@ +RFC1213-MIB DEFINITIONS ::= BEGIN + +IMPORTS + mgmt, NetworkAddress, IpAddress, Counter, Gauge, + TimeTicks + FROM RFC1155-SMI + OBJECT-TYPE + FROM fooImports; + +-- This MIB module uses the extended OBJECT-TYPE macro as +-- defined in [14]; + +-- MIB-II (same prefix as MIB-I) + +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +-- textual conventions + +DisplayString ::= + OCTET STRING +-- This data type is used to model textual information taken +-- from the NVT ASCII character set. By convention, objects +-- with this syntax are declared as having + +-- +-- SIZE (0..255) + +PhysAddress ::= + OCTET STRING +-- This data type is used to model media addresses. For many +-- types of media, this will be in a binary representation. +-- For example, an ethernet address would be represented as +-- a string of 6 octets. + +-- groups in MIB-II + +system OBJECT IDENTIFIER ::= { mib-2 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + +at OBJECT IDENTIFIER ::= { mib-2 3 } + +ip OBJECT IDENTIFIER ::= { mib-2 4 } + +icmp OBJECT IDENTIFIER ::= { mib-2 5 } + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +udp OBJECT IDENTIFIER ::= { mib-2 7 } + +egp OBJECT IDENTIFIER ::= { mib-2 8 } + +-- historical (some say hysterical) +-- cmot OBJECT IDENTIFIER ::= { mib-2 9 } + +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +snmp OBJECT IDENTIFIER ::= { mib-2 11 } + +-- the System group + +-- Implementation of the System group is mandatory for all +-- systems. If an agent is not configured to have a value +-- for any of these variables, a string of length 0 is +-- returned. + +sysDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual description of the entity. This value + should include the full name and version + identification of the system's hardware type, + software operating-system, and networking + software. It is mandatory that this only contain + printable ASCII characters." 
+ ::= { system 1 } + +sysObjectID OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The vendor's authoritative identification of the + network management subsystem contained in the + entity. This value is allocated within the SMI + enterprises subtree (1.3.6.1.4.1) and provides an + easy and unambiguous means for determining `what + kind of box' is being managed. For example, if + vendor `Flintstones, Inc.' was assigned the + subtree 1.3.6.1.4.1.4242, it could assign the + identifier 1.3.6.1.4.1.4242.1.1 to its `Fred + Router'." + ::= { system 2 } + +sysUpTime OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The time (in hundredths of a second) since the + network management portion of the system was last + re-initialized." + ::= { system 3 } + +sysContact OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The textual identification of the contact person + for this managed node, together with information + on how to contact this person." + ::= { system 4 } + +sysName OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An administratively-assigned name for this + managed node. By convention, this is the node's + fully-qualified domain name." + ::= { system 5 } + +sysLocation OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The physical location of this node (e.g., + `telephone closet, 3rd floor')." + ::= { system 6 } + +sysServices OBJECT-TYPE + SYNTAX INTEGER (0..127) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A value which indicates the set of services that + this entity primarily offers. + + The value is a sum. This sum initially takes the + value zero, Then, for each layer, L, in the range + 1 through 7, that this node performs transactions + for, 2 raised to (L - 1) is added to the sum. For + example, a node which performs primarily routing + functions would have a value of 4 (2^(3-1)). In + contrast, a node which is a host offering + application services would have a value of 72 + (2^(4-1) + 2^(7-1)). Note that in the context of + the Internet suite of protocols, values should be + calculated accordingly: + + layer functionality + 1 physical (e.g., repeaters) + 2 datalink/subnetwork (e.g., bridges) + 3 internet (e.g., IP gateways) + 4 end-to-end (e.g., IP hosts) + 7 applications (e.g., mail relays) + + For systems including OSI protocols, layers 5 and + 6 may also be counted." + ::= { system 7 } + +-- the Interfaces group + +-- Implementation of the Interfaces group is mandatory for +-- all systems. + +ifNumber OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of network interfaces (regardless of + their current state) present on this system." + ::= { interfaces 1 } + +-- the Interfaces table + +-- The Interfaces table contains information on the entity's +-- interfaces. Each interface is thought of as being +-- attached to a `subnetwork'. Note that this term should +-- not be confused with `subnet' which refers to an +-- addressing partitioning scheme used in the Internet suite +-- of protocols. + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A list of interface entries. The number of + entries is given by the value of ifNumber." 
+ ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "An interface entry containing objects at the + subnetwork layer and below for a particular + interface." + INDEX { ifIndex } + ::= { ifTable 1 } + +IfEntry ::= + SEQUENCE { + ifIndex + INTEGER, + ifDescr + DisplayString, + ifType + INTEGER, + ifMtu + INTEGER, + ifSpeed + Gauge, + ifPhysAddress + PhysAddress, + ifAdminStatus + INTEGER, + ifOperStatus + INTEGER, + ifLastChange + TimeTicks, + ifInOctets + Counter, + ifInUcastPkts + Counter, + ifInNUcastPkts + Counter, + ifInDiscards + Counter, + ifInErrors + Counter, + ifInUnknownProtos + Counter, + ifOutOctets + Counter, + ifOutUcastPkts + Counter, + ifOutNUcastPkts + Counter, + ifOutDiscards + Counter, + ifOutErrors + Counter, + ifOutQLen + Gauge, + ifSpecific + OBJECT IDENTIFIER + } + +ifIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A unique value for each interface. Its value + ranges between 1 and the value of ifNumber. The + value for each interface must remain constant at + least from one re-initialization of the entity's + network management system to the next re- + initialization." + ::= { ifEntry 1 } + +ifDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual string containing information about the + interface. This string should include the name of + the manufacturer, the product name and the version + of the hardware interface." + ::= { ifEntry 2 } + +ifType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + regular1822(2), + hdh1822(3), + ddn-x25(4), + rfc877-x25(5), + ethernet-csmacd(6), + iso88023-csmacd(7), + iso88024-tokenBus(8), + iso88025-tokenRing(9), + iso88026-man(10), + starLan(11), + proteon-10Mbit(12), + proteon-80Mbit(13), + hyperchannel(14), + fddi(15), + lapb(16), + sdlc(17), + ds1(18), -- T-1 + e1(19), -- european equiv. of T-1 + basicISDN(20), + primaryISDN(21), -- proprietary serial + propPointToPointSerial(22), + ppp(23), + softwareLoopback(24), + eon(25), -- CLNP over IP [11] + ethernet-3Mbit(26), + nsip(27), -- XNS over IP + slip(28), -- generic SLIP + ultra(29), -- ULTRA technologies + ds3(30), -- T-3 + sip(31), -- SMDS + frame-relay(32) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The type of interface, distinguished according to + the physical/link protocol(s) immediately `below' + the network layer in the protocol stack." + ::= { ifEntry 3 } + +ifMtu OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest datagram which can be + sent/received on the interface, specified in + octets. For interfaces that are used for + transmitting network datagrams, this is the size + of the largest network datagram that can be sent + on the interface." + ::= { ifEntry 4 } + +ifSpeed OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "An estimate of the interface's current bandwidth + in bits per second. For interfaces which do not + vary in bandwidth or for those where no accurate + estimation can be made, this object should contain + the nominal bandwidth." + ::= { ifEntry 5 } + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. 
For interfaces which do not have + + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +ifAdminStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The desired state of the interface. The + testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 7 } + +ifOperStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The current operational state of the interface. + The testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 8 } + +ifLastChange OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of sysUpTime at the time the interface + entered its current operational state. If the + current state was entered prior to the last re- + initialization of the local network management + subsystem, then this object contains a zero + value." + ::= { ifEntry 9 } + +ifInOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets received on the + interface, including framing characters." + ::= { ifEntry 10 } + +ifInUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of subnetwork-unicast packets + delivered to a higher-layer protocol." + ::= { ifEntry 11 } + +ifInNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of non-unicast (i.e., subnetwork- + broadcast or subnetwork-multicast) packets + delivered to a higher-layer protocol." + ::= { ifEntry 12 } + +ifInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being deliverable to a + higher-layer protocol. One possible reason for + discarding such a packet could be to free up + buffer space." + ::= { ifEntry 13 } + +ifInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets that contained + errors preventing them from being deliverable to a + higher-layer protocol." + ::= { ifEntry 14 } + +ifInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of packets received via the interface + which were discarded because of an unknown or + unsupported protocol." + ::= { ifEntry 15 } + +ifOutOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets transmitted out of the + interface, including framing characters." + ::= { ifEntry 16 } + +ifOutUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a + subnetwork-unicast address, including those that + were discarded or not sent." + ::= { ifEntry 17 } + +ifOutNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a non- + unicast (i.e., a subnetwork-broadcast or + subnetwork-multicast) address, including those + that were discarded or not sent." 
+ ::= { ifEntry 18 } + +ifOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets which were chosen + + to be discarded even though no errors had been + detected to prevent their being transmitted. One + possible reason for discarding such a packet could + be to free up buffer space." + ::= { ifEntry 19 } + +ifOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets that could not be + transmitted because of errors." + ::= { ifEntry 20 } + +ifOutQLen OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The length of the output packet queue (in + packets)." + ::= { ifEntry 21 } + +ifSpecific OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular media being used to realize the + interface. For example, if the interface is + realized by an ethernet, then the value of this + object refers to a document defining objects + specific to ethernet. If this information is not + present, its value should be set to the OBJECT + IDENTIFIER { 0 0 }, which is a syntatically valid + object identifier, and any conformant + implementation of ASN.1 and BER must be able to + generate and recognize this value." + ::= { ifEntry 22 } + +-- the Address Translation group + +-- Implementation of the Address Translation group is +-- mandatory for all systems. Note however that this group +-- is deprecated by MIB-II. That is, it is being included + +-- solely for compatibility with MIB-I nodes, and will most +-- likely be excluded from MIB-III nodes. From MIB-II and +-- onwards, each network protocol group contains its own +-- address translation tables. + +-- The Address Translation group contains one table which is +-- the union across all interfaces of the translation tables +-- for converting a NetworkAddress (e.g., an IP address) into +-- a subnetwork-specific address. For lack of a better term, +-- this document refers to such a subnetwork-specific address +-- as a `physical' address. + +-- Examples of such translation tables are: for broadcast +-- media where ARP is in use, the translation table is +-- equivalent to the ARP cache; or, on an X.25 network where +-- non-algorithmic translation to X.121 addresses is +-- required, the translation table contains the +-- NetworkAddress to X.121 address equivalences. + +atTable OBJECT-TYPE + SYNTAX SEQUENCE OF AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "The Address Translation tables contain the + NetworkAddress to `physical' address equivalences. + Some interfaces do not use translation tables for + determining address equivalences (e.g., DDN-X.25 + has an algorithmic method); if all interfaces are + of this type, then the Address Translation table + is empty, i.e., has zero entries." + ::= { at 1 } + +atEntry OBJECT-TYPE + SYNTAX AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "Each entry contains one NetworkAddress to + `physical' address equivalence." + INDEX { atIfIndex, + atNetAddress } + ::= { atTable 1 } + +AtEntry ::= + SEQUENCE { + atIfIndex + INTEGER, + atPhysAddress + PhysAddress, + atNetAddress + NetworkAddress + } + +atIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The interface on which this entry's equivalence + is effective. 
The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { atEntry 1 } + +atPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The media-dependent `physical' address. + + Setting this object to a null string (one of zero + length) has the effect of invaliding the + corresponding entry in the atTable object. That + is, it effectively dissasociates the interface + identified with said entry from the mapping + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant atPhysAddress object." + ::= { atEntry 2 } + +atNetAddress OBJECT-TYPE + SYNTAX NetworkAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The NetworkAddress (e.g., the IP address) + corresponding to the media-dependent `physical' + address." + ::= { atEntry 3 } + +-- the IP group + +-- Implementation of the IP group is mandatory for all +-- systems. + +ipForwarding OBJECT-TYPE + SYNTAX INTEGER { + forwarding(1), -- acting as a gateway + not-forwarding(2) -- NOT acting as a gateway + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The indication of whether this entity is acting + as an IP gateway in respect to the forwarding of + datagrams received by, but not addressed to, this + entity. IP gateways forward datagrams. IP hosts + do not (except those source-routed via the host). + + Note that for some managed nodes, this object may + take on only a subset of the values possible. + Accordingly, it is appropriate for an agent to + return a `badValue' response if a management + station attempts to change this object to an + inappropriate value." + ::= { ip 1 } + +ipDefaultTTL OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The default value inserted into the Time-To-Live + field of the IP header of datagrams originated at + this entity, whenever a TTL value is not supplied + by the transport layer protocol." + ::= { ip 2 } + +ipInReceives OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams received from + interfaces, including those received in error." + ::= { ip 3 } + +ipInHdrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded due to + errors in their IP headers, including bad + checksums, version number mismatch, other format + errors, time-to-live exceeded, errors discovered + in processing their IP options, etc." + ::= { ip 4 } + +ipInAddrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded because + the IP address in their IP header's destination + field was not a valid address to be received at + this entity. This count includes invalid + addresses (e.g., 0.0.0.0) and addresses of + unsupported Classes (e.g., Class E). For entities + which are not IP Gateways and therefore do not + forward datagrams, this counter includes datagrams + discarded because the destination address was not + a local address." 
+ ::= { ip 5 } + +ipForwDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams for which this + entity was not their final IP destination, as a + result of which an attempt was made to find a + route to forward them to that final destination. + In entities which do not act as IP Gateways, this + counter will include only those packets which were + Source-Routed via this entity, and the Source- + Route option processing was successful." + ::= { ip 6 } + +ipInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally-addressed datagrams + received successfully but discarded because of an + unknown or unsupported protocol." + ::= { ip 7 } + +ipInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input IP datagrams for which no + problems were encountered to prevent their + continued processing, but which were discarded + (e.g., for lack of buffer space). Note that this + counter does not include any datagrams discarded + while awaiting re-assembly." + ::= { ip 8 } + +ipInDelivers OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams successfully + delivered to IP user-protocols (including ICMP)." + ::= { ip 9 } + +ipOutRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of IP datagrams which local IP + user-protocols (including ICMP) supplied to IP in + requests for transmission. Note that this counter + does not include any datagrams counted in + ipForwDatagrams." + ::= { ip 10 } + +ipOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of output IP datagrams for which no + + problem was encountered to prevent their + transmission to their destination, but which were + discarded (e.g., for lack of buffer space). Note + that this counter would include datagrams counted + in ipForwDatagrams if any such packets met this + (discretionary) discard criterion." + ::= { ip 11 } + +ipOutNoRoutes OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams discarded because no + route could be found to transmit them to their + destination. Note that this counter includes any + packets counted in ipForwDatagrams which meet this + `no-route' criterion. Note that this includes any + datagarms which a host cannot route because all of + its default gateways are down." + ::= { ip 12 } + +ipReasmTimeout OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum number of seconds which received + fragments are held while they are awaiting + reassembly at this entity." + ::= { ip 13 } + +ipReasmReqds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP fragments received which needed + to be reassembled at this entity." + ::= { ip 14 } + +ipReasmOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams successfully re- + assembled." + ::= { ip 15 } + +ipReasmFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of failures detected by the IP re- + assembly algorithm (for whatever reason: timed + out, errors, etc). 
Note that this is not + necessarily a count of discarded IP fragments + since some algorithms (notably the algorithm in + RFC 815) can lose track of the number of fragments + by combining them as they are received." + ::= { ip 16 } + +ipFragOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + successfully fragmented at this entity." + ::= { ip 17 } + +ipFragFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + discarded because they needed to be fragmented at + this entity but could not be, e.g., because their + Don't Fragment flag was set." + ::= { ip 18 } + +ipFragCreates OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagram fragments that have + been generated as a result of fragmentation at + this entity." + ::= { ip 19 } + +-- the IP address table + +-- The IP address table contains this entity's IP addressing +-- information. + +ipAddrTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The table of addressing information relevant to + this entity's IP addresses." + ::= { ip 20 } + +ipAddrEntry OBJECT-TYPE + SYNTAX IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The addressing information for one of this + entity's IP addresses." + INDEX { ipAdEntAddr } + ::= { ipAddrTable 1 } + +IpAddrEntry ::= + SEQUENCE { + ipAdEntAddr + IpAddress, + ipAdEntIfIndex + INTEGER, + ipAdEntNetMask + IpAddress, + ipAdEntBcastAddr + INTEGER, + ipAdEntReasmMaxSize + INTEGER (0..65535) + } + +ipAdEntAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address to which this entry's addressing + information pertains." + ::= { ipAddrEntry 1 } + +ipAdEntIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + interface to which this entry is applicable. The + interface identified by a particular value of this + index is the same interface as identified by the + same value of ifIndex." + ::= { ipAddrEntry 2 } + +ipAdEntNetMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The subnet mask associated with the IP address of + this entry. The value of the mask is an IP + address with all the network bits set to 1 and all + the hosts bits set to 0." + ::= { ipAddrEntry 3 } + +ipAdEntBcastAddr OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of the least-significant bit in the IP + broadcast address used for sending datagrams on + the (logical) interface associated with the IP + address of this entry. For example, when the + Internet standard all-ones broadcast address is + used, the value will be 1. This value applies to + both the subnet and network broadcasts addresses + used by the entity on this (logical) interface." + ::= { ipAddrEntry 4 } + +ipAdEntReasmMaxSize OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest IP datagram which this + entity can re-assemble from incoming IP fragmented + datagrams received on this interface." + ::= { ipAddrEntry 5 } + +-- the IP routing table + +-- The IP routing table contains an entry for each route +-- presently known to this entity. 
+ +ipRouteTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "This entity's IP Routing table." + ::= { ip 21 } + +ipRouteEntry OBJECT-TYPE + SYNTAX IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A route to a particular destination." + INDEX { ipRouteDest } + ::= { ipRouteTable 1 } + +IpRouteEntry ::= + SEQUENCE { + ipRouteDest + IpAddress, + ipRouteIfIndex + INTEGER, + ipRouteMetric1 + INTEGER, + ipRouteMetric2 + INTEGER, + ipRouteMetric3 + INTEGER, + ipRouteMetric4 + INTEGER, + ipRouteNextHop + IpAddress, + ipRouteType + INTEGER, + ipRouteProto + INTEGER, + ipRouteAge + INTEGER, + ipRouteMask + IpAddress, + ipRouteMetric5 + INTEGER, + ipRouteInfo + OBJECT IDENTIFIER + } + +ipRouteDest OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The destination IP address of this route. An + entry with a value of 0.0.0.0 is considered a + default route. Multiple routes to a single + destination can appear in the table, but access to + such multiple entries is dependent on the table- + access mechanisms defined by the network + management protocol in use." + ::= { ipRouteEntry 1 } + +ipRouteIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + local interface through which the next hop of this + route should be reached. The interface identified + by a particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipRouteEntry 2 } + +ipRouteMetric1 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The primary routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 3 } + +ipRouteMetric2 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 4 } + +ipRouteMetric3 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 5 } + +ipRouteMetric4 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 6 } + +ipRouteNextHop OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IP address of the next hop of this route. 
+ (In the case of a route bound to an interface + which is realized via a broadcast media, the value + of this field is the agent's IP address on that + interface.)" + ::= { ipRouteEntry 7 } + +ipRouteType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + invalid(2), -- an invalidated route + + -- route to directly + direct(3), -- connected (sub-)network + + -- route to a non-local + indirect(4) -- host/network/sub-network + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of route. Note that the values + direct(3) and indirect(4) refer to the notion of + direct and indirect routing in the IP + architecture. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipRouteTable object. That is, it + effectively dissasociates the destination + identified with said entry from the route + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant ipRouteType object." + ::= { ipRouteEntry 8 } + +ipRouteProto OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + -- non-protocol information, + -- e.g., manually configured + local(2), -- entries + + -- set via a network + netmgmt(3), -- management protocol + + -- obtained via ICMP, + icmp(4), -- e.g., Redirect + + -- the remaining values are + -- all gateway routing + -- protocols + egp(5), + ggp(6), + hello(7), + rip(8), + is-is(9), + es-is(10), + ciscoIgrp(11), + bbnSpfIgp(12), + ospf(13), + bgp(14) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The routing mechanism via which this route was + learned. Inclusion of values for gateway routing + protocols is not intended to imply that hosts + should support those protocols." + ::= { ipRouteEntry 9 } + +ipRouteAge OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The number of seconds since this route was last + updated or otherwise determined to be correct. + Note that no semantics of `too old' can be implied + except through knowledge of the routing protocol + by which the route was learned." + ::= { ipRouteEntry 10 } + +ipRouteMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicate the mask to be logical-ANDed with the + destination address before being compared to the + value in the ipRouteDest field. For those systems + that do not support arbitrary subnet masks, an + agent constructs the value of the ipRouteMask by + determining whether the value of the correspondent + ipRouteDest field belong to a class-A, B, or C + network, and then using one of: + + mask network + 255.0.0.0 class-A + 255.255.0.0 class-B + 255.255.255.0 class-C + + If the value of the ipRouteDest is 0.0.0.0 (a + default route), then the mask value is also + 0.0.0.0. It should be noted that all IP routing + subsystems implicitly use this mechanism." + ::= { ipRouteEntry 11 } + +ipRouteMetric5 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." 
+ ::= { ipRouteEntry 12 } + +ipRouteInfo OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular routing protocol which is responsible + for this route, as determined by the value + specified in the route's ipRouteProto value. If + this information is not present, its value should + be set to the OBJECT IDENTIFIER { 0 0 }, which is + a syntatically valid object identifier, and any + conformant implementation of ASN.1 and BER must be + able to generate and recognize this value." + ::= { ipRouteEntry 13 } + +-- the IP Address Translation table + +-- The IP address translation table contain the IpAddress to +-- `physical' address equivalences. Some interfaces do not +-- use translation tables for determining address +-- equivalences (e.g., DDN-X.25 has an algorithmic method); +-- if all interfaces are of this type, then the Address +-- Translation table is empty, i.e., has zero entries. + +ipNetToMediaTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The IP Address Translation table used for mapping + from IP addresses to physical addresses." + ::= { ip 22 } + +ipNetToMediaEntry OBJECT-TYPE + SYNTAX IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Each entry contains one IpAddress to `physical' + address equivalence." + INDEX { ipNetToMediaIfIndex, + ipNetToMediaNetAddress } + ::= { ipNetToMediaTable 1 } + +IpNetToMediaEntry ::= + SEQUENCE { + ipNetToMediaIfIndex + INTEGER, + ipNetToMediaPhysAddress + PhysAddress, + ipNetToMediaNetAddress + IpAddress, + ipNetToMediaType + INTEGER + } + +ipNetToMediaIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipNetToMediaEntry 1 } + +ipNetToMediaPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The media-dependent `physical' address." + ::= { ipNetToMediaEntry 2 } + +ipNetToMediaNetAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IpAddress corresponding to the media- + dependent `physical' address." + ::= { ipNetToMediaEntry 3 } + +ipNetToMediaType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + invalid(2), -- an invalidated mapping + dynamic(3), + static(4) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of mapping. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipNetToMediaTable. That is, it effectively + dissasociates the interface identified with said + entry from the mapping identified with said entry. + It is an implementation-specific matter as to + whether the agent removes an invalidated entry + from the table. Accordingly, management stations + must be prepared to receive tabular information + from agents that corresponds to entries not + currently in use. Proper interpretation of such + entries requires examination of the relevant + ipNetToMediaType object." 
+ ::= { ipNetToMediaEntry 4 } + +-- additional IP objects + +ipRoutingDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of routing entries which were chosen + to be discarded even though they are valid. One + possible reason for discarding such an entry could + be to free-up buffer space for other routing + + entries." + ::= { ip 23 } + +-- the ICMP group + +-- Implementation of the ICMP group is mandatory for all +-- systems. + +icmpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which the + entity received. Note that this counter includes + all those counted by icmpInErrors." + ::= { icmp 1 } + +icmpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which the entity + received but determined as having ICMP-specific + errors (bad ICMP checksums, bad length, etc.)." + ::= { icmp 2 } + +icmpInDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages received." + ::= { icmp 3 } + +icmpInTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages + received." + ::= { icmp 4 } + +icmpInParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + received." + ::= { icmp 5 } + +icmpInSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages + received." + ::= { icmp 6 } + +icmpInRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages received." + ::= { icmp 7 } + +icmpInEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages + received." + ::= { icmp 8 } + +icmpInEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages received." + ::= { icmp 9 } + +icmpInTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + received." + ::= { icmp 10 } + +icmpInTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + received." + ::= { icmp 11 } + +icmpInAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + received." + ::= { icmp 12 } + +icmpInAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + received." + ::= { icmp 13 } + +icmpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which this + entity attempted to send. Note that this counter + includes all those counted by icmpOutErrors." + ::= { icmp 14 } + +icmpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which this entity did + not send due to problems discovered within ICMP + + such as a lack of buffers. 
This value should not + include errors discovered outside the ICMP layer + such as the inability of IP to route the resultant + datagram. In some implementations there may be no + types of error which contribute to this counter's + value." + ::= { icmp 15 } + +icmpOutDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages sent." + ::= { icmp 16 } + +icmpOutTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages sent." + ::= { icmp 17 } + +icmpOutParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + sent." + ::= { icmp 18 } + +icmpOutSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages sent." + ::= { icmp 19 } + +icmpOutRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages sent. For a + + host, this object will always be zero, since hosts + do not send redirects." + ::= { icmp 20 } + +icmpOutEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages sent." + ::= { icmp 21 } + +icmpOutEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages sent." + ::= { icmp 22 } + +icmpOutTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + sent." + ::= { icmp 23 } + +icmpOutTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + sent." + ::= { icmp 24 } + +icmpOutAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + sent." + ::= { icmp 25 } + +icmpOutAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + sent." + ::= { icmp 26 } + +-- the TCP group + +-- Implementation of the TCP group is mandatory for all +-- systems that implement the TCP. + +-- Note that instances of object types that represent +-- information about a particular TCP connection are +-- transient; they persist only as long as the connection +-- in question. + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4) -- Van Jacobson's algorithm [10] + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The algorithm used to determine the timeout value + used for retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The minimum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + LBOUND quantity described in RFC 793." 
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + UBOUND quantity described in RFC 793." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The limit on the total number of TCP connections + the entity can support. In entities where the + maximum number of connections is dynamic, this + object should contain the value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-SENT state from the + CLOSED state." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-RCVD state from the + LISTEN state." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the SYN-SENT state or the SYN-RCVD state, plus the + number of times TCP connections have made a direct + transition to the LISTEN state from the SYN-RCVD + state." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the ESTABLISHED state or the CLOSE-WAIT state." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP connections for which the + current state is either ESTABLISHED or CLOSE- + WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received, including + those received in error. This count includes + segments received on currently established + connections." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments sent, including + those on current connections but excluding those + containing only retransmitted octets." + ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments retransmitted - that + is, the number of TCP segments transmitted + containing one or more previously transmitted + octets." + ::= { tcp 12 } + +-- the TCP Connection table + +-- The TCP connection table contains information about this +-- entity's existing TCP connections. + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing TCP connection-specific + information." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current TCP + connection. 
An object of this type is transient, + in that it ceases to exist when (or soon after) + the connection makes the transition to the CLOSED + state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= + SEQUENCE { + tcpConnState + INTEGER, + tcpConnLocalAddress + IpAddress, + tcpConnLocalPort + INTEGER (0..65535), + tcpConnRemAddress + IpAddress, + tcpConnRemPort + INTEGER (0..65535) + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The state of this TCP connection. + + The only value which may be set by a management + station is deleteTCB(12). Accordingly, it is + appropriate for an agent to return a `badValue' + response if a management station attempts to set + this object to any other value. + + If a management station sets this object to the + value deleteTCB(12), then this has the effect of + deleting the TCB (as defined in RFC 793) of the + corresponding connection on the managed node, + resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST + + segment may be sent from the managed node to the + other TCP endpoint (note however that RST segments + are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this TCP connection. In + the case of a connection in the listen state which + is willing to accept connections for any IP + interface associated with the node, the value + 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote IP address for this TCP connection." + ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- additional TCP objects + +tcpInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received in error + (e.g., bad TCP checksums)." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP segments sent containing the + RST flag." + ::= { tcp 15 } + +-- the UDP group + +-- Implementation of the UDP group is mandatory for all +-- systems which implement the UDP. + +udpInDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams delivered to + UDP users." + ::= { udp 1 } + +udpNoPorts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of received UDP datagrams for + which there was no application at the destination + port." 
+ ::= { udp 2 } + +udpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of received UDP datagrams that could + not be delivered for reasons other than the lack + of an application at the destination port." + ::= { udp 3 } + +udpOutDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams sent from this + entity." + ::= { udp 4 } + +-- the UDP Listener table + +-- The UDP listener table contains information about this +-- entity's UDP end-points on which a local application is +-- currently accepting datagrams. + +udpTable OBJECT-TYPE + SYNTAX SEQUENCE OF UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing UDP listener information." + ::= { udp 5 } + +udpEntry OBJECT-TYPE + SYNTAX UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current UDP + listener." + INDEX { udpLocalAddress, udpLocalPort } + ::= { udpTable 1 } + +UdpEntry ::= + SEQUENCE { + udpLocalAddress + IpAddress, + udpLocalPort + INTEGER (0..65535) + } + +udpLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this UDP listener. In + + the case of a UDP listener which is willing to + accept datagrams for any IP interface associated + with the node, the value 0.0.0.0 is used." + ::= { udpEntry 1 } + +udpLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this UDP listener." + ::= { udpEntry 2 } + +-- the EGP group + +-- Implementation of the EGP group is mandatory for all +-- systems which implement the EGP. + +egpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without + error." + ::= { egp 1 } + +egpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received that proved + to be in error." + ::= { egp 2 } + +egpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of locally generated EGP + messages." + ::= { egp 3 } + +egpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent due to resource limitations within an EGP + entity." + ::= { egp 4 } + +-- the EGP Neighbor table + +-- The EGP neighbor table contains information about this +-- entity's EGP neighbors. + +egpNeighTable OBJECT-TYPE + SYNTAX SEQUENCE OF EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The EGP neighbor table." + ::= { egp 5 } + +egpNeighEntry OBJECT-TYPE + SYNTAX EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about this entity's relationship with + a particular EGP neighbor." 
+ INDEX { egpNeighAddr } + ::= { egpNeighTable 1 } + +EgpNeighEntry ::= + SEQUENCE { + egpNeighState + INTEGER, + egpNeighAddr + IpAddress, + egpNeighAs + INTEGER, + egpNeighInMsgs + Counter, + egpNeighInErrs + Counter, + egpNeighOutMsgs + Counter, + egpNeighOutErrs + Counter, + egpNeighInErrMsgs + Counter, + egpNeighOutErrMsgs + Counter, + egpNeighStateUps + Counter, + egpNeighStateDowns + Counter, + egpNeighIntervalHello + INTEGER, + egpNeighIntervalPoll + INTEGER, + egpNeighMode + INTEGER, + egpNeighEventTrigger + INTEGER + } + +egpNeighState OBJECT-TYPE + SYNTAX INTEGER { + idle(1), + acquisition(2), + down(3), + up(4), + cease(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The EGP state of the local system with respect to + this entry's EGP neighbor. Each EGP state is + represented by a value that is one greater than + the numerical value associated with said state in + RFC 904." + ::= { egpNeighEntry 1 } + +egpNeighAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address of this entry's EGP neighbor." + ::= { egpNeighEntry 2 } + +egpNeighAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system of this EGP peer. Zero + should be specified if the autonomous system + number of the neighbor is not yet known." + ::= { egpNeighEntry 3 } + +egpNeighInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without error + from this EGP peer." + ::= { egpNeighEntry 4 } + +egpNeighInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received from this EGP + peer that proved to be in error (e.g., bad EGP + checksum)." + ::= { egpNeighEntry 5 } + +egpNeighOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages to + this EGP peer." + ::= { egpNeighEntry 6 } + +egpNeighOutErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent to this EGP peer due to resource limitations + within an EGP entity." + ::= { egpNeighEntry 7 } + +egpNeighInErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages received + from this EGP peer." + ::= { egpNeighEntry 8 } + +egpNeighOutErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages sent to + this EGP peer." + ::= { egpNeighEntry 9 } + +egpNeighStateUps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions to the UP + state with this EGP peer." + ::= { egpNeighEntry 10 } + +egpNeighStateDowns OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions from the UP + state to any other state with this EGP peer." + ::= { egpNeighEntry 11 } + +egpNeighIntervalHello OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP Hello command + retransmissions (in hundredths of a second). This + represents the t1 timer as defined in RFC 904." 
+ ::= { egpNeighEntry 12 } + +egpNeighIntervalPoll OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP poll command + + retransmissions (in hundredths of a second). This + represents the t3 timer as defined in RFC 904." + ::= { egpNeighEntry 13 } + +egpNeighMode OBJECT-TYPE + SYNTAX INTEGER { active(1), passive(2) } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The polling mode of this EGP entity, either + passive or active." + ::= { egpNeighEntry 14 } + +egpNeighEventTrigger OBJECT-TYPE + SYNTAX INTEGER { start(1), stop(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "A control variable used to trigger operator- + initiated Start and Stop events. When read, this + variable always returns the most recent value that + egpNeighEventTrigger was set to. If it has not + been set since the last initialization of the + network management subsystem on the node, it + returns a value of `stop'. + + When set, this variable causes a Start or Stop + event on the specified neighbor, as specified on + pages 8-10 of RFC 904. Briefly, a Start event + causes an Idle peer to begin neighbor acquisition + and a non-Idle peer to reinitiate neighbor + acquisition. A stop event causes a non-Idle peer + to return to the Idle state until a Start event + occurs, either via egpNeighEventTrigger or + otherwise." + ::= { egpNeighEntry 15 } + +-- additional EGP objects + +egpAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system number of this EGP entity." + ::= { egp 6 } + +-- the Transmission group + +-- Based on the transmission media underlying each interface +-- on a system, the corresponding portion of the Transmission +-- group is mandatory for that system. + +-- When Internet-standard definitions for managing +-- transmission media are defined, the transmission group is +-- used to provide a prefix for the names of those objects. + +-- Typically, such definitions reside in the experimental +-- portion of the MIB until they are "proven", then as a +-- part of the Internet standardization process, the +-- definitions are accordingly elevated and a new object +-- identifier, under the transmission group is defined. By +-- convention, the name assigned is: +-- +-- type OBJECT IDENTIFIER ::= { transmission number } +-- +-- where "type" is the symbolic value used for the media in +-- the ifType column of the ifTable object, and "number" is +-- the actual integer value corresponding to the symbol. + +-- the SNMP group + +-- Implementation of the SNMP group is mandatory for all +-- systems which support an SNMP protocol entity. Some of +-- the objects defined below will be zero-valued in those +-- SNMP implementations that are optimized to support only +-- those functions specific to either a management agent or +-- a management station. In particular, it should be +-- observed that the objects below refer to an SNMP entity, +-- and there may be several SNMP entities residing on a +-- managed node (e.g., if the node is hosting acting as +-- a management station). + +snmpInPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of Messages delivered to the + SNMP entity from the transport service." + ::= { snmp 1 } + +snmpOutPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + passed from the SNMP protocol entity to the + transport service." 
+ ::= { snmp 2 } + +snmpInBadVersions OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + delivered to the SNMP protocol entity and were for + an unsupported SNMP version." + ::= { snmp 3 } + +snmpInBadCommunityNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which used a SNMP + community name not known to said entity." + ::= { snmp 4 } + +snmpInBadCommunityUses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which represented an SNMP + operation which was not allowed by the SNMP + community named in the Message." + ::= { snmp 5 } + +snmpInASNParseErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ASN.1 or BER errors + encountered by the SNMP protocol entity when + decoding received SNMP Messages." + ::= { snmp 6 } + +-- { snmp 7 } is not used + +snmpInTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `tooBig'." + ::= { snmp 8 } + +snmpInNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `noSuchName'." + ::= { snmp 9 } + +snmpInBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 10 } + +snmpInReadOnlys OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number valid SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `readOnly'. It should be noted that it is a + protocol error to generate an SNMP PDU which + contains the value `readOnly' in the error-status + field, as such this object is provided as a means + of detecting incorrect implementations of the + + SNMP." + ::= { snmp 11 } + +snmpInGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 12 } + +snmpInTotalReqVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + retrieved successfully by the SNMP protocol entity + as the result of receiving valid SNMP Get-Request + and Get-Next PDUs." + ::= { snmp 13 } + +snmpInTotalSetVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + altered successfully by the SNMP protocol entity + as the result of receiving valid SNMP Set-Request + PDUs." + ::= { snmp 14 } + +snmpInGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." 
+ ::= { snmp 15 } + +snmpInGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 16 } + +snmpInSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 17 } + +snmpInGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 18 } + +snmpInTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 19 } + +snmpOutTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `tooBig.'" + ::= { snmp 20 } + +snmpOutNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status is + `noSuchName'." + ::= { snmp 21 } + +snmpOutBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 22 } + +-- { snmp 23 } is not used + +snmpOutGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 24 } + +snmpOutGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 25 } + +snmpOutGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 26 } + +snmpOutSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 27 } + +snmpOutGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 28 } + +snmpOutTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 29 } + +snmpEnableAuthenTraps OBJECT-TYPE + SYNTAX INTEGER { enabled(1), disabled(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicates whether the SNMP agent process is + permitted to generate authentication-failure + traps. 
The value of this object overrides any + configuration information; as such, it provides a + means whereby all authentication-failure traps may + be disabled. + + Note that it is strongly recommended that this + object be stored in non-volatile memory so that it + remains constant between re-initializations of the + network management system." + ::= { snmp 30 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMibImports b/plugins/inputs/snmp/testdata/tableMibImports new file mode 100644 index 0000000000000..1516e7cbb840f --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMibImports @@ -0,0 +1,119 @@ +RFC1155-SMI DEFINITIONS ::= BEGIN + +EXPORTS -- EVERYTHING + internet, directory, mgmt, + experimental, private, enterprises, + OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax, + ApplicationSyntax, NetworkAddress, IpAddress, + Counter, Gauge, TimeTicks, Opaque; + + -- the path to the root + + internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 } + + directory OBJECT IDENTIFIER ::= { internet 1 } + + mgmt OBJECT IDENTIFIER ::= { internet 2 } + + experimental OBJECT IDENTIFIER ::= { internet 3 } + + private OBJECT IDENTIFIER ::= { internet 4 } + enterprises OBJECT IDENTIFIER ::= { private 1 } + + -- definition of object types + + OBJECT-TYPE MACRO ::= + BEGIN + TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax) + "ACCESS" Access + "STATUS" Status + VALUE NOTATION ::= value (VALUE ObjectName) + + Access ::= "read-only" + | "read-write" + | "write-only" + | "not-accessible" + Status ::= "mandatory" + | "optional" + | "obsolete" + END + + -- names of objects in the MIB + + ObjectName ::= + OBJECT IDENTIFIER + + -- syntax of objects in the MIB + + ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that simple SEQUENCEs are not directly + -- mentioned here to keep things simple (i.e., + -- prevent mis-use). 
However, application-wide + -- types which are IMPLICITly encoded simple + -- SEQUENCEs may appear in the following CHOICE + + application-wide + ApplicationSyntax + } + + SimpleSyntax ::= + CHOICE { + number + INTEGER, + string + OCTET STRING, + object + OBJECT IDENTIFIER, + empty + NULL + } + + ApplicationSyntax ::= + CHOICE { + address + NetworkAddress, + counter + Counter, + gauge + Gauge, + ticks + TimeTicks, + arbitrary + Opaque + + -- other application-wide types, as they are + -- defined, will be added here + } + + -- application-wide types + + NetworkAddress ::= + CHOICE { + internet + IpAddress + } + + IpAddress ::= + [APPLICATION 0] -- in network-byte order + IMPLICIT OCTET STRING (SIZE (4)) + + Counter ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + + Gauge ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + + TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + + Opaque ::= + [APPLICATION 4] -- arbitrary ASN.1 value, + IMPLICIT OCTET STRING -- "double-wrapped" + + END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tcpMib b/plugins/inputs/snmp/testdata/tcpMib new file mode 100644 index 0000000000000..03c47224da153 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMib @@ -0,0 +1,786 @@ +TCP-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, Unsigned32, + Gauge32, Counter32, Counter64, IpAddress, mib-2, + MODULE-COMPLIANCE, OBJECT-GROUP, InetAddress, + InetAddressType, InetPortNumber + FROM tcpMibImports; + + + +tcpMIB MODULE-IDENTITY + LAST-UPDATED "200502180000Z" -- 18 February 2005 + ORGANIZATION + "IETF IPv6 MIB Revision Team + http://www.ietf.org/html.charters/ipv6-charter.html" + CONTACT-INFO + "Rajiv Raghunarayan (editor) + + Cisco Systems Inc. + 170 West Tasman Drive + San Jose, CA 95134 + + Phone: +1 408 853 9612 + Email: + + Send comments to " + DESCRIPTION + "The MIB module for managing TCP implementations. + + Copyright (C) The Internet Society (2005). This version + of this MIB module is a part of RFC 4022; see the RFC + itself for full legal notices." + REVISION "200502180000Z" -- 18 February 2005 + DESCRIPTION + "IP version neutral revision, published as RFC 4022." + REVISION "9411010000Z" + DESCRIPTION + "Initial SMIv2 version, published as RFC 2012." + REVISION "9103310000Z" + DESCRIPTION + "The initial revision of this MIB module was part of + MIB-II." + ::= { mib-2 49 } + +-- the TCP base variables group + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +-- Scalars + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4), -- Van Jacobson's algorithm + rfc2988(5) -- RFC 2988 + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The algorithm used to determine the timeout value used for + retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The minimum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides a minimum value." 
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides an upper bound (as part of an + adaptive backoff algorithm)." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX Integer32 (-1 | 0..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The limit on the total number of TCP connections the entity + can support. In entities where the maximum number of + connections is dynamic, this object should contain the + value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the SYN-SENT state from the CLOSED state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times TCP connections have made a direct + transition to the SYN-RCVD state from the LISTEN state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the SYN-SENT + state or the SYN-RCVD state, plus the number of times that + TCP connections have made a direct transition to the + LISTEN state from the SYN-RCVD state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the ESTABLISHED + state or the CLOSE-WAIT state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP connections for which the current state + is either ESTABLISHED or CLOSE-WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + on currently established connections. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." 
+ ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments retransmitted; that is, the + number of TCP segments transmitted containing one or more + previously transmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 12 } + +tcpInErrs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received in error (e.g., bad + TCP checksums). + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP segments sent containing the RST flag. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 15 } + +-- { tcp 16 } was used to represent the ipv6TcpConnTable in RFC 2452, +-- which has since been obsoleted. It MUST not be used. + +tcpHCInSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + + on currently established connections. This object is + the 64-bit equivalent of tcpInSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 17 } + +tcpHCOutSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. This object is the 64-bit + equivalent of tcpOutSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 18 } + +-- The TCP Connection table + +tcpConnectionTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about existing TCP + connections. Note that unlike earlier TCP MIBs, there + is a separate table for connections in the LISTEN state." + ::= { tcp 19 } + +tcpConnectionEntry OBJECT-TYPE + SYNTAX TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpConnectionTable containing + information about a particular current TCP connection. + Each row of this table is transient in that it ceases to + exist when (or soon after) the connection makes the + transition to the CLOSED state." + INDEX { tcpConnectionLocalAddressType, + tcpConnectionLocalAddress, + tcpConnectionLocalPort, + tcpConnectionRemAddressType, + tcpConnectionRemAddress, + tcpConnectionRemPort } + ::= { tcpConnectionTable 1 } + +TcpConnectionEntry ::= SEQUENCE { + tcpConnectionLocalAddressType InetAddressType, + tcpConnectionLocalAddress InetAddress, + tcpConnectionLocalPort InetPortNumber, + tcpConnectionRemAddressType InetAddressType, + tcpConnectionRemAddress InetAddress, + tcpConnectionRemPort InetPortNumber, + tcpConnectionState INTEGER, + tcpConnectionProcess Unsigned32 + } + +tcpConnectionLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionLocalAddress." 
+ ::= { tcpConnectionEntry 1 } + +tcpConnectionLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionLocalAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 2 } + +tcpConnectionLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnectionEntry 3 } + +tcpConnectionRemAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionRemAddress." + ::= { tcpConnectionEntry 4 } + +tcpConnectionRemAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionRemAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 5 } + +tcpConnectionRemPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnectionEntry 6 } + +tcpConnectionState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The state of this TCP connection. + + The value listen(2) is included only for parallelism to the + old tcpConnTable and should not be used. A connection in + LISTEN state should be present in the tcpListenerTable. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnectionEntry 7 } + +tcpConnectionProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this connection, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." 
+ ::= { tcpConnectionEntry 8 } + +-- The TCP Listener table + +tcpListenerTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about TCP listeners. A + listening application can be represented in three + possible ways: + + 1. An application that is willing to accept both IPv4 and + IPv6 datagrams is represented by + + a tcpListenerLocalAddressType of unknown (0) and + a tcpListenerLocalAddress of ''h (a zero-length + octet-string). + + 2. An application that is willing to accept only IPv4 or + IPv6 datagrams is represented by a + tcpListenerLocalAddressType of the appropriate address + type and a tcpListenerLocalAddress of '0.0.0.0' or '::' + respectively. + + 3. An application that is listening for data destined + only to a specific IP address, but from any remote + system, is represented by a tcpListenerLocalAddressType + of an appropriate address type, with + tcpListenerLocalAddress as the specific local address. + + NOTE: The address type in this table represents the + address type used for the communication, irrespective + of the higher-layer abstraction. For example, an + application using IPv6 'sockets' to communicate via + IPv4 between ::ffff:10.0.0.1 and ::ffff:10.0.0.2 would + use InetAddressType ipv4(1))." + ::= { tcp 20 } + +tcpListenerEntry OBJECT-TYPE + SYNTAX TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpListenerTable containing + information about a particular TCP listener." + INDEX { tcpListenerLocalAddressType, + tcpListenerLocalAddress, + tcpListenerLocalPort } + ::= { tcpListenerTable 1 } + +TcpListenerEntry ::= SEQUENCE { + tcpListenerLocalAddressType InetAddressType, + tcpListenerLocalAddress InetAddress, + tcpListenerLocalPort InetPortNumber, + tcpListenerProcess Unsigned32 + } + +tcpListenerLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpListenerLocalAddress. The value + should be unknown (0) if connection initiations to all + local IP addresses are accepted." + ::= { tcpListenerEntry 1 } + +tcpListenerLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. + + The value of this object can be represented in three + possible ways, depending on the characteristics of the + listening application: + + 1. For an application willing to accept both IPv4 and + IPv6 datagrams, the value of this object must be + ''h (a zero-length octet-string), with the value + of the corresponding tcpListenerLocalAddressType + object being unknown (0). + + 2. For an application willing to accept only IPv4 or + IPv6 datagrams, the value of this object must be + '0.0.0.0' or '::' respectively, with + tcpListenerLocalAddressType representing the + appropriate address type. + + 3. For an application which is listening for data + destined only to a specific IP address, the value + of this object is the specific local address, with + tcpListenerLocalAddressType representing the + appropriate address type. + + As this object is used in the index for the + tcpListenerTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed, using SNMPv1, SNMPv2c, or SNMPv3." 
+ ::= { tcpListenerEntry 2 } + +tcpListenerLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpListenerEntry 3 } + +tcpListenerProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this listener, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." + ::= { tcpListenerEntry 4 } + +-- The deprecated TCP Connection table + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A table containing information about existing IPv4-specific + TCP connections or listeners. This table has been + deprecated in favor of the version neutral + tcpConnectionTable." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A conceptual row of the tcpConnTable containing information + about a particular current IPv4 TCP connection. Each row + of this table is transient in that it ceases to exist when + (or soon after) the connection makes the transition to the + CLOSED state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= SEQUENCE { + tcpConnState INTEGER, + tcpConnLocalAddress IpAddress, + tcpConnLocalPort Integer32, + tcpConnRemAddress IpAddress, + tcpConnRemPort Integer32 + + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS deprecated + DESCRIPTION + "The state of this TCP connection. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local IP address for this TCP connection. In the case + of a connection in the listen state willing to + accept connections for any IP interface associated with the + node, the value 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote IP address for this TCP connection." 
+ ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- conformance information + +tcpMIBConformance OBJECT IDENTIFIER ::= { tcpMIB 2 } + +tcpMIBCompliances OBJECT IDENTIFIER ::= { tcpMIBConformance 1 } +tcpMIBGroups OBJECT IDENTIFIER ::= { tcpMIBConformance 2 } + +-- compliance statements + +tcpMIBCompliance2 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for systems that implement TCP. + + A number of INDEX objects cannot be + represented in the form of OBJECT clauses in SMIv2 but + have the following compliance requirements, + expressed in OBJECT clause form in this description + clause: + + -- OBJECT tcpConnectionLocalAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + + -- and IPv6 address types. + -- + -- OBJECT tcpConnectionRemAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. + -- + -- OBJECT tcpListenerLocalAddressType + -- SYNTAX InetAddressType { unknown(0), ipv4(1), + -- ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. The type unknown also + -- needs to be supported to identify a special + -- case in the listener table: a listen using + -- both IPv4 and IPv6 addresses on the device. + -- + " + MODULE -- this module + MANDATORY-GROUPS { tcpBaseGroup, tcpConnectionGroup, + tcpListenerGroup } + GROUP tcpHCGroup + DESCRIPTION + "This group is mandatory for systems that are capable + of receiving or transmitting more than 1 million TCP + segments per second. 1 million segments per second will + cause a Counter32 to wrap in just over an hour." + OBJECT tcpConnectionState + SYNTAX INTEGER { closed(1), listen(2), synSent(3), + synReceived(4), established(5), + finWait1(6), finWait2(7), closeWait(8), + lastAck(9), closing(10), timeWait(11) } + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required, nor is support for the value + deleteTCB (12)." + ::= { tcpMIBCompliances 2 } + +tcpMIBCompliance MODULE-COMPLIANCE + STATUS deprecated + DESCRIPTION + "The compliance statement for IPv4-only systems that + implement TCP. In order to be IP version independent, this + compliance statement is deprecated in favor of + tcpMIBCompliance2. However, agents are still encouraged + to implement these objects in order to interoperate with + the deployed base of managers." + + MODULE -- this module + MANDATORY-GROUPS { tcpGroup } + OBJECT tcpConnState + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required." + ::= { tcpMIBCompliances 1 } + +-- units of conformance + +tcpGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, tcpConnState, + tcpConnLocalAddress, tcpConnLocalPort, + tcpConnRemAddress, tcpConnRemPort, + tcpInErrs, tcpOutRsts } + STATUS deprecated + DESCRIPTION + "The tcp group of objects providing for management of TCP + entities." 
+ ::= { tcpMIBGroups 1 } + +tcpBaseGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, + tcpInErrs, tcpOutRsts } + STATUS current + DESCRIPTION + "The group of counters common to TCP entities." + ::= { tcpMIBGroups 2 } + +tcpConnectionGroup OBJECT-GROUP + OBJECTS { tcpConnectionState, tcpConnectionProcess } + STATUS current + DESCRIPTION + "The group provides general information about TCP + connections." + ::= { tcpMIBGroups 3 } + +tcpListenerGroup OBJECT-GROUP + OBJECTS { tcpListenerProcess } + STATUS current + DESCRIPTION + "This group has objects providing general information about + TCP listeners." + ::= { tcpMIBGroups 4 } + +tcpHCGroup OBJECT-GROUP + OBJECTS { tcpHCInSegs, tcpHCOutSegs } + STATUS current + DESCRIPTION + "The group of objects providing for counters of high speed + TCP implementations." + ::= { tcpMIBGroups 5 } + +END diff --git a/plugins/inputs/snmp/testdata/tcpMibImports b/plugins/inputs/snmp/testdata/tcpMibImports new file mode 100644 index 0000000000000..f3b6b9d8d52fd --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMibImports @@ -0,0 +1,639 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its 
refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." 
+ ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InetPortNumber ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "Represents a 16 bit port number of an Internet transport + + layer protocol. Port numbers are assigned by IANA. A + current list of all assignments is available from + . + + The value zero is object-specific and must be defined as + part of the description of any object that uses this + syntax. 
Examples of the usage of zero might include + situations where a port number is unknown, or when the + value zero is used as a wildcard in a filter." + REFERENCE "STD 6 (RFC 768), STD 7 (RFC 793) and RFC 2960" + SYNTAX Unsigned32 (0..65535) + + +InetAddress ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "Denotes a generic Internet address. + + An InetAddress value is always interpreted within the context + of an InetAddressType value. Every usage of the InetAddress + textual convention is required to specify the InetAddressType + object that provides the context. It is suggested that the + InetAddressType object be logically registered before the + object(s) that use the InetAddress textual convention, if + they appear in the same logical row. + + The value of an InetAddress object must always be + consistent with the value of the associated InetAddressType + object. Attempts to set an InetAddress object to a value + inconsistent with the associated InetAddressType + must fail with an inconsistentValue error. + + When this textual convention is used as the syntax of an + index object, there may be issues with the limit of 128 + sub-identifiers specified in SMIv2, STD 58. In this case, + the object definition MUST include a 'SIZE' clause to + limit the number of potential instance sub-identifiers; + otherwise the applicable constraints MUST be stated in + the appropriate conceptual row DESCRIPTION clauses, or + in the surrounding documentation if there is no single + DESCRIPTION clause that is appropriate." + SYNTAX OCTET STRING (SIZE (0..255)) + +InetAddressType ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "A value that represents a type of Internet address. + + unknown(0) An unknown address type. This value MUST + be used if the value of the corresponding + InetAddress object is a zero-length string. + It may also be used to indicate an IP address + that is not in one of the formats defined + below. + + ipv4(1) An IPv4 address as defined by the + InetAddressIPv4 textual convention. + + ipv6(2) An IPv6 address as defined by the + InetAddressIPv6 textual convention. + + ipv4z(3) A non-global IPv4 address including a zone + index as defined by the InetAddressIPv4z + textual convention. + + ipv6z(4) A non-global IPv6 address including a zone + index as defined by the InetAddressIPv6z + textual convention. + + dns(16) A DNS domain name as defined by the + InetAddressDNS textual convention. + + Each definition of a concrete InetAddressType value must be + accompanied by a definition of a textual convention for use + with that InetAddressType. + + To support future extensions, the InetAddressType textual + convention SHOULD NOT be sub-typed in object type definitions. + It MAY be sub-typed in compliance statements in order to + require only a subset of these address types for a compliant + implementation. + + Implementations must ensure that InetAddressType objects + and any dependent objects (e.g., InetAddress objects) are + consistent. An inconsistentValue error must be generated + if an attempt to change an InetAddressType object would, + for example, lead to an undefined InetAddress value. In + + particular, InetAddressType/InetAddress pairs must be + changed together if the address type changes (e.g., from + ipv6(2) to ipv4(1))." 
+ SYNTAX INTEGER { + unknown(0), + ipv4(1), + ipv6(2), + ipv4z(3), + ipv6z(4), + dns(16) + } + + + + + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib deleted file mode 100644 index c6e7a2a8962b6..0000000000000 --- a/plugins/inputs/snmp/testdata/test.mib +++ /dev/null @@ -1,97 +0,0 @@ -TEST DEFINITIONS ::= BEGIN - -testOID ::= { 1 0 0 } - -testTable OBJECT-TYPE - SYNTAX SEQUENCE OF testTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 0 } - -testTableEntry OBJECT-TYPE - SYNTAX TestTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - server - } - ::= { testTable 1 } - -TestTableEntry ::= - SEQUENCE { - server OCTET STRING, - connections INTEGER, - latency OCTET STRING, - description OCTET STRING, - } - -server OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX INTEGER - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 2 } - -latency OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 3 } - -description OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 4 } - -hostname OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testOID 1 1 } - -testSecondaryTable OBJECT-TYPE - SYNTAX SEQUENCE OF testSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 3 } - -testSecondaryTableEntry OBJECT-TYPE - SYNTAX TestSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - instance - } - ::= { testSecondaryTable 1 } - -TestSecondaryTableEntry ::= - SEQUENCE { - instance OCTET STRING, - connections INTEGER, - testTableIndex INTEGER, - } - -instance OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 2 } - -testTableIndex OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 3 } -END diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index a305fe49bab8b..a7f75afe3fe3d 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -6,6 +6,11 @@ notifications (traps and inform requests). Notifications are received on plain UDP. The port to listen is configurable. 
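The InetAddressType / InetAddress pairing spelled out in the textual conventions above only carries meaning when both values are read together: the raw InetAddress octets cannot be interpreted without the companion type value. The following is a minimal Go illustration of that rule; the helper name and the subset of handled types are assumptions made for the example, not part of this patch or of any Telegraf API.

```go
package main

import (
	"fmt"
	"net"
)

// renderInetAddress interprets raw InetAddress octets according to the
// companion InetAddressType value, as the textual conventions above require.
// Only the ipv4(1), ipv6(2) and dns(16) cases are handled in this sketch.
func renderInetAddress(addrType int, raw []byte) (string, error) {
	switch addrType {
	case 1: // ipv4(1): exactly 4 octets in network byte order
		if len(raw) != 4 {
			return "", fmt.Errorf("ipv4 address must be 4 octets, got %d", len(raw))
		}
		return net.IP(raw).String(), nil
	case 2: // ipv6(2): exactly 16 octets
		if len(raw) != 16 {
			return "", fmt.Errorf("ipv6 address must be 16 octets, got %d", len(raw))
		}
		return net.IP(raw).String(), nil
	case 16: // dns(16): the octets are a DNS domain name
		return string(raw), nil
	default: // unknown(0) and the zoned variants are left unhandled here
		return "", fmt.Errorf("unhandled InetAddressType %d", addrType)
	}
}

func main() {
	s, err := renderInetAddress(1, []byte{192, 0, 2, 10})
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // prints 192.0.2.10
}
```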
+## Note about Paths + +Path is a global variable, separate snmp instances will append the specified +path onto the global path variable + ## Configuration ```toml diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 28ae24adde62a..7bd6ba61d933d 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -3,28 +3,20 @@ package snmp_trap import ( "fmt" "net" - "os" - "path/filepath" "strconv" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/sleepinggenius2/gosmi" - "github.com/sleepinggenius2/gosmi/types" "github.com/gosnmp/gosnmp" ) var defaultTimeout = config.Duration(time.Second * 5) -type mibEntry struct { - mibName string - oidText string -} - type SnmpTrap struct { ServiceAddress string `toml:"service_address"` Timeout config.Duration `toml:"timeout"` @@ -45,7 +37,7 @@ type SnmpTrap struct { acc telegraf.Accumulator listener *gosnmp.TrapListener timeFunc func() time.Time - lookupFunc func(string) (mibEntry, error) + lookupFunc func(string) (snmp.MibEntry, error) errCh chan error makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc @@ -102,7 +94,7 @@ func init() { inputs.Add("snmp_trap", func() telegraf.Input { return &SnmpTrap{ timeFunc: time.Now, - lookupFunc: lookup, + lookupFunc: snmp.TrapLookup, ServiceAddress: "udp://:162", Timeout: defaultTimeout, Path: []string{"/usr/share/snmp/mibs"}, @@ -112,52 +104,13 @@ func init() { } func (s *SnmpTrap) Init() error { - // must init, append path for each directory, load module for every file - // or gosmi will fail without saying why - gosmi.Init() - err := s.getMibsPath() + err := snmp.LoadMibsFromPath(s.Path, s.Log) if err != nil { s.Log.Errorf("Could not get path %v", err) } return nil } -func (s *SnmpTrap) getMibsPath() error { - var folders []string - for _, mibPath := range s.Path { - gosmi.AppendPath(mibPath) - folders = append(folders, mibPath) - err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { - if info.Mode()&os.ModeSymlink != 0 { - s, _ := os.Readlink(path) - folders = append(folders, s) - } - return nil - }) - if err != nil { - s.Log.Errorf("Filepath could not be walked %v", err) - } - for _, folder := range folders { - err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - gosmi.AppendPath(path) - } else if info.Mode()&os.ModeSymlink == 0 { - _, err := gosmi.LoadModule(info.Name()) - if err != nil { - s.Log.Errorf("Module could not be loaded %v", err) - } - } - return nil - }) - if err != nil { - s.Log.Errorf("Filepath could not be walked %v", err) - } - } - folders = []string{} - } - return nil -} - func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { s.acc = acc s.listener = gosnmp.NewTrapListener() @@ -278,17 +231,16 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { func (s *SnmpTrap) Stop() { s.listener.Close() - defer gosmi.Exit() err := <-s.errCh if nil != err { s.Log.Errorf("Error stopping trap listener %v", err) } } -func setTrapOid(tags map[string]string, oid string, e mibEntry) { +func setTrapOid(tags map[string]string, oid string, e snmp.MibEntry) { tags["oid"] = oid - tags["name"] = e.oidText - tags["mib"] = e.mibName + tags["name"] = e.OidText + tags["mib"] = e.MibName } func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { @@ -348,7 
+300,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - var e mibEntry + var e snmp.MibEntry var err error e, err = s.lookupFunc(val) if nil != err { @@ -356,7 +308,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - value = e.oidText + value = e.OidText // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. // If v.Name is this oid, set a tag of the trap name. @@ -374,7 +326,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - name := e.oidText + name := e.OidText fields[name] = value } @@ -396,23 +348,3 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { s.acc.AddFields("snmp_trap", fields, tags, tm) } } - -func lookup(oid string) (e mibEntry, err error) { - var node gosmi.SmiNode - node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) - - // ensure modules are loaded or node will be empty (might not error) - if err != nil { - return e, err - } - - e.oidText = node.RenderQualified() - - i := strings.Index(e.oidText, "::") - if i == -1 { - return e, fmt.Errorf("not found") - } - e.mibName = e.oidText[:i] - e.oidText = e.oidText[i+2:] - return e, nil -} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 19e9f99bda899..6c7c7df33e20f 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/testutil" ) @@ -132,7 +133,7 @@ func TestReceiveTrap(t *testing.T) { type entry struct { oid string - e mibEntry + e snmp.MibEntry } // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will @@ -180,23 +181,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -263,16 +264,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.2.3.0.55", - mibEntry{ - "enterpriseMIB", - "enterpriseOID", + snmp.MibEntry{ + MibName: "enterpriseMIB", + OidText: "enterpriseOID", }, }, }, @@ -317,16 +318,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "coldStartMIB", - "coldStartOID", + snmp.MibEntry{ + MibName: "coldStartMIB", + OidText: "coldStartOID", }, }, }, @@ -375,23 +376,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: 
snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -439,23 +440,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -502,23 +503,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -564,23 +565,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -626,23 +627,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -688,23 +689,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -750,23 +751,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -812,23 +813,23 @@ func TestReceiveTrap(t 
*testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -876,23 +877,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -940,23 +941,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1004,23 +1005,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1068,23 +1069,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1132,23 +1133,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1196,23 +1197,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: 
snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1260,13 +1261,13 @@ func TestReceiveTrap(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, - lookupFunc: func(input string) (mibEntry, error) { + lookupFunc: func(input string) (snmp.MibEntry, error) { for _, entry := range tt.entries { if input == entry.oid { - return mibEntry{entry.e.mibName, entry.e.oidText}, nil + return snmp.MibEntry{MibName: entry.e.MibName, OidText: entry.e.OidText}, nil } } - return mibEntry{}, fmt.Errorf("unexpected oid") + return snmp.MibEntry{}, fmt.Errorf("unexpected oid") }, //if cold start be answer otherwise err Log: testutil.Logger{}, @@ -1376,7 +1377,7 @@ func TestGosmiSingleMib(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, - lookupFunc: lookup, + lookupFunc: snmp.TrapLookup, Log: testutil.Logger{}, Version: "2c", Path: []string{testDataPath}, From 7053fec4e78d09b96e1416976b8911c72b7bc18c Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 1 Dec 2021 07:39:07 -0700 Subject: [PATCH 095/133] fix: sysstat use unique temp file vs hard-coded (#10165) --- plugins/inputs/sysstat/sysstat.go | 34 +++++++++++++++---------------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index c9ac67afcffef..3796aeb19ac58 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -10,7 +10,6 @@ import ( "io" "os" "os/exec" - "path" "strconv" "strings" "sync" @@ -66,7 +65,6 @@ type Sysstat struct { // DeviceTags adds the possibility to add additional tags for devices. DeviceTags map[string][]map[string]string `toml:"device_tags"` - tmpFile string interval int Log telegraf.Logger @@ -149,8 +147,15 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { s.interval = int(time.Since(firstTimestamp).Seconds() + 0.5) } } + + tmpfile, err := os.CreateTemp("", "sysstat-*") + if err != nil { + return fmt.Errorf("failed to create tmp file: %s", err) + } + defer os.Remove(tmpfile.Name()) + ts := time.Now().Add(time.Duration(s.interval) * time.Second) - if err := s.collect(); err != nil { + if err := s.collect(tmpfile.Name()); err != nil { return err } var wg sync.WaitGroup @@ -158,15 +163,11 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(acc telegraf.Accumulator, option string) { defer wg.Done() - acc.AddError(s.parse(acc, option, ts)) + acc.AddError(s.parse(acc, option, tmpfile.Name(), ts)) }(acc, option) } wg.Wait() - if _, err := os.Stat(s.tmpFile); err == nil { - acc.AddError(os.Remove(s.tmpFile)) - } - return nil } @@ -175,12 +176,12 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { // Sadc -S -S ... 2 tmpFile // The above command collects system metrics during and // saves it in binary form to tmpFile. 
-func (s *Sysstat) collect() error { +func (s *Sysstat) collect(tempfile string) error { options := []string{} for _, act := range s.Activities { options = append(options, "-S", act) } - s.tmpFile = path.Join("/tmp", fmt.Sprintf("sysstat-%d", time.Now().Unix())) + // collectInterval has to be smaller than the telegraf data collection interval collectInterval := s.interval - parseInterval @@ -189,13 +190,10 @@ func (s *Sysstat) collect() error { collectInterval = 1 // In that case we only collect for 1 second. } - options = append(options, strconv.Itoa(collectInterval), "2", s.tmpFile) + options = append(options, strconv.Itoa(collectInterval), "2", tempfile) cmd := execCommand(s.Sadc, options...) out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval)) if err != nil { - if err := os.Remove(s.tmpFile); err != nil { - s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error()) - } return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } return nil @@ -229,8 +227,8 @@ func withCLocale(cmd *exec.Cmd) *exec.Cmd { // parse runs Sadf on the previously saved tmpFile: // Sadf -p -- -p
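The sysstat hunks above replace the hard-coded `/tmp/sysstat-<unix time>` path with a per-gather temporary file, which avoids collisions when overlapping gathers or multiple Telegraf processes run at once and removes the error-path cleanup the old code needed. Below is a condensed sketch of the pattern the patch adopts, with the surrounding plugin code stubbed out; the `runSadc` helper is an assumption that stands in for the real sadc invocation.

```go
package main

import (
	"fmt"
	"os"
)

// runSadc stands in for the real sadc command; it only records which
// binary file the collector was asked to write to.
func runSadc(binaryFile string) error {
	fmt.Println("collecting into", binaryFile)
	return nil
}

func gatherOnce() error {
	// os.CreateTemp picks a unique name under the default temp directory,
	// so concurrent gathers cannot race on a shared, timestamp-based path.
	tmpfile, err := os.CreateTemp("", "sysstat-*")
	if err != nil {
		return fmt.Errorf("failed to create tmp file: %w", err)
	}
	// Cleanup is deferred once, right after creation, instead of being
	// repeated on every error path as the old code had to do.
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()

	return runSadc(tmpfile.Name())
}

func main() {
	if err := gatherOnce(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

In the actual patch the temporary path is passed through `collect()` and `parse()` as an argument rather than stored on the plugin struct, which is why the shared `tmpFile` field and its removal bookkeeping disappear from `sysstat.go`.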