diff --git a/.circleci/config.yml b/.circleci/config.yml index 010c54a0fedfd..028198bbdb236 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,19 +1,13 @@ version: 2.1 orbs: - win: circleci/windows@2.4.0 + win: circleci/windows@2.4.0 aws-cli: circleci/aws-cli@1.4.0 executors: - go-1_15: + go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.8' - environment: - GOFLAGS: -p=8 - go-1_16: - working_directory: '/go/src/github.com/influxdata/telegraf' - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.16.6' + - image: 'quay.io/influxdb/telegraf-ci:1.17.2' environment: GOFLAGS: -p=8 mac: @@ -25,39 +19,113 @@ executors: GOFLAGS: -p=8 commands: + generate-config: + parameters: + os: + type: string + default: "linux" + steps: + - checkout + - attach_workspace: + at: '/build' + - run: ./scripts/generate_config.sh << parameters.os >> + - store_artifacts: + path: './new-config' + destination: 'new-config' + - persist_to_workspace: + root: './new-config' + paths: + - '*' check-changed-files-or-halt: - steps: - - run: ./scripts/check-file-changes.sh - check-changed-files-or-halt-windows: - steps: - - run: - command: ./scripts/check-file-changes.sh - shell: bash.exe + steps: + - run: ./scripts/check-file-changes.sh test-go: parameters: - goarch: + os: + type: string + default: "linux" + gotestsum: type: string - default: "amd64" + default: "gotestsum" + cache_version: + type: string + default: "v3" steps: - checkout - check-changed-files-or-halt - - attach_workspace: - at: '/go' - - run: 'GOARCH=<< parameters.goarch >> make' - - run: 'GOARCH=<< parameters.goarch >> make check' - - run: 'GOARCH=<< parameters.goarch >> make check-deps' - - run: 'GOARCH=<< parameters.goarch >> make test' + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - restore_cache: + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - attach_workspace: + at: '/go' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - restore_cache: + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_mac.sh' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - run: rm -rf /c/Go + - restore_cache: + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_windows.sh' + - run: mkdir -p test-results + - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >> + - run: | + PACKAGE_NAMES=$(go list ./... 
| circleci tests split --split-by=timings --timings-type=classname) + ./<< parameters.gotestsum >> --junitfile test-results/gotestsum-report.xml -- -short $PACKAGE_NAMES + - store_test_results: + path: test-results + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '~/go/src/github.com/influxdata/telegraf/gotestsum' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '/go/src/github.com/influxdata/telegraf/gotestsum' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - 'C:\Go' + - 'C:\Users\circleci\project\gotestsum.exe' + package-build: parameters: release: type: boolean default: false - nightly: - type: boolean - default: false type: type: string default: "" + nightly: + type: boolean + default: false steps: - checkout - check-changed-files-or-halt @@ -65,20 +133,19 @@ commands: at: '/go' - when: condition: << parameters.release >> - steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 make package' + steps: + - run: 'make package' - when: condition: << parameters.nightly >> - steps: - - run: 'mips=1 mipsel=1 arm64=1 amd64=1 static=1 armel=1 armhf=1 s390x=1 ppc641e=1 i386=1 windows=1 darwin=1 NIGHTLY=1 make package' - - run: 'make upload-nightly' + steps: + - run: 'NIGHTLY=1 make package include_packages="$(make << parameters.type >>)"' - unless: condition: or: - << parameters.nightly >> - << parameters.release >> steps: - - run: '<< parameters.type >>=1 make package' + - run: 'make package include_packages="$(make << parameters.type >>)"' - store_artifacts: path: './build/dist' destination: 'build/dist' @@ -88,7 +155,7 @@ commands: - 'dist' jobs: deps: - executor: go-1_16 + executor: go-1_17 steps: - checkout - restore_cache: @@ -96,6 +163,8 @@ jobs: - check-changed-files-or-halt - run: 'make deps' - run: 'make tidy' + - run: 'make check' + - run: 'make check-deps' - save_cache: name: 'go module cache' key: go-mod-v1-{{ checksum "go.sum" }} @@ -105,127 +174,172 @@ jobs: root: '/go' paths: - '*' - test-go-1_15: - executor: go-1_15 + test-go-1_17: + executor: go-1_17 steps: - test-go - test-go-1_15-386: - executor: go-1_15 - steps: - - test-go: - goarch: "386" - test-go-1_16: - executor: go-1_16 + parallelism: 4 + test-go-1_17-386: + executor: go-1_17 steps: - test-go - test-go-1_16-386: - executor: go-1_16 - steps: - - test-go: - goarch: "386" + parallelism: 4 test-go-mac: executor: mac steps: - - checkout - - restore_cache: - key: mac-go-mod-v0-{{ checksum "go.sum" }} - - check-changed-files-or-halt - - run: 'sh ./scripts/mac_installgo.sh' - - save_cache: - name: 'Saving cache' - key: mac-go-mod-v0-{{ checksum "go.sum" }} - paths: - - '/usr/local/Cellar/go' - - '/usr/local/bin/go' - - '/usr/local/bin/gofmt' - - run: 'make deps' - - run: 'make tidy' - - run: 'make' - - run: 'make check' - - run: 'make test' + - test-go: + os: darwin + parallelism: 4 test-go-windows: executor: name: win/default - shell: powershell.exe + shell: bash.exe steps: - - checkout - - check-changed-files-or-halt-windows - - run: choco 
upgrade golang --version=1.16.6 - - run: choco install make - - run: git config --system core.longpaths true - - run: make test-windows + - test-go: + os: windows + gotestsum: "gotestsum.exe" + parallelism: 4 windows-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: windows + nightly: << parameters.nightly >> darwin-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: darwin + nightly: << parameters.nightly >> i386-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: i386 - ppc641e-package: - executor: go-1_16 + nightly: << parameters.nightly >> + ppc64le-package: + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: - type: ppc641e + type: ppc64le + nightly: << parameters.nightly >> s390x-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: s390x + nightly: << parameters.nightly >> armel-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: armel + nightly: << parameters.nightly >> amd64-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: amd64 + nightly: << parameters.nightly >> arm64-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: arm64 + nightly: << parameters.nightly >> mipsel-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: mipsel + nightly: << parameters.nightly >> mips-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: mips + nightly: << parameters.nightly >> static-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: static + nightly: << parameters.nightly >> armhf-package: - executor: go-1_16 + parameters: + nightly: + type: boolean + default: false + executor: go-1_17 steps: - package-build: type: armhf + nightly: << parameters.nightly >> release: - executor: go-1_16 + executor: go-1_17 steps: - package-build: release: true nightly: - executor: go-1_16 + executor: go-1_17 steps: - - package-build: - nightly: true + - attach_workspace: + at: '/build' + - run: + command: | + aws s3 sync /build/dist s3://dl.influxdata.com/telegraf/nightlies/ \ + --exclude "*" \ + --include "*.tar.gz" \ + --include "*.deb" \ + --include "*.rpm" \ + --include "*.zip" \ + --acl public-read package-consolidate: executor: name: win/default @@ -277,20 +391,45 @@ jobs: path: './dist' destination: 'build/dist' test-awaiter: - executor: go-1_16 + executor: go-1_17 steps: - run: command: | echo "Go tests complete." 
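# test-awaiter is a no-op fan-in job: in the workflows below, the packaging jobs require this single job instead of each Go test job individually, which keeps the dependency graph flat.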
share-artifacts: - executor: aws-cli/default + executor: aws-cli/default steps: - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" - curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" - + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + generate-config: + executor: go-1_17 + steps: + - generate-config + generate-config-win: + executor: + name: win/default + shell: bash.exe + steps: + - generate-config: + os: windows + update-config: + executor: go-1_17 + steps: + - checkout + - attach_workspace: + at: '/new-config' + - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN} + +commonjobs: + - &test-awaiter + 'test-awaiter': + requires: + - 'test-go-1_17' + - 'test-go-1_17-386' + workflows: version: 2 check: @@ -299,25 +438,13 @@ workflows: filters: tags: only: /.*/ - - 'test-go-1_15': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1_15-386': - requires: - - 'deps' - filters: - tags: - only: /.*/ - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' filters: tags: only: /.*/ - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' filters: @@ -331,52 +458,69 @@ workflows: filters: tags: only: /.*/ - - 'test-awaiter': - requires: - - 'test-go-1_15' - - 'test-go-1_15-386' - - 'test-go-1_16' - - 'test-go-1_16-386' + - *test-awaiter - 'windows-package': - requires: + requires: - 'test-go-windows' - 'darwin-package': - requires: + requires: - 'test-go-mac' - 'i386-package': - requires: + requires: - 'test-awaiter' - - 'ppc641e-package': - requires: + - 'ppc64le-package': + requires: - 'test-awaiter' - 's390x-package': - requires: + requires: - 'test-awaiter' - 'armel-package': - requires: + requires: - 'test-awaiter' - 'amd64-package': - requires: + requires: - 'test-awaiter' - 'arm64-package': - requires: + requires: - 'test-awaiter' - 'armhf-package': - requires: + requires: - 'test-awaiter' - 'static-package': requires: - 'test-awaiter' - 'mipsel-package': - requires: + requires: - 'test-awaiter' - 'mips-package': - requires: + requires: - 'test-awaiter' + - 'generate-config': + requires: + - 'amd64-package' + filters: + branches: + only: + - master + - 'generate-config-win': + requires: + - 'windows-package' + filters: + branches: + only: + - master + - 'update-config': + requires: + - 'generate-config-win' + - 'generate-config' + filters: + branches: + only: + - master - 'share-artifacts': requires: - 'i386-package' - - 'ppc641e-package' + - 'ppc64le-package' - 's390x-package' - 'armel-package' - 'amd64-package' @@ -395,10 +539,8 @@ workflows: requires: - 'test-go-windows' - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - - 'test-go-1_16' - - 'test-go-1_16-386' + - 'test-go-1_17' + - 'test-go-1_17-386' filters: tags: only: /.*/ @@ -412,7 +554,7 @@ workflows: only: /.*/ - 'package-sign-mac': requires: - - 'package-sign-windows' + - 'package-sign-windows' filters: tags: only: /.*/ @@ -420,28 +562,89 @@ workflows: nightly: jobs: - 'deps' - - 'test-go-1_15': - requires: - - 'deps' - - 'test-go-1_15-386': - requires: - - 'deps' - - 'test-go-1_16': + - 'test-go-1_17': requires: - 'deps' - - 'test-go-1_16-386': + - 'test-go-1_17-386': requires: - 'deps' - 'test-go-mac' - 'test-go-windows' - - 'nightly': + - *test-awaiter + - 'windows-package': + name: 'windows-package-nightly' + nightly: true requires: - 'test-go-windows' + - 'darwin-package': + name: 'darwin-package-nightly' + 
nightly: true + requires: - 'test-go-mac' - - 'test-go-1_15' - - 'test-go-1_15-386' - - 'test-go-1_16' - - 'test-go-1_16-386' + - 'i386-package': + name: 'i386-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'ppc64le-package': + name: 'ppc64le-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 's390x-package': + name: 's390x-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'armel-package': + name: 'armel-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'amd64-package': + name: 'amd64-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'arm64-package': + name: 'arm64-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'armhf-package': + name: 'armhf-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'static-package': + name: 'static-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'mipsel-package': + name: 'mipsel-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - 'mips-package': + name: 'mips-package-nightly' + nightly: true + requires: + - 'test-awaiter' + - nightly: + requires: + - 'i386-package-nightly' + - 'ppc64le-package-nightly' + - 's390x-package-nightly' + - 'armel-package-nightly' + - 'amd64-package-nightly' + - 'mipsel-package-nightly' + - 'mips-package-nightly' + - 'darwin-package-nightly' + - 'windows-package-nightly' + - 'static-package-nightly' + - 'arm64-package-nightly' + - 'armhf-package-nightly' triggers: - schedule: cron: "0 7 * * *" diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 0000000000000..eb6187bc2f382 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,67 @@ +name: Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! We reserve Telegraf issues for reproducible bugs. + Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/); we have a lot of talented community members there who can help answer your question more quickly. + - type: textarea + id: config + attributes: + label: Relevant telegraf.conf + description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks. + render: toml + validations: + required: true + - type: input + id: system-info + attributes: + label: System info + description: Include Telegraf version, operating system, and other relevant details + placeholder: ex. Telegraf 1.20.0, Ubuntu 20.04, Docker 20.10.8 + validations: + required: true + - type: textarea + id: docker + attributes: + label: Docker + description: If your bug involves third-party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against. + validations: + required: false + - type: textarea + id: reproduce + attributes: + label: Steps to reproduce + description: Describe the steps to reproduce the bug. + value: | + 1. + 2. + 3. + ... + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: Describe what you expected to happen when you performed the above steps. 
+ validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: Actual behavior + description: Describe what actually happened when you performed the above steps. + validations: + required: true + - type: textarea + id: additional-info + attributes: + label: Additional info + description: Include gist of relevant config, logs, etc. + validations: + required: false + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 1c717ddbb1a15..67b65a26247fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ show completion. --> - [ ] Updated associated README.md. - [ ] Wrote appropriate unit tests. -- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) (e.g. feat: or fix:) +- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -356,6 +401,7 @@ func (m *Modbus) gatherRequestsDiscrete(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got discrete@%v[%v]: %v", request.address, request.length, bytes) // Bit value handling @@ -367,6 +413,9 @@ func (m *Modbus) gatherRequestsDiscrete(requests []request) error { request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -378,6 +427,7 @@ func (m *Modbus) gatherRequestsHolding(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got holding@%v[%v]: %v", request.address, request.length, bytes) // Non-bit value handling @@ -390,6 +440,9 @@ func (m *Modbus) gatherRequestsHolding(requests []request) error { request.fields[i].value = field.converter(bytes[offset : offset+length]) m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } return nil } @@ -401,6 +454,7 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { if err != nil { return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) m.Log.Debugf("got input@%v[%v]: %v", request.address, request.length, bytes) // Non-bit value handling @@ -413,6 +467,9 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { request.fields[i].value = field.converter(bytes[offset : offset+length]) m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) } + + // Some (serial) devices require a pause between requests... 
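+ // nextRequest was pinned right after the previous read returned, so this sleep only consumes whatever part of the configured PollPause was not already spent handling the response.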
+ time.Sleep(time.Until(nextRequest)) } return nil } @@ -441,6 +498,11 @@ func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, ta } } +// Implement the logger interface of the modbus client +func (m *Modbus) Printf(format string, v ...interface{}) { + m.Log.Debugf(format, v...) +} + // Add this plugin to telegraf func init() { inputs.Add("modbus", func() telegraf.Input { return &Modbus{} }) diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index ddcb1971f9667..15a474e6bb66a 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -1,5 +1,7 @@ # MongoDB Input Plugin +All MongoDB server versions from 2.6 and higher are supported. + ### Configuration: ```toml diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 723b0698b9ac8..79d3d36c6c038 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "go.mongodb.org/mongo-driver/bson/primitive" + "strconv" "strings" "time" @@ -126,11 +127,29 @@ func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { }, nil } -func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { +func poolStatsCommand(version string) (string, error) { + majorPart := string(version[0]) + major, err := strconv.ParseInt(majorPart, 10, 64) + if err != nil { + return "", err + } + + if major == 5 { + return "connPoolStats", nil + } + return "shardConnPoolStats", nil +} + +func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) { + command, err := poolStatsCommand(version) + if err != nil { + return nil, err + } + shardStats := &ShardStats{} - err := s.runCommand("admin", bson.D{ + err = s.runCommand("admin", bson.D{ { - Key: "shardConnPoolStats", + Key: command, Value: 1, }, }, &shardStats) @@ -272,7 +291,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, clusterStatus = status } - shardStats, err := s.gatherShardConnPoolStats() + shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) if err != nil { s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 2cf58689a6eab..c8fd9f7c15284 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb @@ -39,3 +40,45 @@ func TestAddDefaultStats(t *testing.T) { assert.True(t, acc.HasInt64Field("mongodb", key)) } } + +func TestPoolStatsVersionCompatibility(t *testing.T) { + tests := []struct { + name string + version string + expectedCommand string + err bool + }{ + { + name: "mongodb v3", + version: "3.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v4", + version: "4.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v5", + version: "5.0.0", + expectedCommand: "connPoolStats", + }, + { + name: "invalid version", + version: "v4", + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + command, err := poolStatsCommand(test.version) + require.Equal(t, test.expectedCommand, command) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 
9484118dd19ab..24aa2fe3e0d04 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 41f735d389c7a..ea69c8d424f7c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -248,14 +248,15 @@ type TransactionStats struct { // ReplStatus stores data related to replica sets. type ReplStatus struct { - SetName string `bson:"setName"` - IsMaster interface{} `bson:"ismaster"` - Secondary interface{} `bson:"secondary"` - IsReplicaSet interface{} `bson:"isreplicaset"` - ArbiterOnly interface{} `bson:"arbiterOnly"` - Hosts []string `bson:"hosts"` - Passives []string `bson:"passives"` - Me string `bson:"me"` + SetName string `bson:"setName"` + IsWritablePrimary interface{} `bson:"isWritablePrimary"` // mongodb 5.x + IsMaster interface{} `bson:"ismaster"` + Secondary interface{} `bson:"secondary"` + IsReplicaSet interface{} `bson:"isreplicaset"` + ArbiterOnly interface{} `bson:"arbiterOnly"` + Hosts []string `bson:"hosts"` + Passives []string `bson:"passives"` + Me string `bson:"me"` } // DBRecordStats stores data related to memory operations across databases. @@ -1086,8 +1087,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } @@ -1163,11 +1166,13 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if newStat.Repl != nil { returnVal.ReplSetName = newStat.Repl.SetName // BEGIN code modification - if newStat.Repl.IsMaster.(bool) { + if val, ok := newStat.Repl.IsMaster.(bool); ok && val { + returnVal.NodeType = "PRI" + } else if val, ok := newStat.Repl.IsWritablePrimary.(bool); ok && val { returnVal.NodeType = "PRI" - } else if newStat.Repl.Secondary != nil && newStat.Repl.Secondary.(bool) { + } else if val, ok := newStat.Repl.Secondary.(bool); ok && val { returnVal.NodeType = "SEC" - } else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) { + } else if val, ok := newStat.Repl.ArbiterOnly.(bool); ok && val { returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 838b1dd764d2f..65c2ac4e4b783 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -3,8 +3,8 @@ package multifile import ( "bytes" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -84,7 +84,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index 78f978fa059ee..d5b73ec7f4c1e 100644 --- 
a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -21,6 +21,10 @@ func ParseInt(value sql.RawBytes) (interface{}, error) { return v, err } +func ParseUint(value sql.RawBytes) (interface{}, error) { + return strconv.ParseUint(string(value), 10, 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -29,6 +33,10 @@ func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { return int64(0), nil } +func ParseString(value sql.RawBytes) (interface{}, error) { + return string(value), nil +} + func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html v := string(value) @@ -58,6 +66,9 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { return val, nil } + if val, err := strconv.ParseUint(string(value), 10, 64); err == nil { + return val, nil + } if val, err := strconv.ParseFloat(string(value), 64); err == nil { return val, nil } @@ -70,12 +81,29 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { } var GlobalStatusConversions = map[string]ConversionFunc{ - "ssl_ctx_verify_depth": ParseInt, - "ssl_verify_depth": ParseInt, + "innodb_available_undo_logs": ParseUint, + "innodb_buffer_pool_pages_misc": ParseUint, + "innodb_data_pending_fsyncs": ParseUint, + "ssl_ctx_verify_depth": ParseUint, + "ssl_verify_depth": ParseUint, } +// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html +// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html var GlobalVariableConversions = map[string]ConversionFunc{ - "gtid_mode": ParseGTIDMode, + "delay_key_write": ParseString, // ON, OFF, ALL + "enforce_gtid_consistency": ParseString, // ON, OFF, WARN + "event_scheduler": ParseString, // YES, NO, DISABLED + "gtid_mode": ParseGTIDMode, + "have_openssl": ParseBoolAsInteger, // alias for have_ssl + "have_ssl": ParseBoolAsInteger, // YES, DISABLED + "have_symlink": ParseBoolAsInteger, // YES, NO, DISABLED + "session_track_gtids": ParseString, + "session_track_transaction_info": ParseString, + "slave_skip_errors": ParseString, + "ssl_fips_mode": ParseString, + "transaction_write_set_extraction": ParseString, + "use_secondary_engine": ParseString, } func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) { diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 47189c18d1576..43133eeb39c1b 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -19,14 +19,14 @@ func TestConvertGlobalStatus(t *testing.T) { name: "default", key: "ssl_ctx_verify_depth", value: []byte("0"), - expected: int64(0), + expected: uint64(0), expectedErr: nil, }, { name: "overflow int64", key: "ssl_ctx_verify_depth", value: []byte("18446744073709551615"), - expected: int64(9223372036854775807), + expected: uint64(18446744073709551615), expectedErr: nil, }, { diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 53f688bb3bcd9..7144355096b4e 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,10 +1,11 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats import ( "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -55,7 +56,7 @@ func (n *Nats) Gather(acc 
telegraf.Accumulator) error { } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index 08d08ba760df0..f50ba2cfcf678 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd && !cgo // +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index 7207df94cfd02..135951405feda 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index dad4c8e5857f6..c2bb05384d7c8 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -5,7 +5,7 @@ package neptuneapex import ( "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -276,7 +276,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 7e1e753c5ff76..5cd7e76aec439 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -70,7 +70,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index fb40643409056..42e0cab62d53e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" @@ -153,7 +152,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
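// the 200-byte cap keeps the error message below informative without buffering an arbitrarily large response body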
- body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 681c2f6e7f460..58f60192b96d0 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -25,7 +25,7 @@ package nsq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -131,7 +131,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { return fmt.Errorf(`error reading body: %s`, err) } diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5bc2bc85a3136..4408b8f728579 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -2,7 +2,6 @@ package nstat import ( "bytes" - "io/ioutil" "os" "strconv" @@ -62,7 +61,7 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } @@ -71,14 +70,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index ea5887ae10a5d..3c0b14d6e4559 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -139,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index d6530c0839b18..f28981f7482ae 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -46,6 +46,12 @@ Plugin minimum tested version: 1.16 ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. 
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index 53454ba8816a7..d59adc453ba8b 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -12,6 +12,7 @@ import ( "github.com/gopcua/opcua/ua" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -26,6 +27,7 @@ type OpcUA struct { PrivateKey string `toml:"private_key"` Username string `toml:"username"` Password string `toml:"password"` + Timestamp string `toml:"timestamp"` AuthMethod string `toml:"auth_method"` ConnectTimeout config.Duration `toml:"connect_timeout"` RequestTimeout config.Duration `toml:"request_timeout"` @@ -77,12 +79,12 @@ type GroupSettings struct { // OPCData type type OPCData struct { - TagName string - Value interface{} - Quality ua.StatusCode - TimeStamp string - Time string - DataType ua.TypeID + TagName string + Value interface{} + Quality ua.StatusCode + ServerTime time.Time + SourceTime time.Time + DataType ua.TypeID } // ConnectionState used for constants @@ -136,6 +138,12 @@ const sampleConfig = ` ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use. Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration ## name - field name to use in the output ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -188,7 +196,12 @@ func (o *OpcUA) SampleConfig() string { func (o *OpcUA) Init() error { o.state = Disconnected - err := o.validateEndpoint() + err := choice.Check(o.Timestamp, []string{"", "gather", "server", "source"}) + if err != nil { + return err + } + + err = o.validateEndpoint() if err != nil { return err } @@ -406,10 +419,10 @@ func Connect(o *OpcUA) error { o.state = Connecting if o.client != nil { - if err := o.client.CloseSession(); err != nil { + if err := o.client.Close(); err != nil { // Only log the error but to not bail-out here as this prevents // reconnections for multiple parties (see e.g. #9523). 
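// Note: gopcua's Close() tears down the underlying secure channel as well as the session, hence the switch from CloseSession().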
- o.Log.Errorf("Closing session failed: %v", err) + o.Log.Errorf("Closing connection failed: %v", err) } } @@ -445,8 +458,10 @@ func Connect(o *OpcUA) error { } func (o *OpcUA) setupOptions() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() // Get a list of the endpoints for our target server - endpoints, err := opcua.GetEndpoints(o.Endpoint) + endpoints, err := opcua.GetEndpoints(ctx, o.Endpoint) if err != nil { return err } @@ -483,8 +498,9 @@ func (o *OpcUA) getData() error { o.nodeData[i].Value = d.Value.Value() o.nodeData[i].DataType = d.Value.Type() } - o.nodeData[i].TimeStamp = d.ServerTimestamp.String() - o.nodeData[i].Time = d.SourceTimestamp.String() + o.nodeData[i].Quality = d.Status + o.nodeData[i].ServerTime = d.ServerTimestamp + o.nodeData[i].SourceTime = d.SourceTimestamp } return nil } @@ -549,6 +565,15 @@ func (o *OpcUA) Gather(acc telegraf.Accumulator) error { fields[o.nodeData[i].TagName] = o.nodeData[i].Value fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) acc.AddFields(n.metricName, fields, tags) + + switch o.Timestamp { + case "server": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].ServerTime) + case "source": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].SourceTime) + default: + acc.AddFields(n.metricName, fields, tags) + } } } return nil @@ -562,6 +587,7 @@ func init() { Endpoint: "opc.tcp://localhost:4840", SecurityPolicy: "auto", SecurityMode: "auto", + Timestamp: "gather", RequestTimeout: config.Duration(5 * time.Second), ConnectTimeout: config.Duration(10 * time.Second), Certificate: "/etc/telegraf/cert.pem", diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index bb7ca56200954..e1304fa304fc6 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" "log" "math/big" "net" @@ -27,7 +26,7 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index f5fa450fa8f65..1c805e2a23ff2 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -56,7 +56,7 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { err := s.converter.WriteMetrics(ctx, req, s.writer) - return otlpgrpc.MetricsResponse{}, err + return otlpgrpc.NewMetricsResponse(), err } type logsService struct { diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go index 2e6cbf9b8349a..85f32a7695efa 100644 --- a/plugins/inputs/opentelemetry/opentelemetry.go +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -24,6 +24,7 @@ type OpenTelemetry struct { Log telegraf.Logger `toml:"-"` + listener net.Listener // overridden in tests grpcServer *grpc.Server wg sync.WaitGroup @@ -89,14 +90,16 @@ func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { otlpgrpc.RegisterMetricsServer(o.grpcServer, ms) otlpgrpc.RegisterLogsServer(o.grpcServer, newLogsService(logger, influxWriter)) - listener, err := net.Listen("tcp", o.ServiceAddress) - if err != nil { - return err + if o.listener == nil { 
+ o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } } o.wg.Add(1) go func() { - if err := o.grpcServer.Serve(listener); err != nil { + if err := o.grpcServer.Serve(o.listener); err != nil { accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) } o.wg.Done() diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..2de35bb06af50 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,83 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.New( + simple.NewWithExactDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher.MeterProvider()) + + // write metrics + meter := global.Meter("library-name") + counter := metric.Must(meter).NewInt64Counter("measurement-counter") + meter.RecordBatch(context.Background(), nil, counter.Measurement(7)) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + assert.Empty(t, accumulator.Errors) + + if assert.Len(t, accumulator.Metrics, 1) { + got := accumulator.Metrics[0] + assert.Equal(t, "measurement-counter", got.Measurement) + assert.Equal(t, telegraf.Counter, got.Type) + assert.Equal(t, "library-name", got.Tags["otel.library.name"]) + } +} diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index dbee336ba1040..ecbeeb532fd1e 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,7 +2,6 @@ package passenger import ( "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -28,7 +27,7 @@ func fakePassengerStatus(stat string) (string, error) { } tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) - if err := ioutil.WriteFile(tempFilePath, 
[]byte(content), 0700); err != nil { + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { return "", err } diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 9ac7e60715856..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -161,7 +160,7 @@ func (c *child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -295,7 +294,7 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // can properly cut off the client sending all the data. // For now just bound it a little and //nolint:errcheck,revive - io.CopyN(ioutil.Discard, body, 100<<20) + io.CopyN(io.Discard, body, 100<<20) //nolint:errcheck,revive body.Close() diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index a7234225806cc..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,7 +8,6 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -274,7 +273,7 @@ func TestMalformedParams(_ *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 50d8d604efb5b..d51c576aad7f0 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 5829d6bd07283..10744a9b15e99 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -100,7 +100,7 @@ LimitNOFILE=8192 Restart Telegraf: ```sh -$ systemctl edit telegraf +$ systemctl restart telegraf ``` #### Linux Permissions diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index a014a8237e8e7..f6bd751c2a4e3 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 895b9c1fdf5b9..7faba097c4562 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index f53d6f09a7373..1d3d933e7736b 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/ping/ping_windows_test.go 
b/plugins/inputs/ping/ping_windows_test.go index 0986d58bc74a8..6df8af3732a5f 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index f72474a114f94..e2d271f51cba1 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // postfix doesn't aim for Windows diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index ad997eebdbbe7..6ab6556a0cf07 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,9 +1,9 @@ +//go:build !windows // +build !windows package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -14,7 +14,7 @@ import ( ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) @@ -22,12 +22,12 @@ func TestGather(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go index 122c1543da55d..3a2c5e5cb3619 100644 --- a/plugins/inputs/postfix/postfix_windows.go +++ b/plugins/inputs/postfix/postfix_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package postfix diff --git a/plugins/inputs/postfix/stat_ctim.go b/plugins/inputs/postfix/stat_ctim.go index 456df5ffd4dd2..06ddccb178fce 100644 --- a/plugins/inputs/postfix/stat_ctim.go +++ b/plugins/inputs/postfix/stat_ctim.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || netbsd || openbsd || solaris // +build dragonfly linux netbsd openbsd solaris package postfix diff --git a/plugins/inputs/postfix/stat_ctimespec.go b/plugins/inputs/postfix/stat_ctimespec.go index 40e0de6cc4a40..03f4e0a435f2c 100644 --- a/plugins/inputs/postfix/stat_ctimespec.go +++ b/plugins/inputs/postfix/stat_ctimespec.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd // +build darwin freebsd package postfix diff --git a/plugins/inputs/postfix/stat_none.go b/plugins/inputs/postfix/stat_none.go index d9b67b1663af8..c1ca6a41c662f 100644 --- a/plugins/inputs/postfix/stat_none.go +++ 
b/plugins/inputs/postfix/stat_none.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !linux && !netbsd && !openbsd && !solaris && !darwin && !freebsd // +build !dragonfly,!linux,!netbsd,!openbsd,!solaris,!darwin,!freebsd package postfix diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 8311064b1f060..176827a4b1dc7 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,7 +3,7 @@ package postgresql_extensible import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strings" "time" @@ -147,7 +147,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 9faec83afa7d0..070dce65fe2a0 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes @@ -5,7 +6,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -191,7 +191,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index de04fecb56fc1..144b80f3fc1ec 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 567373c7c7260..f798a1668c738 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package processes diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index d5d8b8b36fe70..05cf4a72735f0 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -43,7 +43,7 @@ func (pg *NativeFinder) UID(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 9d7409ba1df8e..528b083ae628b 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 85e8d80f83cfe..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. Error: '%s'", path, err) diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index b838df651f636..09b5cc7cfa325 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -3,7 +3,6 @@ package procstat import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -156,17 +155,23 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { } p.procs = newProcs - for _, proc := range p.procs { p.addMetric(proc, acc, now) } + tags := make(map[string]string) + for _, pidTag := range pidTags { + for key, value := range pidTag.Tags { + tags[key] = value + } + } + fields := map[string]interface{}{ "pid_count": pidCount, "running": len(p.procs), "result_code": 0, } - tags := make(map[string]string) + tags["pid_finder"] = p.PidFinder tags["result"] = "success" acc.AddFields("procstat_lookup", fields, tags, now) @@ -474,7 +479,7 @@ func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } - pid, err := strconv.Atoi(string(kv[1])) + pid, err := strconv.ParseInt(string(kv[1]), 10, 32) if err != nil { return nil, fmt.Errorf("invalid pid '%s'", kv[1]) } @@ -516,7 +521,7 @@ func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { return nil, fmt.Errorf("not a directory %s", path) } procsPath := filepath.Join(path, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 2d8687e75013b..bc586fca4fa42 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -385,10 +384,10 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") + td, err := os.MkdirTemp("", "") require.NoError(t, err) defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + err = os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index a0a776d33736f..b7efcee17cdc1 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -1,3 +1,4 @@ +//go:build 
!windows
// +build !windows

package procstat
diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go
index 06dffc8472089..5d9c196e388c0 100644
--- a/plugins/inputs/procstat/win_service_windows.go
+++ b/plugins/inputs/procstat/win_service_windows.go
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows

 package procstat
diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md
index c826fd0e015ab..fe6d3a8e816da 100644
--- a/plugins/inputs/prometheus/README.md
+++ b/plugins/inputs/prometheus/README.md
@@ -23,6 +23,10 @@ in Prometheus format.
   ## Url tag name (tag containing scrapped url. optional, default is "url")
   # url_tag = "url"

+  ## Whether the timestamp of the scraped metrics will be ignored.
+  ## If set to true, the gather time will be used.
+  # ignore_timestamp = false
+
   ## An array of Kubernetes services to scrape metrics from.
   # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]

@@ -158,20 +162,20 @@ Authorization header.

 ### Usage for Caddy HTTP server

-If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin:
+Steps to monitor Caddy with Telegraf's Prometheus input plugin:

-* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus)
-* Add the `prometheus` directive in your `CaddyFile`
+* Download [Caddy](https://caddyserver.com/download)
+* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics)
 * Restart Caddy
 * Configure Telegraf to fetch metrics on it:

 ```toml
 [[inputs.prometheus]]
 #  ## An array of urls to scrape metrics from.
-  urls = ["http://localhost:9180/metrics"]
+  urls = ["http://localhost:2019/metrics"]
 ```

-> This is the default URL where Caddy Prometheus plugin will send data.
+> This is the default URL where Caddy exposes its metrics.
 > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md).

 ### Metrics:
diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go
index e78c64af3fcd4..9a4d6bd325c46 100644
--- a/plugins/inputs/prometheus/kubernetes.go
+++ b/plugins/inputs/prometheus/kubernetes.go
@@ -5,11 +5,10 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
-	"log"
 	"net"
 	"net/http"
 	"net/url"
+	"os"
 	"os/user"
 	"path/filepath"
 	"time"
@@ -41,7 +40,7 @@ const cAdvisorPodListDefaultInterval = 60
 // loadClient parses a kubeconfig from a file and returns a Kubernetes
 // client. It does not support extensions or client auth providers.
 func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) {
-	data, err := ioutil.ReadFile(kubeconfigPath)
+	data, err := os.ReadFile(kubeconfigPath)
 	if err != nil {
 		return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err)
 	}
@@ -111,47 +110,54 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset)
 		LabelSelector: p.KubernetesLabelSelector,
 		FieldSelector: p.KubernetesFieldSelector,
 	})
 	if err != nil {
 		return err
 	}
+	defer watcher.Stop()

-	pod := &corev1.Pod{}
-	go func() {
-		for event := range watcher.ResultChan() {
-			pod = &corev1.Pod{}
-			// If the pod is not "ready", there will be no ip associated with it.
-			if pod.Annotations["prometheus.io/scrape"] != "true" ||
-				!podReady(pod.Status.ContainerStatuses) {
-				continue
-			}
-			switch event.Type {
-			case watch.Added:
-				registerPod(pod, p)
-			case watch.Modified:
-				// To avoid multiple actions for each event, unregister on the first event
-				// in the delete sequence, when the containers are still "ready".
-				if pod.GetDeletionTimestamp() != nil {
-					unregisterPod(pod, p)
-				} else {
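+	// Block on pod events until the context is cancelled or the watch
+	// channel is closed by the API server, registering and unregistering
+	// pods as they come and go.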
+	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		case event, ok := <-watcher.ResultChan():
+			if !ok {
+				// the watch channel was closed; stop watching
+				return nil
+			}
+			pod, ok := event.Object.(*corev1.Pod)
+			if !ok {
+				return fmt.Errorf("unexpected object when getting pods")
+			}
+
+			// If the pod is not "ready", there will be no ip associated with it.
+			if pod.Annotations["prometheus.io/scrape"] != "true" ||
+				!podReady(pod.Status.ContainerStatuses) {
+				continue
+			}
+
+			switch event.Type {
+			case watch.Added:
+				registerPod(pod, p)
+			case watch.Modified:
+				// To avoid multiple actions for each event, unregister on the first event
+				// in the delete sequence, when the containers are still "ready".
+				if pod.GetDeletionTimestamp() != nil {
+					unregisterPod(pod, p)
+				} else {
+					registerPod(pod, p)
+				}
+			}
+		}
+	}
-	}()
-
-	return nil
 }

 func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error {
 	// The request will be the same each time
 	podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP)
 	req, err := http.NewRequest("GET", podsURL, nil)
-	req.Header.Set("Authorization", "Bearer "+bearerToken)
-	req.Header.Add("Accept", "application/json")
-
 	if err != nil {
 		return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err)
 	}
+	req.Header.Set("Authorization", "Bearer "+bearerToken)
+	req.Header.Add("Accept", "application/json")

 	// Update right away so code is not waiting the length of the specified scrape interval initially
 	err = updateCadvisorPodList(p, req)
@@ -288,12 +294,15 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
 	if p.kubernetesPods == nil {
 		p.kubernetesPods = map[string]URLAndAddress{}
 	}
-	targetURL := getScrapeURL(pod)
-	if targetURL == nil {
+	targetURL, err := getScrapeURL(pod)
+	if err != nil {
+		p.Log.Errorf("could not parse URL: %s", err)
+		return
+	} else if targetURL == nil {
 		return
 	}

-	log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL)
+	p.Log.Debugf("will scrape metrics from %q", targetURL.String())
 	// add annotation as metrics tags
 	tags := pod.Annotations
 	if tags == nil {
@@ -305,12 +314,7 @@ func registerPod(pod *corev1.Pod, p *Prometheus) {
 	for k, v := range pod.Labels {
 		tags[k] = v
 	}
-	URL, err := url.Parse(*targetURL)
-	if err != nil {
-		log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) - return - } - podURL := p.AddressToURL(URL, URL.Hostname()) + podURL := p.AddressToURL(targetURL, targetURL.Hostname()) // Locks earlier if using cAdvisor calls - makes a new list each time // rather than updating and removing from the same list @@ -320,22 +324,22 @@ func registerPod(pod *corev1.Pod, p *Prometheus) { } p.kubernetesPods[podURL.String()] = URLAndAddress{ URL: podURL, - Address: URL.Hostname(), - OriginalURL: URL, + Address: targetURL.Hostname(), + OriginalURL: targetURL, Tags: tags, } } -func getScrapeURL(pod *corev1.Pod) *string { +func getScrapeURL(pod *corev1.Pod) (*url.URL, error) { ip := pod.Status.PodIP if ip == "" { // return as if scrape was disabled, we will be notified again once the pod // has an IP - return nil + return nil, nil } scheme := pod.Annotations["prometheus.io/scheme"] - path := pod.Annotations["prometheus.io/path"] + pathAndQuery := pod.Annotations["prometheus.io/path"] port := pod.Annotations["prometheus.io/port"] if scheme == "" { @@ -344,34 +348,36 @@ func getScrapeURL(pod *corev1.Pod) *string { if port == "" { port = "9102" } - if path == "" { - path = "/metrics" + if pathAndQuery == "" { + pathAndQuery = "/metrics" } - u := &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(ip, port), - Path: path, + base, err := url.Parse(pathAndQuery) + if err != nil { + return nil, err } - x := u.String() + base.Scheme = scheme + base.Host = net.JoinHostPort(ip, port) - return &x + return base, nil } func unregisterPod(pod *corev1.Pod, p *Prometheus) { - url := getScrapeURL(pod) - if url == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("failed to parse url: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", - pod.Name, pod.Namespace) + p.Log.Debugf("registered a delete request for %q in namespace %q", pod.Name, pod.Namespace) p.lock.Lock() defer p.lock.Unlock() - if _, ok := p.kubernetesPods[*url]; ok { - delete(p.kubernetesPods, *url) - log.Printf("D! 
[inputs.prometheus] will stop scraping for %q", *url) + if _, ok := p.kubernetesPods[targetURL.String()]; ok { + delete(p.kubernetesPods, targetURL.String()) + p.Log.Debugf("will stop scraping for %q", targetURL.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 72f995c3112c9..2f67607cd3cf3 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -15,7 +15,8 @@ import ( func TestScrapeURLNoAnnotations(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Annotations = map[string]string{} - url := getScrapeURL(p) + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } @@ -23,36 +24,57 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Name = "myPod" p.Annotations = map[string]string{"prometheus.io/scrape": "false"} - url := getScrapeURL(p) + url, err := getScrapeURL(p) + assert.NoError(t, err) assert.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} + url, err := getScrapeURL(p) + assert.NoError(t, err) + assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 7d3140dc7d627..dfe5cc4749813 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/common/expfmt" ) -func Parse(buf 
[]byte, header http.Header) ([]telegraf.Metric, error) { +func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { var parser expfmt.TextParser var metrics []telegraf.Metric var err error @@ -76,7 +76,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !ignoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { t = now diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 293e1968d2b5d..ffd5967458c9f 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -1,8 +1,10 @@ package prometheus import ( + "fmt" "net/http" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -42,7 +44,7 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 func TestParseValidPrometheus(t *testing.T) { // Gauge value - metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) + metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) @@ -58,7 +60,7 @@ func TestParseValidPrometheus(t *testing.T) { }, metrics[0].Tags()) // Counter value - metrics, err = Parse([]byte(validUniqueCounter), http.Header{}) + metrics, err = Parse([]byte(validUniqueCounter), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "get_token_fail_count", metrics[0].Name()) @@ -69,7 +71,7 @@ func TestParseValidPrometheus(t *testing.T) { // Summary data //SetDefaultTags(map[string]string{}) - metrics, err = Parse([]byte(validUniqueSummary), http.Header{}) + metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) @@ -83,7 +85,7 @@ func TestParseValidPrometheus(t *testing.T) { assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data - metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}) + metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) assert.NoError(t, err) assert.Len(t, metrics, 1) assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) @@ -103,3 +105,38 @@ func TestParseValidPrometheus(t *testing.T) { map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) } + +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + + // IgnoreTimestamp is false + metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "test_counter", metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.Equal(t, testTime, metrics[0].Time().UTC()) + + // IgnoreTimestamp is true + metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) + assert.NoError(t, err) + assert.Len(t, metrics, 1) + assert.Equal(t, "test_counter", 
metrics[0].Name()) + assert.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + assert.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index adeb452253a37..18cbf6c8b3d59 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -58,6 +58,8 @@ type Prometheus struct { URLTag string `toml:"url_tag"` + IgnoreTimestamp bool `toml:"ignore_timestamp"` + tls.ClientConfig Log telegraf.Logger @@ -101,6 +103,10 @@ var sampleConfig = ` ## Url tag name (tag containing scrapped url. optional, default is "url") # url_tag = "url" + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. + # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] @@ -382,7 +388,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -408,16 +414,19 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } if p.MetricVersion == 2 { - parser := parser_v2.Parser{Header: resp.Header} + parser := parser_v2.Parser{ + Header: resp.Header, + IgnoreTimestamp: p.IgnoreTimestamp, + } metrics, err = parser.Parse(body) } else { - metrics, err = Parse(body, resp.Header) + metrics, err = Parse(body, resp.Header, p.IgnoreTimestamp) } if err != nil { diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index ea8ca0e9346ab..11117e05b45d9 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -242,6 +242,29 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } +func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) + })) + defer ts.Close() + + p := &Prometheus{ + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", + IgnoreTimestamp: true, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + m, _ := acc.Get("test_metric") + assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second) +} + func TestUnsupportedFieldSelector(t *testing.T) { fieldSelectorString := "spec.containerName=container" prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index ec34a7b2f5a36..efd7fae7d5d5f 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -3,7 +3,7 @@ package proxmox import ( "encoding/json" "errors" - 
"io/ioutil" + "io" "net/http" "net/url" "os" @@ -115,7 +115,7 @@ func performRequest(px *Proxmox, apiURL string, method string, data url.Values) } defer resp.Body.Close() - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md index 687005b98cc11..1406064d5c617 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -85,18 +85,19 @@ Meta: - tags: `` Measurement names: + - puppetagent_changes_total - puppetagent_events_failure - puppetagent_events_total - puppetagent_events_success + - puppetagent_resources_changed + - puppetagent_resources_corrective_change - puppetagent_resources_failed + - puppetagent_resources_failedtorestart + - puppetagent_resources_outofsync + - puppetagent_resources_restarted - puppetagent_resources_scheduled - - puppetagent_resources_changed - puppetagent_resources_skipped - puppetagent_resources_total - - puppetagent_resources_failedtorestart - - puppetagent_resources_restarted - - puppetagent_resources_outofsync - - puppetagent_changes_total - puppetagent_time_service - puppetagent_time_lastrun - puppetagent_version_config @@ -108,18 +109,26 @@ Meta: - tags: `` Measurement names: - - puppetagent_time_user - - puppetagent_time_schedule - - puppetagent_time_filebucket - - puppetagent_time_file - - puppetagent_time_exec - puppetagent_time_anchor - - puppetagent_time_sshauthorizedkey - - puppetagent_time_package - - puppetagent_time_total + - puppetagent_time_catalogapplication - puppetagent_time_configretrieval - - puppetagent_time_lastrun + - puppetagent_time_convertcatalog - puppetagent_time_cron + - puppetagent_time_exec + - puppetagent_time_factgeneration + - puppetagent_time_file + - puppetagent_time_filebucket + - puppetagent_time_group + - puppetagent_time_lastrun + - puppetagent_time_noderetrieval + - puppetagent_time_notify + - puppetagent_time_package + - puppetagent_time_pluginsync + - puppetagent_time_schedule + - puppetagent_time_sshauthorizedkey + - puppetagent_time_total + - puppetagent_time_transactionevaluation + - puppetagent_time_user - puppetagent_version_config #### PuppetAgent string measurements: diff --git a/plugins/inputs/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml index be2f017465fad..c1aa1ce276216 100644 --- a/plugins/inputs/puppetagent/last_run_summary.yaml +++ b/plugins/inputs/puppetagent/last_run_summary.yaml @@ -1,34 +1,43 @@ --- events: failure: 0 + noop: 0 total: 0 success: 0 resources: + changed: 0 + corrective_change: 0 failed: 0 + failed_to_restart: 0 + out_of_sync: 0 + restarted: 0 scheduled: 0 - changed: 0 skipped: 0 total: 109 - failed_to_restart: 0 - restarted: 0 - out_of_sync: 0 changes: total: 0 time: - user: 0.004331 - schedule: 0.001123 - filebucket: 0.000353 - file: 0.441472 - exec: 0.508123 anchor: 0.000555 - yumrepo: 0.006989 - ssh_authorized_key: 0.000764 - service: 1.807795 - package: 1.325788 - total: 8.85354707064819 + catalog_application: 0.010555 config_retrieval: 4.75567007064819 - last_run: 1444936531 + convert_catalog: 1.3 cron: 0.000584 + exec: 0.508123 + fact_generation: 0.34 + file: 0.441472 + filebucket: 0.000353 + last_run: 1444936531 + node_retrieval: 1.235 + notify: 0.00035 + package: 1.325788 + plugin_sync: 0.325788 + schedule: 0.001123 + service: 1.807795 + ssh_authorized_key: 0.000764 + total: 8.85354707064819 + transaction_evaluation: 4.69765 + user: 0.004331 + 
yumrepo: 0.006989 version: config: "environment:d6018ce" puppet: "3.7.5" diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 741de4a0dc013..f31e03d327817 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -2,12 +2,12 @@ package puppetagent import ( "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -32,19 +32,21 @@ type State struct { type event struct { Failure int64 `yaml:"failure"` + Noop int64 `yaml:"noop"` Total int64 `yaml:"total"` Success int64 `yaml:"success"` } type resource struct { - Failed int64 `yaml:"failed"` - Scheduled int64 `yaml:"scheduled"` - Changed int64 `yaml:"changed"` - Skipped int64 `yaml:"skipped"` - Total int64 `yaml:"total"` - FailedToRestart int64 `yaml:"failed_to_restart"` - Restarted int64 `yaml:"restarted"` - OutOfSync int64 `yaml:"out_of_sync"` + Changed int64 `yaml:"changed"` + CorrectiveChange int64 `yaml:"corrective_change"` + Failed int64 `yaml:"failed"` + FailedToRestart int64 `yaml:"failed_to_restart"` + OutOfSync int64 `yaml:"out_of_sync"` + Restarted int64 `yaml:"restarted"` + Scheduled int64 `yaml:"scheduled"` + Skipped int64 `yaml:"skipped"` + Total int64 `yaml:"total"` } type change struct { @@ -52,19 +54,27 @@ type change struct { } type time struct { - User float64 `yaml:"user"` - Schedule float64 `yaml:"schedule"` - FileBucket float64 `yaml:"filebucket"` - File float64 `yaml:"file"` - Exec float64 `yaml:"exec"` - Anchor float64 `yaml:"anchor"` - SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` - Service float64 `yaml:"service"` - Package float64 `yaml:"package"` - Total float64 `yaml:"total"` - ConfigRetrieval float64 `yaml:"config_retrieval"` - LastRun int64 `yaml:"last_run"` - Cron float64 `yaml:"cron"` + Anchor float64 `yaml:"anchor"` + CataLogApplication float64 `yaml:"catalog_application"` + ConfigRetrieval float64 `yaml:"config_retrieval"` + ConvertCatalog float64 `yaml:"convert_catalog"` + Cron float64 `yaml:"cron"` + Exec float64 `yaml:"exec"` + FactGeneration float64 `yaml:"fact_generation"` + File float64 `yaml:"file"` + FileBucket float64 `yaml:"filebucket"` + Group float64 `yaml:"group"` + LastRun int64 `yaml:"last_run"` + NodeRetrieval float64 `yaml:"node_retrieval"` + Notify float64 `yaml:"notify"` + Package float64 `yaml:"package"` + PluginSync float64 `yaml:"plugin_sync"` + Schedule float64 `yaml:"schedule"` + Service float64 `yaml:"service"` + SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` + Total float64 `yaml:"total"` + TransactionEvaluation float64 `yaml:"transaction_evaluation"` + User float64 `yaml:"user"` } type version struct { @@ -92,7 +102,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/puppetagent/puppetagent_test.go b/plugins/inputs/puppetagent/puppetagent_test.go index 6ba769ac5dd37..754fb39783a2a 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -17,33 +17,43 @@ func TestGather(t *testing.T) { tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ - "events_failure": int64(0), - "events_total": int64(0), - "events_success": int64(0), - "resources_failed": 
int64(0), - "resources_scheduled": int64(0), - "resources_changed": int64(0), - "resources_skipped": int64(0), - "resources_total": int64(109), - "resources_failedtorestart": int64(0), - "resources_restarted": int64(0), - "resources_outofsync": int64(0), - "changes_total": int64(0), - "time_lastrun": int64(1444936531), - "version_configstring": "environment:d6018ce", - "time_user": float64(0.004331), - "time_schedule": float64(0.001123), - "time_filebucket": float64(0.000353), - "time_file": float64(0.441472), - "time_exec": float64(0.508123), - "time_anchor": float64(0.000555), - "time_sshauthorizedkey": float64(0.000764), - "time_service": float64(1.807795), - "time_package": float64(1.325788), - "time_total": float64(8.85354707064819), - "time_configretrieval": float64(4.75567007064819), - "time_cron": float64(0.000584), - "version_puppet": "3.7.5", + "events_failure": int64(0), + "events_noop": int64(0), + "events_success": int64(0), + "events_total": int64(0), + "resources_changed": int64(0), + "resources_correctivechange": int64(0), + "resources_failed": int64(0), + "resources_failedtorestart": int64(0), + "resources_outofsync": int64(0), + "resources_restarted": int64(0), + "resources_scheduled": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "changes_total": int64(0), + "time_anchor": float64(0.000555), + "time_catalogapplication": float64(0.010555), + "time_configretrieval": float64(4.75567007064819), + "time_convertcatalog": float64(1.3), + "time_cron": float64(0.000584), + "time_exec": float64(0.508123), + "time_factgeneration": float64(0.34), + "time_file": float64(0.441472), + "time_filebucket": float64(0.000353), + "time_group": float64(0), + "time_lastrun": int64(1444936531), + "time_noderetrieval": float64(1.235), + "time_notify": float64(0.00035), + "time_package": float64(1.325788), + "time_pluginsync": float64(0.325788), + "time_schedule": float64(0.001123), + "time_service": float64(1.807795), + "time_sshauthorizedkey": float64(0.000764), + "time_total": float64(8.85354707064819), + "time_transactionevaluation": float64(4.69765), + "time_user": float64(0.004331), + "version_configstring": "environment:d6018ce", + "version_puppet": "3.7.5", } acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 13be5f63b1619..158b8d5ed6b21 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -3,7 +3,7 @@ package rabbitmq import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strconv" "sync" @@ -431,7 +431,7 @@ func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (r *RabbitMQ) requestJSON(u string, target interface{}) error { diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 830819b0528e4..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -2,9 +2,9 @@ package rabbitmq import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" + "os" "time" "testing" @@ -37,7 +37,7 @@ func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = 
w.Write(data) @@ -247,7 +247,7 @@ func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index a8599c4a78d0f..a8d4ba727d7df 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go index 74f0aaf9fc59f..b0795fd794f6f 100644 --- a/plugins/inputs/ras/ras_notlinux.go +++ b/plugins/inputs/ras/ras_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux || (linux && !386 && !amd64 && !arm && !arm64) // +build !linux linux,!386,!amd64,!arm,!arm64 package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index a90258bb4423b..656200fde95cc 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go index 42eaea3fb3e3b..3da1d0190a055 100644 --- a/plugins/inputs/ravendb/ravendb_test.go +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -1,9 +1,9 @@ package ravendb import ( - "io/ioutil" "net/http" "net/http/httptest" + "os" "testing" "time" @@ -30,7 +30,7 @@ func TestRavenDBGeneratesMetricsFull(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) @@ -225,7 +225,7 @@ func TestRavenDBGeneratesMetricsMin(t *testing.T) { require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) _, err = w.Write(data) diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 4d9e70a57a9bd..dcf26b192c651 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -3,7 +3,7 @@ package redfish import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -199,7 +199,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index 4327a28bb98ee..bd89ea75346b2 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -10,15 +10,21 @@ ## e.g. ## tcp://localhost:6379 ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock ## ## If no servers are specified, then localhost is used as the host. ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. 
Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index fdc5dcd14cb12..b66d4ea41d36b 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -32,8 +32,8 @@ type Redis struct { Log telegraf.Logger - clients []Client - initialized bool + clients []Client + connected bool } type Client interface { @@ -201,9 +201,13 @@ var sampleConfig = ` ## Optional. Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" @@ -230,8 +234,18 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init() error { - if r.initialized { +func (r *Redis) Init() error { + for _, command := range r.Commands { + if command.Type != "string" && command.Type != "integer" && command.Type != "float" { + return fmt.Errorf(`unknown result type: expected one of "string", "integer", "float"; got %q`, command.Type) + } + } + + return nil +} + +func (r *Redis) connect() error { + if r.connected { return nil } @@ -299,15 +313,15 @@ func (r *Redis) init() error { } } - r.initialized = true + r.connected = true return nil } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). func (r *Redis) Gather(acc telegraf.Accumulator) error { - if !r.initialized { - err := r.init() + if !r.connected { + err := r.connect() if err != nil { return err } @@ -333,6 +347,10 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err for _, command := range r.Commands { val, err := client.Do(command.Type, command.Command...) 
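+		// A type mismatch between the configured result type and the stored
+		// value surfaces as an error containing "unexpected type="; wrap it
+		// below so the user can tell it comes from a configured command.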
if err != nil { + if strings.Contains(err.Error(), "unexpected type=") { + return fmt.Errorf("could not get command result: %s", err) + } + return err } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 82ff292804a8c..0119131900b61 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index 9a09864cad91a..651042ab13783 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index 24bdd11540e1b..a38d5989cb5d0 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -368,7 +368,7 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { // Handle cancellations from the process func processOsSignals(cancelFunc context.CancelFunc) { - signalChan := make(chan os.Signal) + signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt) for { sig := <-signalChan diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index f1ecff8d61a83..f7c321d7ae978 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -203,11 +202,11 @@ func (s *Salesforce) login() error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index d3a8ba762f379..f2590c105272a 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go index 62a6211598f4e..424e96181b46b 100644 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sensors diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 6bf1b616cb985..be4cace6eab79 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 0d52881a72f04..3728cddb34349 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -201,6 +201,113 @@ One [metric][] is created for each row of the SNMP table. 
     ## Specifies if the value of given field should be snmptranslated
     ## by default no field values are translated
     # translate = true
+
+    ## Secondary index table allows merging data from two tables with
+    ## different indexes; this field will be used to join them. There can
+    ## be only one secondary index table.
+    # secondary_index_table = false
+
+    ## This field uses the secondary index and will later be merged with the
+    ## primary index using SecondaryIndexTable. SecondaryIndexTable and
+    ## SecondaryIndexUse are mutually exclusive.
+    # secondary_index_use = false
+
+    ## Controls whether entries from the secondary table should be added when
+    ## the joining index is missing. If set to true, the join is an outer join
+    ## and missing indexes are prefixed with "Secondary." to avoid overlapping
+    ## indexes from both tables. Can be set per field, or globally with
+    ## SecondaryIndexTable; a global true overrides a per-field false.
+    # secondary_outer_join = false
+```
+
+##### Two Table Join
+
+The SNMP plugin can join two SNMP tables that have different indexes. For this
+to work, one table must have a translation field whose value is the index of
+the second table. Examples of such fields are:
+ * Cisco portTable with translation field `CISCO-STACK-MIB::portIfIndex`,
+whose value is the IfIndex from ifTable
+ * Adva entityFacilityTable with translation field `ADVA-FSPR7-MIB::entityFacilityOneIndex`,
+whose value is the IfIndex from ifTable
+ * Cisco cpeExtPsePortTable with translation field `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`,
+whose value is the index from entPhysicalTable
+
+Such a field can be used to translate the index to the secondary table by setting
+`secondary_index_table = true`, and all fields from the secondary table (indexed
+by the translation field) should set `secondary_index_use = true`. Telegraf
+cannot duplicate entries during the join, so the translation must be 1-to-1
+(not 1-to-many). To also add fields from the secondary table whose index is not
+present in the translation table (an outer join), there is a second option on
+the translation field, `secondary_outer_join = true`.
+
+###### Example configuration for table joins
+
+CISCO-POWER-ETHERNET-EXT-MIB table before the join:
+```
+[[inputs.snmp.table]]
+name = "ciscoPower"
+index_as_tag = true
+
+[[inputs.snmp.table.field]]
+name = "PortPwrConsumption"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption"
+
+[[inputs.snmp.table.field]]
+name = "EntPhyIndex"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex"
+```
+
+Partial result (the agent_host and host columns are removed from all following outputs in this section):
+```
+> ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000
+> ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000
+> ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000
+```
+
+Note that the EntPhyIndex column carries the index from the ENTITY-MIB table; the config for it:
+```
+[[inputs.snmp.table]]
+name = "entityTable"
+index_as_tag = true
+
+[[inputs.snmp.table.field]]
+name = "EntPhysicalName"
+oid = "ENTITY-MIB::entPhysicalName"
+```
+Partial result:
+```
+> entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000
+> entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000
+> entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000
+```
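+
+Conceptually, the plugin builds a translation map from the value of the
+translation field (the secondary table's index) back to the primary row index,
+then folds the secondary rows into the matching primary rows. A minimal sketch
+of that idea in plain Go, using hypothetical simplified types rather than the
+plugin's actual internals:
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	// Primary table: row index -> value of the translation field (EntPhyIndex).
+	primary := map[string]int{"1.2": 1002, "1.6": 1006, "1.5": 1005}
+	// Secondary table: row index (EntPhyIndex) -> EntPhysicalName.
+	secondary := map[int]string{
+		1002: "GigabitEthernet1/2",
+		1006: "GigabitEthernet1/6",
+		1005: "GigabitEthernet1/5",
+	}
+
+	// Translation map: secondary index -> primary row index (must be 1-to-1).
+	translate := make(map[int]string)
+	for idx, entPhyIndex := range primary {
+		translate[entPhyIndex] = idx
+	}
+
+	// Fold each secondary row into its matching primary row (inner join);
+	// with secondary_outer_join the unmatched rows would be kept instead.
+	for entPhyIndex, name := range secondary {
+		if idx, ok := translate[entPhyIndex]; ok {
+			fmt.Printf("row %s: EntPhyIndex=%d EntPhysicalName=%q\n", idx, entPhyIndex, name)
+		}
+	}
+}
+```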
+
+Now, let's join these results into one table. EntPhyIndex matches the index of
+the second table, and we convert EntPhysicalName into a tag, so the second
+table only contributes tags to the result. Configuration:
+
+```
+[[inputs.snmp.table]]
+name = "ciscoPowerEntity"
+index_as_tag = true
+
+[[inputs.snmp.table.field]]
+name = "PortPwrConsumption"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption"
+
+[[inputs.snmp.table.field]]
+name = "EntPhyIndex"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex"
+secondary_index_table = true # enables joining
+
+[[inputs.snmp.table.field]]
+name = "EntPhysicalName"
+oid = "ENTITY-MIB::entPhysicalName"
+secondary_index_use = true # this tag is indexed from the secondary table
+is_tag = true
+```
+
+Result:
+```
+> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000
+> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000
+> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000
 ```

 ### Troubleshooting
diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go
index 7f2df6b689eac..a2259e88179c2 100644
--- a/plugins/inputs/snmp/snmp.go
+++ b/plugins/inputs/snmp/snmp.go
@@ -187,11 +187,18 @@ func (t *Table) Init() error {
 		return err
 	}

+	secondaryIndexTablePresent := false
 	// initialize all the nested fields
 	for i := range t.Fields {
 		if err := t.Fields[i].init(); err != nil {
 			return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err)
 		}
+		if t.Fields[i].SecondaryIndexTable {
+			if secondaryIndexTablePresent {
+				return fmt.Errorf("only one field can be the secondary index table")
+			}
+			secondaryIndexTablePresent = true
+		}
 	}

 	t.initialized = true
@@ -252,6 +259,19 @@ type Field struct {
 	Conversion string
 	// Translate tells if the value of the field should be snmptranslated
 	Translate bool
+	// SecondaryIndexTable allows merging data from two tables with different
+	// indexes; this field will be used to join them. There can be only one
+	// secondary index table per table.
+	SecondaryIndexTable bool
+	// SecondaryIndexUse marks a field that uses the secondary index and will
+	// later be merged with the primary index via SecondaryIndexTable.
+	// SecondaryIndexTable and SecondaryIndexUse are mutually exclusive.
+	SecondaryIndexUse bool
+	// SecondaryOuterJoin controls whether entries from the secondary table are
+	// added when the joining index is missing. If set to true, the join is an
+	// outer join and missing indexes are prefixed with "Secondary." to avoid
+	// overlapping indexes from both tables.
+	// Can be set per field, or globally with SecondaryIndexTable; a global
+	// true overrides a per-field false.
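+	// For example, with an outer join a row that exists only in the secondary
+	// table under index ".0" is reported under the index "Secondary.0".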
+	SecondaryOuterJoin bool

 	initialized bool
 }
@@ -278,6 +298,14 @@ func (f *Field) init() error {
 	//TODO use textual convention conversion from the MIB
 	}
+
+	if f.SecondaryIndexTable && f.SecondaryIndexUse {
+		return fmt.Errorf("SecondaryIndexTable and SecondaryIndexUse are mutually exclusive")
+	}
+
+	if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin {
+		return fmt.Errorf("SecondaryOuterJoin is set to true, but the field is not used in a join")
+	}
+
 	f.initialized = true
 	return nil
 }
@@ -414,6 +442,19 @@ func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table,

 func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
 	rows := map[string]RTableRow{}

+	// translation table for the secondary index (when performing a join on two tables)
+	secIdxTab := make(map[string]string)
+	secGlobalOuterJoin := false
+	for i, f := range t.Fields {
+		if f.SecondaryIndexTable {
+			secGlobalOuterJoin = f.SecondaryOuterJoin
+			if i != 0 {
+				t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0]
+			}
+			break
+		}
+	}
+
 	tagCount := 0
 	for _, f := range t.Fields {
 		if f.IsTag {
@@ -519,6 +560,16 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
 		}

 		for idx, v := range ifv {
+			if f.SecondaryIndexUse {
+				if newidx, ok := secIdxTab[idx]; ok {
+					idx = newidx
+				} else {
+					if !secGlobalOuterJoin && !f.SecondaryOuterJoin {
+						continue
+					}
+					idx = ".Secondary" + idx
+				}
+			}
 			rtr, ok := rows[idx]
 			if !ok {
 				rtr = RTableRow{}
@@ -543,6 +594,20 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) {
 			} else {
 				rtr.Fields[f.Name] = v
 			}
+			if f.SecondaryIndexTable {
+				// indexes are stored with a leading "." here, so add it if needed
+				var vss string
+				if ok {
+					vss = "." + vs
+				} else {
+					vss = fmt.Sprintf(".%v", v)
+				}
+				if idx[0] == '.' {
+					secIdxTab[vss] = idx
+				} else {
+					secIdxTab[vss] = "." 
+ idx + } + } } } } diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index 7227771a7e4fa..f87f9029b0d06 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate package main diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index f447f13c54e67..49c9bf381b107 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -81,6 +81,15 @@ var tsc = &testSNMPConnection{ ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -960,3 +969,242 @@ func TestSnmpTableCache_hit(t *testing.T) { assert.Equal(t, []Field{{Name: "d"}}, fields) assert.Equal(t, fmt.Errorf("e"), err) } + +func TestTableJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: 
map[string]interface{}{
+			"myfield2": 20,
+			"myfield3": 3,
+		},
+	}
+	rtr4 := RTableRow{
+		Tags: map[string]string{
+			"index":    "Secondary.0",
+			"myfield4": "foo",
+		},
+		Fields: map[string]interface{}{
+			"myfield5": 1,
+		},
+	}
+	assert.Len(t, tb.Rows, 4)
+	assert.Contains(t, tb.Rows, rtr1)
+	assert.Contains(t, tb.Rows, rtr2)
+	assert.Contains(t, tb.Rows, rtr3)
+	assert.Contains(t, tb.Rows, rtr4)
+}
+
+func TestTableJoinNoIndexAsTag_walk(t *testing.T) {
+	tbl := Table{
+		Name:       "mytable",
+		IndexAsTag: false,
+		Fields: []Field{
+			{
+				Name:  "myfield1",
+				Oid:   ".1.0.0.3.1.1",
+				IsTag: true,
+			},
+			{
+				Name: "myfield2",
+				Oid:  ".1.0.0.3.1.2",
+			},
+			{
+				Name:                "myfield3",
+				Oid:                 ".1.0.0.3.1.3",
+				SecondaryIndexTable: true,
+			},
+			{
+				Name:              "myfield4",
+				Oid:               ".1.0.0.0.1.1",
+				SecondaryIndexUse: true,
+				IsTag:             true,
+			},
+			{
+				Name:              "myfield5",
+				Oid:               ".1.0.0.0.1.2",
+				SecondaryIndexUse: true,
+			},
+		},
+	}
+
+	tb, err := tbl.Build(tsc, true)
+	require.NoError(t, err)
+
+	assert.Equal(t, tb.Name, "mytable")
+	rtr1 := RTableRow{
+		Tags: map[string]string{
+			"myfield1": "instance",
+			"myfield4": "bar",
+			//"index":    "10",
+		},
+		Fields: map[string]interface{}{
+			"myfield2": 10,
+			"myfield3": 1,
+			"myfield5": 2,
+		},
+	}
+	rtr2 := RTableRow{
+		Tags: map[string]string{
+			"myfield1": "instance2",
+			//"index":    "11",
+		},
+		Fields: map[string]interface{}{
+			"myfield2": 20,
+			"myfield3": 2,
+			"myfield5": 0,
+		},
+	}
+	rtr3 := RTableRow{
+		Tags: map[string]string{
+			"myfield1": "instance3",
+			//"index":    "12",
+		},
+		Fields: map[string]interface{}{
+			"myfield2": 20,
+			"myfield3": 3,
+		},
+	}
+	assert.Len(t, tb.Rows, 3)
+	assert.Contains(t, tb.Rows, rtr1)
+	assert.Contains(t, tb.Rows, rtr2)
+	assert.Contains(t, tb.Rows, rtr3)
+}
diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib
index 7c3758d66d9a1..c6e7a2a8962b6 100644
--- a/plugins/inputs/snmp/testdata/test.mib
+++ b/plugins/inputs/snmp/testdata/test.mib
@@ -55,4 +55,43 @@ hostname OBJECT-TYPE
     STATUS current
     ::= { testOID 1 1 }

+testSecondaryTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF TestSecondaryTableEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    ::= { testOID 3 }
+
+testSecondaryTableEntry OBJECT-TYPE
+    SYNTAX TestSecondaryTableEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    INDEX {
+        instance
+    }
+    ::= { testSecondaryTable 1 }
+
+TestSecondaryTableEntry ::=
+    SEQUENCE {
+        instance OCTET STRING,
+        connections INTEGER,
+        testTableIndex INTEGER
+    }
+
+instance OBJECT-TYPE
+    SYNTAX OCTET STRING
+    MAX-ACCESS read-only
+    STATUS current
+    ::= { testSecondaryTableEntry 1 }
+
+connections OBJECT-TYPE
+    SYNTAX INTEGER
+    MAX-ACCESS read-only
+    STATUS current
+    ::= { testSecondaryTableEntry 2 }
+
+testTableIndex OBJECT-TYPE
+    SYNTAX INTEGER
+    MAX-ACCESS read-only
+    STATUS current
+    ::= { testSecondaryTableEntry 3 }
 END
diff --git 
a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index c33e59f7129b6..a3ccacae1ceb2 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" @@ -69,7 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") @@ -133,7 +132,7 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") @@ -163,7 +162,7 @@ func TestSocketListener_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go index 26cf7e08b5170..945e2b8425a3b 100644 --- a/plugins/inputs/sql/drivers_sqlite.go +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go index c6c4658d83959..87227663bb4d0 100644 --- a/plugins/inputs/sql/sql.go +++ b/plugins/inputs/sql/sql.go @@ -5,7 +5,7 @@ import ( dbsql "database/sql" "errors" "fmt" - "io/ioutil" + "os" "sort" "strings" "sync" @@ -326,7 +326,7 @@ func (s *SQL) Init() error { // In case we got a script, we should read the query now. 
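+		// Doing this once in Init (rather than in Gather) avoids re-reading
+		// the file on every collection interval.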
if q.Script != "" { - query, err := ioutil.ReadFile(q.Script) + query, err := os.ReadFile(q.Script) if err != nil { return fmt.Errorf("reading script %q failed: %v", q.Script, err) } diff --git a/plugins/inputs/sqlserver/azuresqldbqueries_test.go b/plugins/inputs/sqlserver/azuresqldbqueries_test.go new file mode 100644 index 0000000000000..6d5712f39509a --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqldbqueries_test.go @@ -0,0 +1,450 @@ +package sqlserver + +import ( + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAzureSQL_Database_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "database_name")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_data_io_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_memory_usage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "xtp_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_session_percent")) + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "dtu_limit")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "avg_login_rate_percent")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "end_time")) // Time field. 
+ require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_memory_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_db_resource_governance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "database_name")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_min_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", 
"volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_pfs_iops")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_WaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azuredb_waitstats")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) 
+ require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "current_size_mb")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "space_used_mb")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_server_properties", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Database_OsWaitstats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "database_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, 
acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "database_name")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasField("sqlserver_requests", "wait_type")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasField("sqlserver_requests", "nt_user_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Database_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go new file mode 100644 index 0000000000000..72a74174a8722 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go @@ -0,0 +1,378 @@ +package sqlserver + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestAzureSQL_Managed_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, 
acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_instance_resource_governance")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "tempdb_log_file_number")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_man_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_ext_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "vol_ext_xtore_iops")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + 
require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version_desc")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_online")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_restoring")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recovering")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recoveryPending")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_suspect")) + require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_Managed_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := 
os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_type")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasTag("sqlserver_requests", "nt_user_name")) + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. 
+ require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQL_Managed_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMISchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 885913f91dd1c..cc8b1a40a10a5 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sync" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" googlepbduration "github.com/golang/protobuf/ptypes/duration" googlepbts "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" @@ -397,7 +397,7 @@ func (s *Stackdriver) newTimeSeriesConf( StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), Filter: filter, Interval: interval, } @@ -533,7 +533,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( ret := []*timeSeriesConf{} req := 
&monitoringpb.ListMetricDescriptorsRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), } filters := s.newListMetricDescriptorsFilters() diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9b620efc3e216..f3fc5f14eb394 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -21,7 +20,7 @@ var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats"," var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -38,7 +37,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test1.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -49,7 +48,7 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, err) //test suricata alerts - data2, err := ioutil.ReadFile("testdata/test2.json") + data2, err := os.ReadFile("testdata/test2.json") require.NoError(t, err) _, err = c.Write(data2) require.NoError(t, err) @@ -61,7 +60,7 @@ func TestSuricataLarge(t *testing.T) { } func TestSuricataAlerts(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -78,7 +77,7 @@ func TestSuricataAlerts(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test3.json") + data, err := os.ReadFile("testdata/test3.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) @@ -116,7 +115,7 @@ func TestSuricataAlerts(t *testing.T) { } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -162,7 +161,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -212,7 +211,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -254,7 +253,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -282,7 +281,7 @@ func TestSuricataTooLongLine(t *testing.T) { } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := 
filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -309,7 +308,7 @@ func TestSuricataEmptyJSON(t *testing.T) { } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -345,7 +344,7 @@ func TestSuricataDisconnectSocket(t *testing.T) { } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir, err := os.MkdirTemp("", "test") require.NoError(t, err) defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) @@ -387,7 +386,7 @@ func TestSuricataParse(t *testing.T) { } for _, tc := range tests { - data, err := ioutil.ReadFile("testdata/" + tc.filename) + data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) s := Suricata{ Delimiter: "_", diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index bcc9729384282..93cd26e3343f3 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index 71a223644d8ed..f12fc70656eba 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index fc5d67d6a064a..e8fbe62989055 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package synproxy import ( - "io/ioutil" "os" "testing" @@ -155,7 +155,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index 4d29daaf53915..7782ad968a3b1 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,7 +2,6 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" "os" "path/filepath" @@ -270,7 +269,7 @@ func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -278,7 +277,7 @@ func TestNonTransparentStrict_unix(t *testing.T) { } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -286,7 +285,7 @@ func TestNonTransparentBestEffort_unix(t *testing.T) { } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -294,7 
+293,7 @@ func TestNonTransparentStrict_unix_tls(t *testing.T) { } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 53fee69d112a5..1c0cc024507e2 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,7 +3,6 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -470,7 +469,7 @@ func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") @@ -478,7 +477,7 @@ func TestOctetCountingStrict_unix(t *testing.T) { } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") @@ -486,7 +485,7 @@ func TestOctetCountingBestEffort_unix(t *testing.T) { } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") @@ -494,7 +493,7 @@ func TestOctetCountingStrict_unix_tls(t *testing.T) { } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index ab3fe2ceaf60f..5bcb847b36ec4 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,7 +2,6 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" "path/filepath" @@ -290,7 +289,7 @@ func TestBestEffort_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") @@ -304,7 +303,7 @@ func TestStrict_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go index ac0539d30e1af..00146fde9cd26 100644 --- a/plugins/inputs/syslog/syslog_test.go +++ b/plugins/inputs/syslog/syslog_test.go @@ -1,7 +1,6 @@ package syslog import ( - "io/ioutil" "os" "path/filepath" "runtime" @@ -46,7 +45,7 @@ func TestAddress(t *testing.T) { require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'") require.Error(t, err) - tmpdir, err := ioutil.TempDir("", 
"telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") defer os.RemoveAll(tmpdir) require.NoError(t, err) sock := filepath.Join(tmpdir, "syslog.TestAddress.sock") diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 01b4db9fa4af9..7e69ff41ccdf2 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_interval_test.go b/plugins/inputs/sysstat/sysstat_interval_test.go index 972eb9af936de..f714ec10b1c36 100644 --- a/plugins/inputs/sysstat/sysstat_interval_test.go +++ b/plugins/inputs/sysstat/sysstat_interval_test.go @@ -1,5 +1,5 @@ -// +build !race -// +build linux +//go:build !race && linux +// +build !race,linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_notlinux.go b/plugins/inputs/sysstat/sysstat_notlinux.go index e97e71e78280c..6b5dd6fcf18cb 100644 --- a/plugins/inputs/sysstat/sysstat_notlinux.go +++ b/plugins/inputs/sysstat/sysstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sysstat diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 1766130391bbb..64b596bb329ba 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sysstat diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md index 7fe09e224c564..f9d47d7df1252 100644 --- a/plugins/inputs/systemd_units/README.md +++ b/plugins/inputs/systemd_units/README.md @@ -1,7 +1,7 @@ # systemd Units Input Plugin The systemd_units plugin gathers systemd unit status on Linux. It relies on -`systemctl list-units --all --plain --type=service` to collect data on service status. +`systemctl list-units [PATTERN] --all --plain --type=service` to collect data on service status. The results are tagged with the unit name and provide enumerated fields for loaded, active and running fields, indicating the unit health. @@ -22,6 +22,13 @@ see `systemctl list-units --all --type help` for possible options. ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. 
"a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ``` ### Metrics diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go index e94b9432136e4..e41c64752977e 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux.go +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -18,10 +18,11 @@ import ( type SystemdUnits struct { Timeout config.Duration UnitType string `toml:"unittype"` + Pattern string `toml:"pattern"` systemctl systemctl } -type systemctl func(timeout config.Duration, unitType string) (*bytes.Buffer, error) +type systemctl func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) const measurement = "systemd_units" @@ -115,6 +116,7 @@ var subMap = map[string]int{ var ( defaultTimeout = config.Duration(time.Second) defaultUnitType = "service" + defaultPattern = "" ) // Description returns a short description of the plugin @@ -132,12 +134,19 @@ func (s *SystemdUnits) SampleConfig() string { ## values are "socket", "target", "device", "mount", "automount", "swap", ## "timer", "path", "slice" and "scope ": # unittype = "service" + # + ## Filter for a specific pattern, default is "" (i.e. all), other possible + ## values are valid pattern for systemctl, e.g. "a*" for all units with + ## names starting with "a" + # pattern = "" + ## pattern = "telegraf* influxdb*" + ## pattern = "a*" ` } // Gather parses systemctl outputs and adds counters to the Accumulator func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { - out, err := s.systemctl(s.Timeout, s.UnitType) + out, err := s.systemctl(s.Timeout, s.UnitType, s.Pattern) if err != nil { return err } @@ -192,22 +201,32 @@ func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { return nil } -func setSystemctl(timeout config.Duration, unitType string) (*bytes.Buffer, error) { +func setSystemctl(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { // is systemctl available ? systemctlPath, err := exec.LookPath("systemctl") if err != nil { return nil, err } - - cmd := exec.Command(systemctlPath, "list-units", "--all", "--plain", fmt.Sprintf("--type=%s", unitType), "--no-legend") - + // build parameters for systemctl call + params := []string{"list-units"} + // create patterns parameters if provided in config + if pattern != "" { + psplit := strings.SplitN(pattern, " ", -1) + for v := range psplit { + params = append(params, psplit[v]) + } + } + params = append(params, "--all", "--plain") + // add type as configured in config + params = append(params, fmt.Sprintf("--type=%s", unitType)) + params = append(params, "--no-legend") + cmd := exec.Command(systemctlPath, params...) 
var out bytes.Buffer cmd.Stdout = &out err = internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { - return &out, fmt.Errorf("error running systemctl list-units --all --plain --type=%s --no-legend: %s", unitType, err) + return &out, fmt.Errorf("error running systemctl %s: %s", strings.Join(params, " "), err) } - return &out, nil } @@ -217,6 +236,7 @@ func init() { systemctl: setSystemctl, Timeout: defaultTimeout, UnitType: defaultUnitType, + Pattern: defaultPattern, } }) } diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go index a6cfbd6552771..05070c6ff5e94 100644 --- a/plugins/inputs/systemd_units/systemd_units_linux_test.go +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -74,7 +74,7 @@ func TestSystemdUnits(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { systemdUnits := &SystemdUnits{ - systemctl: func(timeout config.Duration, unitType string) (*bytes.Buffer, error) { + systemctl: func(timeout config.Duration, unitType string, pattern string) (*bytes.Buffer, error) { return bytes.NewBufferString(tt.line), nil }, } diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go index f53cea3de6eba..32f5b97cc37ec 100644 --- a/plugins/inputs/systemd_units/systemd_units_notlinux.go +++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package systemd_units diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index d84c09ff8d3c2..d5bda84732ad8 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -1,3 +1,4 @@ +//go:build !solaris // +build !solaris package tail diff --git a/plugins/inputs/tail/tail_solaris.go b/plugins/inputs/tail/tail_solaris.go index 802088da28248..093dd16a06c23 100644 --- a/plugins/inputs/tail/tail_solaris.go +++ b/plugins/inputs/tail/tail_solaris.go @@ -1,5 +1,6 @@ // Skipping plugin on Solaris due to fsnotify support // +//go:build solaris // +build solaris package tail diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index b855691e6f1ab..1098a10edbff5 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -2,7 +2,6 @@ package tail import ( "bytes" - "io/ioutil" "log" "os" "path/filepath" @@ -49,7 +48,7 @@ func NewTestTail() *Tail { } func TestTailBadLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -86,7 +85,7 @@ func TestTailBadLine(t *testing.T) { } func TestTailDosLineEndings(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") @@ -173,7 +172,7 @@ func TestGrokParseLogFilesWithMultiline(t *testing.T) { } func TestGrokParseLogFilesWithMultilineTimeout(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -286,7 +285,7 @@ func createGrokParser() (parsers.Parser, error) { // The csv parser should only parse the header line once per file. 
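All of the build-constraint edits in this diff follow the Go 1.17 convention of carrying both syntaxes side by side, which `gofmt` keeps in sync: `//go:build` takes an explicit boolean expression, while in the legacy `// +build` form a comma means AND, a space means OR, and separate lines are ANDed together. A minimal file header showing an equivalent pair (the tag combination is illustrative):

```go
//go:build linux && !race
// +build linux,!race

// Package example exists only to show the paired constraint syntaxes;
// the two lines above must express the same condition.
package example
```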
func TestCSVHeadersParsedOnce(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -345,7 +344,7 @@ cpu,42 // Ensure that the first line can produce multiple metrics (#6138) func TestMultipleMetricsOnFirstLine(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -450,89 +449,86 @@ func TestCharacterEncoding(t *testing.T) { ), } + watchMethod := defaultWatchMethod + if runtime.GOOS == "windows" { + watchMethod = "poll" + } + tests := []struct { - name string - plugin *Tail - offset int64 - expected []telegraf.Metric + name string + testfiles string + fromBeginning bool + characterEncoding string + offset int64 + expected []telegraf.Metric }{ { - name: "utf-8", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - expected: full, + name: "utf-8", + testfiles: "cpu-utf-8.influx", + fromBeginning: true, + characterEncoding: "utf-8", + expected: full, }, { - name: "utf-8 seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-8.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-8", - }, - offset: 0x33, - expected: full[1:], + name: "utf-8 seek", + testfiles: "cpu-utf-8.influx", + characterEncoding: "utf-8", + offset: 0x33, + expected: full[1:], }, { - name: "utf-16le", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - expected: full, + name: "utf-16le", + testfiles: "cpu-utf-16le.influx", + fromBeginning: true, + characterEncoding: "utf-16le", + expected: full, }, { - name: "utf-16le seek", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16le.influx")}, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16le", - }, - offset: 0x68, - expected: full[1:], + name: "utf-16le seek", + testfiles: "cpu-utf-16le.influx", + characterEncoding: "utf-16le", + offset: 0x68, + expected: full[1:], }, { - name: "utf-16be", - plugin: &Tail{ - Files: []string{filepath.Join(testdataDir, "cpu-utf-16be.influx")}, - FromBeginning: true, - MaxUndeliveredLines: 1000, - Log: testutil.Logger{}, - CharacterEncoding: "utf-16be", - }, - expected: full, + name: "utf-16be", + testfiles: "cpu-utf-16be.influx", + fromBeginning: true, + characterEncoding: "utf-16be", + expected: full, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tt.plugin.SetParserFunc(func() (parsers.Parser, error) { + + plugin := &Tail{ + Files: []string{filepath.Join(testdataDir, tt.testfiles)}, + FromBeginning: tt.fromBeginning, + MaxUndeliveredLines: 1000, + Log: testutil.Logger{}, + CharacterEncoding: tt.characterEncoding, + WatchMethod: watchMethod, + } + + plugin.SetParserFunc(func() (parsers.Parser, error) { handler := influx.NewMetricHandler() return influx.NewParser(handler), nil }) if tt.offset != 0 { - tt.plugin.offsets = map[string]int64{ - tt.plugin.Files[0]: tt.offset, + plugin.offsets = map[string]int64{ + plugin.Files[0]: tt.offset, } } - err := tt.plugin.Init() + err := plugin.Init() require.NoError(t, err) var acc testutil.Accumulator - err = tt.plugin.Start(&acc) + err = plugin.Start(&acc) 
require.NoError(t, err) acc.Wait(len(tt.expected)) - tt.plugin.Stop() + plugin.Stop() actual := acc.GetTelegrafMetrics() for _, m := range actual { @@ -545,7 +541,7 @@ func TestCharacterEncoding(t *testing.T) { } func TestTailEOF(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) _, err = tmpfile.WriteString("cpu usage_idle=100\r\n") diff --git a/plugins/inputs/trig/README.md b/plugins/inputs/trig/README.md new file mode 100644 index 0000000000000..41ff8743e8cf3 --- /dev/null +++ b/plugins/inputs/trig/README.md @@ -0,0 +1,28 @@ +# Trig Input Plugin + +The `trig` plugin is for demonstration purposes and inserts sine and cosine + +### Configuration + +```toml +# Inserts sine and cosine waves for demonstration purposes +[[inputs.trig]] + ## Set the amplitude + amplitude = 10.0 +``` + +### Metrics + +- trig + - fields: + - cosine (float) + - sine (float) + + +### Example Output + +``` +trig,host=MBP15-SWANG.local cosine=10,sine=0 1632338680000000000 +trig,host=MBP15-SWANG.local sine=5.877852522924732,cosine=8.090169943749473 1632338690000000000 +trig,host=MBP15-SWANG.local sine=9.510565162951535,cosine=3.0901699437494745 1632338700000000000 +``` diff --git a/plugins/inputs/twemproxy/README.md b/plugins/inputs/twemproxy/README.md new file mode 100644 index 0000000000000..0c07e0aec4463 --- /dev/null +++ b/plugins/inputs/twemproxy/README.md @@ -0,0 +1,16 @@ +# Twemproxy Input Plugin + +The `twemproxy` plugin gathers statistics from [Twemproxy](https://github.com/twitter/twemproxy) servers. + + +### Configuration + +```toml +# Read Twemproxy stats data +[[inputs.twemproxy]] + ## Twemproxy stats address and port (no scheme) + addr = "localhost:22222" + ## Monitor pool name + pools = ["redis_pool", "mc_pool"] +``` + diff --git a/plugins/inputs/twemproxy/twemproxy.go b/plugins/inputs/twemproxy/twemproxy.go index cda56943f1002..b4c4b52f85b6c 100644 --- a/plugins/inputs/twemproxy/twemproxy.go +++ b/plugins/inputs/twemproxy/twemproxy.go @@ -3,7 +3,7 @@ package twemproxy import ( "encoding/json" "errors" - "io/ioutil" + "io" "net" "time" @@ -37,7 +37,7 @@ func (t *Twemproxy) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - body, err := ioutil.ReadAll(conn) + body, err := io.ReadAll(conn) if err != nil { return err } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index 8bd8262c035b0..3e36838c6192a 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -104,7 +104,7 @@ package udp_listener // } // func TestRunParser(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short,host=server01 value=12.0 1422568543702900257\n") // listener, in := newTestUDPListener() @@ -127,7 +127,7 @@ package udp_listener // } // func TestRunParserInvalidMsg(_ *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu_load_short") // listener, in := newTestUDPListener() @@ -153,7 +153,7 @@ package udp_listener // } // func TestRunParserGraphiteMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// log.SetOutput(io.Discard) // var testmsg = []byte("cpu.load.graphite 12 1454780029") // listener, in := newTestUDPListener() @@ -174,7 +174,7 @@ package udp_listener // } // func TestRunParserJSONMsg(t *testing.T) { -// log.SetOutput(ioutil.Discard) +// 
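To make the twemproxy README and `Gather` change above concrete, here is a hedged sketch of reading the stats endpoint at the configured `addr` after the `io.ReadAll` swap. The JSON handling is simplified to a generic map; the real plugin also walks the configured pools, so field names here are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net"
	"time"
)

// fetchStats connects to the twemproxy stats port (addr as in the README,
// e.g. "localhost:22222"), reads the JSON blob it emits on connect, and
// decodes it into a generic map.
func fetchStats(addr string) (map[string]interface{}, error) {
	conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	body, err := io.ReadAll(conn) // was ioutil.ReadAll before this diff
	if err != nil {
		return nil, err
	}
	var stats map[string]interface{}
	if err := json.Unmarshal(body, &stats); err != nil {
		return nil, err
	}
	return stats, nil
}

func main() {
	stats, err := fetchStats("localhost:22222")
	if err != nil {
		fmt.Println("gather failed:", err)
		return
	}
	fmt.Printf("%d top-level stats\n", len(stats))
}
```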
log.SetOutput(io.Discard) // var testmsg = []byte("{\"a\": 5, \"b\": {\"c\": 6}}\n") // listener, in := newTestUDPListener() diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index e4f18bee42ed3..d9872b9d81af7 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 4ba9e941a52ee..088c08378c1ef 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package varnish diff --git a/plugins/inputs/varnish/varnish_windows.go b/plugins/inputs/varnish/varnish_windows.go index 0c85c106f2b4f..9fed7dfc2a3c8 100644 --- a/plugins/inputs/varnish/varnish_windows.go +++ b/plugins/inputs/varnish/varnish_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package varnish diff --git a/plugins/inputs/webhooks/filestack/filestack_webhooks.go b/plugins/inputs/webhooks/filestack/filestack_webhooks.go index 19f8c0251bbb7..44def8c6f5141 100644 --- a/plugins/inputs/webhooks/filestack/filestack_webhooks.go +++ b/plugins/inputs/webhooks/filestack/filestack_webhooks.go @@ -2,7 +2,7 @@ package filestack import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (fs *FilestackWebhook) Register(router *mux.Router, acc telegraf.Accumulato func (fs *FilestackWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/github/github_webhooks.go b/plugins/inputs/webhooks/github/github_webhooks.go index 5febb80afb6bb..2d48cbef2e5f2 100644 --- a/plugins/inputs/webhooks/github/github_webhooks.go +++ b/plugins/inputs/webhooks/github/github_webhooks.go @@ -5,7 +5,7 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "io/ioutil" + "io" "log" "net/http" @@ -28,7 +28,7 @@ func (gh *GithubWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (gh *GithubWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() eventType := r.Header.Get("X-Github-Event") - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go index a7e219c53c905..67ba86908d1a1 100644 --- a/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go +++ b/plugins/inputs/webhooks/mandrill/mandrill_webhooks.go @@ -2,7 +2,7 @@ package mandrill import ( "encoding/json" - "io/ioutil" + "io" "log" "net/http" "net/url" @@ -31,7 +31,7 @@ func (md *MandrillWebhook) returnOK(w http.ResponseWriter, _ *http.Request) { func (md *MandrillWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/webhooks/papertrail/README.md b/plugins/inputs/webhooks/papertrail/README.md index a3463dcaa6f8b..3f9c33ec5320c 100644 --- a/plugins/inputs/webhooks/papertrail/README.md +++ b/plugins/inputs/webhooks/papertrail/README.md @@ -14,6 +14,23 @@ Events from Papertrail come in two forms: * Each point has a field counter 
(`count`), which is set to `1` (signifying the event occurred) * Each event "hostname" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag + * The "saved_search" id in the payload is added as a `search_id` field + * The papertrail url to view the event is built and added as a `url` field + * The rest of the data in the event is converted directly to fields on the point: + * `id` + * `source_ip` + * `source_name` + * `source_id` + * `program` + * `severity` + * `facility` + * `message` + +When a callback is received, an event-based point will look similar to: + +``` +papertrail,host=myserver.example.com,event=saved_search_name count=1i,source_name="abc",program="CROND",severity="Info",source_id=2i,message="message body",source_ip="208.75.57.121",id=7711561783320576i,facility="Cron",url="https://papertrailapp.com/searches/42?centered_on_id=7711561783320576",search_id=42i 1453248892000000000 +``` * The [count-based callback](http://help.papertrailapp.com/kb/how-it-works/web-hooks/#count-only-webhooks) @@ -22,10 +39,7 @@ Events from Papertrail come in two forms: * Each count "source_name" object is converted to a `host` tag * The "saved_search" name in the payload is added as an `event` tag -The current functionality is very basic, however this allows you to -track the number of events by host and saved search. - -When an event is received, any point will look similar to: +When a callback is received, a count-based point will look similar to: ``` papertrail,host=myserver.example.com,event=saved_search_name count=3i 1453248892000000000 diff --git a/plugins/inputs/webhooks/papertrail/papertrail_test.go b/plugins/inputs/webhooks/papertrail/papertrail_test.go index 14b8aec895c98..6cba6730c9486 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_test.go +++ b/plugins/inputs/webhooks/papertrail/papertrail_test.go @@ -67,8 +67,32 @@ func TestEventPayload(t *testing.T) { resp := post(pt, contentType, form.Encode()) require.Equal(t, http.StatusOK, resp.Code) - fields := map[string]interface{}{ - "count": uint64(1), + fields1 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711561783320576), + "source_ip": "208.75.57.121", + "source_name": "abc", + "source_id": int64(2), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "message body", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711561783320576", + "search_id": int64(42), + } + + fields2 := map[string]interface{}{ + "count": uint64(1), + "id": int64(7711562567655424), + "source_ip": "208.75.57.120", + "source_name": "server1", + "source_id": int64(19), + "program": "CROND", + "severity": "Info", + "facility": "Cron", + "message": "A short event", + "url": "https://papertrailapp.com/searches/42?centered_on_id=7711562567655424", + "search_id": int64(42), } tags1 := map[string]string{ @@ -80,8 +104,8 @@ func TestEventPayload(t *testing.T) { "host": "def", } - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags1) - acc.AssertContainsTaggedFields(t, "papertrail", fields, tags2) + acc.AssertContainsTaggedFields(t, "papertrail", fields1, tags1) + acc.AssertContainsTaggedFields(t, "papertrail", fields2, tags2) } func TestCountPayload(t *testing.T) { diff --git a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go index 7f11e31e79a11..5aa8ecaf83fc2 100644 --- a/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go +++ 
b/plugins/inputs/webhooks/papertrail/papertrail_webhooks.go @@ -2,6 +2,7 @@ package papertrail import ( "encoding/json" + "fmt" "log" "net/http" "time" @@ -49,7 +50,17 @@ func (pt *PapertrailWebhook) eventHandler(w http.ResponseWriter, r *http.Request "event": payload.SavedSearch.Name, } fields := map[string]interface{}{ - "count": uint64(1), + "count": uint64(1), + "id": e.ID, + "source_ip": e.SourceIP, + "source_name": e.SourceName, + "source_id": int64(e.SourceID), + "program": e.Program, + "severity": e.Severity, + "facility": e.Facility, + "message": e.Message, + "url": fmt.Sprintf("%s?centered_on_id=%d", payload.SavedSearch.SearchURL, e.ID), + "search_id": payload.SavedSearch.ID, } pt.acc.AddFields("papertrail", fields, tags, e.ReceivedAt) } diff --git a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go index 55ff7eb2f3594..d9c1323cdd608 100644 --- a/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go +++ b/plugins/inputs/webhooks/rollbar/rollbar_webhooks.go @@ -3,7 +3,7 @@ package rollbar import ( "encoding/json" "errors" - "io/ioutil" + "io" "log" "net/http" "time" @@ -25,7 +25,7 @@ func (rb *RollbarWebhook) Register(router *mux.Router, acc telegraf.Accumulator) func (rb *RollbarWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) return diff --git a/plugins/inputs/win_eventlog/event.go b/plugins/inputs/win_eventlog/event.go index 2169ce8b490b3..86ddefdcb95e0 100644 --- a/plugins/inputs/win_eventlog/event.go +++ b/plugins/inputs/win_eventlog/event.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/syscall_windows.go b/plugins/inputs/win_eventlog/syscall_windows.go index df02913eee2af..d7bc07d0a5d42 100644 --- a/plugins/inputs/win_eventlog/syscall_windows.go +++ b/plugins/inputs/win_eventlog/syscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util.go b/plugins/inputs/win_eventlog/util.go index 7435cdb09ceaf..276e7514228e0 100644 --- a/plugins/inputs/win_eventlog/util.go +++ b/plugins/inputs/win_eventlog/util.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/util_test.go b/plugins/inputs/win_eventlog/util_test.go index ce7428dd391d2..1dc90cc2326d3 100644 --- a/plugins/inputs/win_eventlog/util_test.go +++ b/plugins/inputs/win_eventlog/util_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog.go b/plugins/inputs/win_eventlog/win_eventlog.go index 8d0efe3119d97..2ee303d483530 100644 --- a/plugins/inputs/win_eventlog/win_eventlog.go +++ b/plugins/inputs/win_eventlog/win_eventlog.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows 
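The papertrail hunks above (README, test, and handler) all hinge on one detail: the `url` field is derived from the saved search's own URL plus the event id. A small sketch of that construction, using assumed struct names modeled on the payload fields visible in the hunk:

```go
package main

import "fmt"

// Assumed shapes, modeled on the payload fields visible in the hunk.
type savedSearch struct {
	ID        int64
	Name      string
	SearchURL string
}

type event struct {
	ID int64
}

// eventURL mirrors the fmt.Sprintf in the handler: the saved search URL
// with the event id appended, so each point links straight to its event.
func eventURL(s savedSearch, e event) string {
	return fmt.Sprintf("%s?centered_on_id=%d", s.SearchURL, e.ID)
}

func main() {
	s := savedSearch{ID: 42, Name: "saved_search_name", SearchURL: "https://papertrailapp.com/searches/42"}
	e := event{ID: 7711561783320576}
	fmt.Println(eventURL(s, e))
	// https://papertrailapp.com/searches/42?centered_on_id=7711561783320576
}
```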
Event Log messages diff --git a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go index 005077aa64c7d..e78ad6133b367 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_notwindows.go +++ b/plugins/inputs/win_eventlog/win_eventlog_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows //revive:disable-next-line:var-naming diff --git a/plugins/inputs/win_eventlog/win_eventlog_test.go b/plugins/inputs/win_eventlog/win_eventlog_test.go index 9f922431ed776..bd6a434f40088 100644 --- a/plugins/inputs/win_eventlog/win_eventlog_test.go +++ b/plugins/inputs/win_eventlog/win_eventlog_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_eventlog/zsyscall_windows.go b/plugins/inputs/win_eventlog/zsyscall_windows.go index 5c7b0a504b0bf..34c17471691e8 100644 --- a/plugins/inputs/win_eventlog/zsyscall_windows.go +++ b/plugins/inputs/win_eventlog/zsyscall_windows.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows //revive:disable-next-line:var-naming // Package win_eventlog Input plugin to collect Windows Event Log messages diff --git a/plugins/inputs/win_perf_counters/kernel32.go b/plugins/inputs/win_perf_counters/kernel32.go index 9cdadedc873bd..09cbd4be5f182 100644 --- a/plugins/inputs/win_perf_counters/kernel32.go +++ b/plugins/inputs/win_perf_counters/kernel32.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 3a24761b9d593..d4e5f14a1c267 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_386.go b/plugins/inputs/win_perf_counters/pdh_386.go index 134d15c8d1461..ec572db72447e 100644 --- a/plugins/inputs/win_perf_counters/pdh_386.go +++ b/plugins/inputs/win_perf_counters/pdh_386.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/pdh_amd64.go b/plugins/inputs/win_perf_counters/pdh_amd64.go index ff3b39335bcd4..1afedc317260e 100644 --- a/plugins/inputs/win_perf_counters/pdh_amd64.go +++ b/plugins/inputs/win_perf_counters/pdh_amd64.go @@ -28,6 +28,7 @@ // Joseph Watson // Kevin Pors +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index a59f96b84dc43..ab130a41dec3f 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -1,4 +1,5 @@ // Go API over pdh syscalls +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 4bcbbfb1b2318..3a74e34a5228a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git 
a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 43b20eb611577..a5ae58370ab4a 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go index 427f5d5461ff3..00af92b722552 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_perf_counters diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 930e923754ac8..969b518d0f2b0 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_perf_counters diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 185e9b6b67de4..38f873a99284d 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 998aa1ed5eb2f..3c831642a01cf 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows //these tests must be run under administrator account diff --git a/plugins/inputs/win_services/win_services_notwindows.go b/plugins/inputs/win_services/win_services_notwindows.go index 062c11cfc8eed..aa2f3534ca74d 100644 --- a/plugins/inputs/win_services/win_services_notwindows.go +++ b/plugins/inputs/win_services/win_services_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package win_services diff --git a/plugins/inputs/win_services/win_services_test.go b/plugins/inputs/win_services/win_services_test.go index 7d1672e8f6515..69a75372dd086 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package win_services diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go index 75890a7901074..29a0250d92b7f 100644 --- a/plugins/inputs/wireless/wireless_linux.go +++ b/plugins/inputs/wireless/wireless_linux.go @@ -1,10 +1,10 @@ +//go:build linux // +build linux package wireless import ( "bytes" - "io/ioutil" "log" "os" "path" @@ -46,7 +46,7 @@ func (w *Wireless) Gather(acc telegraf.Accumulator) error { w.loadPath() wirelessPath := path.Join(w.HostProc, "net", "wireless") - table, err := ioutil.ReadFile(wirelessPath) + table, err := os.ReadFile(wirelessPath) if err != nil { return err } diff --git a/plugins/inputs/wireless/wireless_notlinux.go b/plugins/inputs/wireless/wireless_notlinux.go index 4769acc970e42..435559ca58529 100644 --- a/plugins/inputs/wireless/wireless_notlinux.go +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -1,3 +1,4 @@ 
+//go:build !linux // +build !linux package wireless diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go index 6c562887e54db..20c10de88a347 100644 --- a/plugins/inputs/wireless/wireless_test.go +++ b/plugins/inputs/wireless/wireless_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package wireless diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go index b106f91b772f6..3486f2779eb2b 100644 --- a/plugins/inputs/x509_cert/x509_cert.go +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -7,14 +7,15 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "net" "net/url" + "os" "path/filepath" "strings" "time" + "github.com/pion/dtls/v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" @@ -176,7 +177,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica return certs, nil case "file": - content, err := ioutil.ReadFile(u.Path) + content, err := os.ReadFile(u.Path) if err != nil { return nil, err } diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go index 9c42c09bdabda..f0b0379109749 100644 --- a/plugins/inputs/x509_cert/x509_cert_test.go +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -4,8 +4,6 @@ import ( "crypto/tls" "encoding/base64" "fmt" - "github.com/pion/dtls/v2" - "io/ioutil" "math/big" "net" "net/url" @@ -15,6 +13,8 @@ import ( "testing" "time" + "github.com/pion/dtls/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ var _ telegraf.Input = &X509Cert{} func TestGatherRemoteIntegration(t *testing.T) { t.Skip("Skipping network-dependent test due to race condition when test-all") - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp("", "example") require.NoError(t, err) defer os.Remove(tmpfile.Name()) @@ -149,7 +149,7 @@ func TestGatherLocal(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) @@ -181,7 +181,7 @@ func TestGatherLocal(t *testing.T) { func TestTags(t *testing.T) { cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(cert)) @@ -238,7 +238,7 @@ func TestGatherChain(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - f, err := ioutil.TempFile("", "x509_cert") + f, err := os.CreateTemp("", "x509_cert") require.NoError(t, err) _, err = f.Write([]byte(test.content)) diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 491388147d93c..e493e3fc9a0bb 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs @@ -173,8 +174,11 @@ func run(command string, args ...string) ([]string, error) { stdout := strings.TrimSpace(outbuf.String()) stderr := strings.TrimSpace(errbuf.String()) - if _, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s error: %s", command, stderr) + if err != nil { + if _, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("%s error: %s", command, stderr) + } + return nil, 
fmt.Errorf("%s error: %s", command, err) } return strings.Split(stdout, "\n"), nil } diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 4d1fea0ae483a..816f82b6dbf5b 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package zfs diff --git a/plugins/inputs/zfs/zfs_linux.go b/plugins/inputs/zfs/zfs_linux.go index 276880d7dea97..ac3ca6ee81d23 100644 --- a/plugins/inputs/zfs/zfs_linux.go +++ b/plugins/inputs/zfs/zfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package zfs diff --git a/plugins/inputs/zfs/zfs_linux_test.go b/plugins/inputs/zfs/zfs_linux_test.go index 7d8aff81c689c..b844759eaffd1 100644 --- a/plugins/inputs/zfs/zfs_linux_test.go +++ b/plugins/inputs/zfs/zfs_linux_test.go @@ -1,9 +1,9 @@ +//go:build linux // +build linux package zfs import ( - "io/ioutil" "os" "testing" @@ -191,10 +191,10 @@ func TestZfsPoolMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(poolIoContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) poolMetrics := getPoolMetrics() @@ -230,25 +230,25 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/HOME", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/HOME/io", []byte(""), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/arcstats", []byte(arcstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/zfetchstats", []byte(zfetchstatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) + err = os.WriteFile(testKstatPath+"/zil", []byte(zilContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) + err = os.WriteFile(testKstatPath+"/fm", []byte(fmContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) + err = os.WriteFile(testKstatPath+"/dmu_tx", []byte(dmuTxContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) + err = os.WriteFile(testKstatPath+"/abdstats", []byte(abdstatsContents), 0644) require.NoError(t, err) intMetrics := getKstatMetricsAll() @@ -271,7 +271,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) + err = os.WriteFile(testKstatPath+"/STORAGE/io", []byte(""), 0644) require.NoError(t, err) tags = map[string]string{ diff --git a/plugins/inputs/zfs/zfs_other.go b/plugins/inputs/zfs/zfs_other.go index 98de02be917dd..963afd3580ff8 100644 --- a/plugins/inputs/zfs/zfs_other.go +++ b/plugins/inputs/zfs/zfs_other.go @@ -1,3 +1,4 @@ +//go:build !linux && !freebsd // +build !linux,!freebsd package zfs diff --git 
a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index 3889e2f2cd9ea..a1abccc420ad9 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -24,7 +24,10 @@ import ( "log" "time" - zipkin "github.com/openzipkin/zipkin-go-opentracing" + otlog "github.com/opentracing/opentracing-go/log" + zipkinot "github.com/openzipkin-contrib/zipkin-go-opentracing" + "github.com/openzipkin/zipkin-go" + zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http" ) var ( @@ -46,27 +49,30 @@ func init() { func main() { flag.Parse() var hostname = fmt.Sprintf("http://%s:9411/api/v1/spans", ZipkinServerHost) - collector, err := zipkin.NewHTTPCollector( + reporter := zipkinhttp.NewReporter( hostname, - zipkin.HTTPBatchSize(BatchSize), - zipkin.HTTPMaxBacklog(MaxBackLog), - zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) + zipkinhttp.BatchSize(BatchSize), + zipkinhttp.MaxBacklog(MaxBackLog), + zipkinhttp.BatchInterval(time.Duration(BatchTimeInterval)*time.Second), + ) + defer reporter.Close() + + endpoint, err := zipkin.NewEndpoint("Trivial", "127.0.0.1:0") if err != nil { - log.Fatalf("Error initializing zipkin http collector: %v\n", err) + log.Fatalf("Error: %v\n", err) } - defer collector.Close() - - tracer, err := zipkin.NewTracer( - zipkin.NewRecorder(collector, false, "127.0.0.1:0", "Trivial")) + nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint)) if err != nil { log.Fatalf("Error: %v\n", err) } + tracer := zipkinot.Wrap(nativeTracer) + log.Printf("Writing %d spans to zipkin server at %s\n", SpanCount, hostname) for i := 0; i < SpanCount; i++ { parent := tracer.StartSpan("Parent") - parent.LogEvent(fmt.Sprintf("Trace%d", i)) + parent.LogFields(otlog.Message(fmt.Sprintf("Trace%d", i))) parent.Finish() } log.Println("Done. Flushing remaining spans...") diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index b26e3d73fa3fd..09518103b22cc 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -24,15 +24,16 @@ Otherwise, the input file will be interpreted as json, and the output will be en package main import ( + "context" "encoding/json" "errors" "flag" "fmt" - "io/ioutil" "log" + "os" "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) var ( @@ -51,7 +52,7 @@ func init() { func main() { flag.Parse() - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { log.Fatalf("Error reading file: %v\n", err) } @@ -62,7 +63,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } case "thrift": @@ -70,7 +71,7 @@ func main() { if err != nil { log.Fatalf("%v\n", err) } - if err := ioutil.WriteFile(outFileName, raw, 0644); err != nil { + if err := os.WriteFile(outFileName, raw, 0644); err != nil { log.Fatalf("%v", err) } default: @@ -100,20 +101,20 @@ func jsonToZipkinThrift(jsonRaw []byte) ([]byte, error) { zspans = append(zspans, spans...) 
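The `thrift_serialize` hunks here and just below are the Apache Thrift 0.14 migration: `NewTBinaryProtocolTransport` becomes `NewTBinaryProtocolConf`, and every protocol call now takes a `context.Context`. A minimal round-trip sketch of that API, independent of the zipkin structs (using the upstream `github.com/apache/thrift` import path):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

// Round-trips a list of i64s through TBinaryProtocol using the
// context-aware Thrift 0.14 API, matching the calls in the hunks.
func main() {
	ctx := context.Background()
	buf := thrift.NewTMemoryBuffer()
	proto := thrift.NewTBinaryProtocolConf(buf, nil) // was NewTBinaryProtocolTransport

	values := []int64{1, 2, 3}
	if err := proto.WriteListBegin(ctx, thrift.I64, len(values)); err != nil {
		log.Fatal(err)
	}
	for _, v := range values {
		if err := proto.WriteI64(ctx, v); err != nil {
			log.Fatal(err)
		}
	}
	if err := proto.WriteListEnd(ctx); err != nil {
		log.Fatal(err)
	}

	_, size, err := proto.ReadListBegin(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for i := 0; i < size; i++ {
		v, err := proto.ReadI64(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(v)
	}
	if err := proto.ReadListEnd(ctx); err != nil {
		log.Fatal(err)
	}
}
```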
buf := thrift.NewTMemoryBuffer() - transport := thrift.NewTBinaryProtocolTransport(buf) + transport := thrift.NewTBinaryProtocolConf(buf, nil) - if err = transport.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { + if err = transport.WriteListBegin(context.Background(), thrift.STRUCT, len(spans)); err != nil { return nil, fmt.Errorf("error in beginning thrift write: %v", err) } for _, span := range zspans { - err = span.Write(transport) + err = span.Write(context.Background(), transport) if err != nil { return nil, fmt.Errorf("error converting zipkin struct to thrift: %v", err) } } - if err = transport.WriteListEnd(); err != nil { + if err = transport.WriteListEnd(context.Background()); err != nil { return nil, fmt.Errorf("error finishing thrift write: %v", err) } @@ -127,8 +128,8 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { err = fmt.Errorf("error in ReadListBegin: %v", err) return nil, err @@ -137,14 +138,14 @@ func thriftToJSONSpans(thriftData []byte) ([]byte, error) { var spans []*zipkincore.Span for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { err = fmt.Errorf("Error reading into zipkin struct: %v", err) return nil, err } spans = append(spans, zs) } - err = transport.ReadListEnd() + err = transport.ReadListEnd(context.Background()) if err != nil { err = fmt.Errorf("error ending thrift read: %v", err) return nil, err diff --git a/plugins/inputs/zipkin/codec/codec.go b/plugins/inputs/zipkin/codec/codec.go index 167b8ec24f1a3..2754e13d969e7 100644 --- a/plugins/inputs/zipkin/codec/codec.go +++ b/plugins/inputs/zipkin/codec/codec.go @@ -3,8 +3,8 @@ package codec import ( "time" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" "github.com/influxdata/telegraf/plugins/inputs/zipkin/trace" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" ) //now is a mockable time for now diff --git a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go index 1803486742301..4c054126fa95e 100644 --- a/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go +++ b/plugins/inputs/zipkin/codec/jsonV1/jsonV1.go @@ -7,7 +7,7 @@ import ( "time" "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // JSON decodes spans from bodies `POST`ed to the spans endpoint diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go new file mode 100644 index 0000000000000..be7b2034832d4 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go @@ -0,0 +1,5 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. 
+ +package zipkincore + +var GoUnusedProtection__ int diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go new file mode 100644 index 0000000000000..7c5b5825acaa6 --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go @@ -0,0 +1,47 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const MESSAGE_SEND = "ms" +const MESSAGE_RECV = "mr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const HTTP_HOST = "http.host" +const HTTP_METHOD = "http.method" +const HTTP_PATH = "http.path" +const HTTP_ROUTE = "http.route" +const HTTP_URL = "http.url" +const HTTP_STATUS_CODE = "http.status_code" +const HTTP_REQUEST_SIZE = "http.request.size" +const HTTP_RESPONSE_SIZE = "http.response.size" +const LOCAL_COMPONENT = "lc" +const ERROR = "error" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" +const MESSAGE_ADDR = "ma" + +func init() { +} diff --git a/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go new file mode 100644 index 0000000000000..258fd4d1a0afc --- /dev/null +++ b/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go @@ -0,0 +1,1556 @@ +// Code generated by Thrift Compiler (0.14.2). DO NOT EDIT. + +package zipkincore + +import ( + "bytes" + "context" + "database/sql/driver" + "errors" + "fmt" + "github.com/apache/thrift/lib/go/thrift" + "time" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = time.Now +var _ = bytes.Equal + +//A subset of thrift base types, except BYTES. 
+type AnnotationType int64 + +const ( + AnnotationType_BOOL AnnotationType = 0 + AnnotationType_BYTES AnnotationType = 1 + AnnotationType_I16 AnnotationType = 2 + AnnotationType_I32 AnnotationType = 3 + AnnotationType_I64 AnnotationType = 4 + AnnotationType_DOUBLE AnnotationType = 5 + AnnotationType_STRING AnnotationType = 6 +) + +func (p AnnotationType) String() string { + switch p { + case AnnotationType_BOOL: + return "BOOL" + case AnnotationType_BYTES: + return "BYTES" + case AnnotationType_I16: + return "I16" + case AnnotationType_I32: + return "I32" + case AnnotationType_I64: + return "I64" + case AnnotationType_DOUBLE: + return "DOUBLE" + case AnnotationType_STRING: + return "STRING" + } + return "" +} + +func AnnotationTypeFromString(s string) (AnnotationType, error) { + switch s { + case "BOOL": + return AnnotationType_BOOL, nil + case "BYTES": + return AnnotationType_BYTES, nil + case "I16": + return AnnotationType_I16, nil + case "I32": + return AnnotationType_I32, nil + case "I64": + return AnnotationType_I64, nil + case "DOUBLE": + return AnnotationType_DOUBLE, nil + case "STRING": + return AnnotationType_STRING, nil + } + return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string") +} + +func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v } + +func (p AnnotationType) MarshalText() ([]byte, error) { + return []byte(p.String()), nil +} + +func (p *AnnotationType) UnmarshalText(text []byte) error { + q, err := AnnotationTypeFromString(string(text)) + if err != nil { + return err + } + *p = q + return nil +} + +func (p *AnnotationType) Scan(value interface{}) error { + v, ok := value.(int64) + if !ok { + return errors.New("Scan value is not int64") + } + *p = AnnotationType(v) + return nil +} + +func (p *AnnotationType) Value() (driver.Value, error) { + if p == nil { + return nil, nil + } + return int64(*p), nil +} + +// Indicates the network context of a service recording an annotation with two +// exceptions. +// +// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR, +// the endpoint indicates the source or destination of an RPC. This exception +// allows zipkin to display network context of uninstrumented services, or +// clients such as web browsers. +// +// Attributes: +// - Ipv4: IPv4 host address packed into 4 bytes. +// +// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4 +// - Port: IPv4 port or 0, if unknown. +// +// Note: this is to be treated as an unsigned integer, so watch for negatives. +// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web". +// +// This is the primary parameter for trace lookup, so should be intuitive as +// possible, for example, matching names in service discovery. +// +// Conventionally, when the service name isn't known, service_name = "unknown". +// However, it is also permissible to set service_name = "" (empty string). +// The difference in the latter usage is that the span will not be queryable +// by service name unless more information is added to the span with non-empty +// service name, e.g. an additional annotation from the server. +// +// Particularly clients may not have a reliable service name at ingest. One +// approach is to set service_name to "" at ingest, and later assign a +// better label based on binary annotations, such as user agent. +// - Ipv6: IPv6 host address packed into 16 bytes. 
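The `Endpoint.Ipv4` doc comment above gives the packing formula for 1.2.3.4; here is a short worked example of packing and unpacking it (the helper names are mine, not part of the generated code):

```go
package main

import "fmt"

// packIPv4 implements the doc comment's formula:
// for 1.2.3.4 the packed value is (1 << 24) | (2 << 16) | (3 << 8) | 4.
// Note that first octets >= 128 wrap negative in int32, the same caveat
// the Port comment gives about unsigned interpretation.
func packIPv4(a, b, c, d byte) int32 {
	return int32(a)<<24 | int32(b)<<16 | int32(c)<<8 | int32(d)
}

func unpackIPv4(v int32) (byte, byte, byte, byte) {
	return byte(v >> 24), byte(v >> 16), byte(v >> 8), byte(v)
}

func main() {
	p := packIPv4(1, 2, 3, 4)
	fmt.Println(p)             // 16909060
	fmt.Println(unpackIPv4(p)) // 1 2 3 4
}
```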
Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} + +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Ipv4 = v + } + return nil +} + +func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Port = v + } + return nil +} + +func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.ServiceName = v + } + return nil +} + +func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.Ipv6 = v + } + return nil +} + +func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + 
return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) + } + return err +} + +func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) + } + if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) + } + return err +} + +func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) + } + return err +} + +func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetIpv6() { + if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) + } + } + return err +} + +func (p *Endpoint) Equals(other *Endpoint) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Ipv4 != other.Ipv4 { + return false + } + if p.Port != other.Port { + return false + } + if p.ServiceName != other.ServiceName { + return false + } + if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { + return false + } + return true +} + +func (p *Endpoint) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Endpoint(%+v)", *p) +} + +// Associates an event that explains latency with a timestamp. +// +// Unlike log statements, annotations are often codes: for example "sr". +// +// Attributes: +// - Timestamp: Microseconds from epoch. 
+// +// This value should use the most precise value possible. For example, +// gettimeofday or multiplying currentTimeMillis by 1000. +// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry". +// - Host: The host that recorded the value, primarily for query by service name. +type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} + +var Annotation_Host_DEFAULT *Endpoint + +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } + return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Timestamp = v + } + return nil +} + +func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + } + if err 
:= oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) + } + return err +} + +func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Value)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) + } + } + return err +} + +func (p *Annotation) Equals(other *Annotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Timestamp != other.Timestamp { + return false + } + if p.Value != other.Value { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *Annotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Annotation(%+v)", *p) +} + +// Binary annotations are tags applied to a Span to give it context. For +// example, a binary annotation of HTTP_PATH ("http.path") could the path +// to a resource in a RPC call. +// +// Binary annotations of type STRING are always queryable, though more a +// historical implementation detail than a structural concern. +// +// Binary annotations can repeat, and vary on the host. Similar to Annotation, +// the host indicates who logged the event. This allows you to tell the +// difference between the client and server side of the same key. For example, +// the key "http.path" might be different on the client and server side due to +// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field, +// you can see the different points of view, which often help in debugging. +// +// Attributes: +// - Key: Name used to lookup spans, such as "http.path" or "finagle.version". +// - Value: Serialized thrift bytes, in TBinaryProtocol format. +// +// For legacy reasons, byte order is big-endian. See THRIFT-3217. 
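The `BinaryAnnotation.Value` comment above notes that values are serialized thrift bytes in big-endian order. A small sketch of what encoding an I64 value by hand looks like under that rule; this is illustrative only, not how the plugin builds annotations:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeI64 serializes an int64 the way TBinaryProtocol does: eight
// big-endian bytes, per the "byte order is big-endian" note above.
func encodeI64(v int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(v))
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeI64(1)) // 00 00 00 00 00 00 00 01
}
```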
+// - AnnotationType: The thrift type of value, most often STRING. +// +// annotation_type shouldn't vary for the same key. +// - Host: The host that recorded value, allowing query by service name or address. +// +// There are two exceptions: when key is "ca" or "sa", this is the source or +// destination of an RPC. This exception allows zipkin to display network +// context of uninstrumented services, such as browsers or databases. +type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} + +var BinaryAnnotation_Host_DEFAULT *Endpoint + +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } + return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.Key = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(ctx); err != nil { + return thrift.PrependError("error reading field 2: ", err) + } else { + p.Value = v + } + return nil +} + +func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(ctx); err != nil { + return 
thrift.PrependError("error reading field 3: ", err) + } else { + temp := AnnotationType(v) + p.AnnotationType = temp + } + return nil +} + +func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField2(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Key)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) + } + if err := oprot.WriteBinary(ctx, p.Value); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) + } + if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) + } + return err +} + +func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetHost() { + if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) + } + if err := p.Host.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 
4:host: ", p), err) + } + } + return err +} + +func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.Key != other.Key { + return false + } + if bytes.Compare(p.Value, other.Value) != 0 { + return false + } + if p.AnnotationType != other.AnnotationType { + return false + } + if !p.Host.Equals(other.Host) { + return false + } + return true +} + +func (p *BinaryAnnotation) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BinaryAnnotation(%+v)", *p) +} + +// A trace is a series of spans (often RPC calls) which form a latency tree. +// +// Spans are usually created by instrumentation in RPC clients or servers, but +// can also represent in-process activity. Annotations in spans are similar to +// log statements, and are sometimes created directly by application developers +// to indicate events of interest, such as a cache miss. +// +// The root span is where parent_id = Nil; it usually has the longest duration +// in the trace. +// +// Span identifiers are packed into i64s, but should be treated opaquely. +// String encoding is fixed-width lower-hex, to avoid signed interpretation. +// +// Attributes: +// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it. +// - Name: Span name in lowercase, rpc method for example. Conventionally, when the +// span name isn't known, name = "unknown". +// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely +// identified in storage by (trace_id, id). +// - ParentID: The parent's Span.id; absent if this the root span in a trace. +// - Annotations: Associates events that explain latency with a timestamp. Unlike log +// statements, annotations are often codes: for example SERVER_RECV("sr"). +// Annotations are sorted ascending by timestamp. +// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For +// example, a binary annotation key could be "http.path". +// - Debug: True is a request to store this span even if it overrides sampling policy. +// - Timestamp: Epoch microseconds of the start of this span, absent if this an incomplete +// span. +// +// This value should be set directly by instrumentation, using the most +// precise value possible. For example, gettimeofday or syncing nanoTime +// against a tick of currentTimeMillis. +// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this via Annotation.timestamp. +// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp. +// +// Timestamp is nullable for input only. Spans without a timestamp cannot be +// presented in a timeline: Span stores should not output spans missing a +// timestamp. +// +// There are two known edge-cases where this could be absent: both cases +// exist when a collector receives a span in parts and a binary annotation +// precedes a timestamp. This is possible when.. +// - The span is in-flight (ex not yet received a timestamp) +// - The span's start event was lost +// - Duration: Measurement in microseconds of the critical path, if known. Durations of +// less than one microsecond must be rounded up to 1 microsecond. +// +// This value should be set directly, as opposed to implicitly via annotation +// timestamps. Doing so encourages precision decoupled from problems of +// clocks, such as skew or NTP updates causing time to move backwards. 
+// +// For compatibility with instrumentation that precede this field, collectors +// or span stores can derive this by subtracting Annotation.timestamp. +// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp. +// +// If this field is persisted as unset, zipkin will continue to work, except +// duration query support will be implementation-specific. Similarly, setting +// this field non-atomically is implementation-specific. +// +// This field is i64 vs i32 to support spans longer than 35 minutes. +// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this +// means the trace uses 128 bit traceIds instead of 64 bit. +type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} + +var Span_ParentID_DEFAULT int64 + +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } + return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} + +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} + +var Span_Timestamp_DEFAULT int64 + +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } + return *p.Timestamp +} + +var Span_Duration_DEFAULT int64 + +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } + return *p.Duration +} + +var Span_TraceIDHigh_DEFAULT int64 + +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } + return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { + break + } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(ctx, 
iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId == thrift.LIST { + if err := p.ReadField6(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(ctx, iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(ctx, fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(ctx); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 1: ", err) + } else { + p.TraceID = v + } + return nil +} + +func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(ctx); err != nil { + return thrift.PrependError("error reading field 3: ", err) + } else { + p.Name = v + } + return nil +} + +func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 4: ", err) + } else { + p.ID = v + } + return nil +} + +func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 5: ", err) + } else { + p.ParentID = &v + } + return nil +} + +func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i++ { + _elem0 := &Annotation{} + if err := _elem0.Read(ctx, iprot); err != 
nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin(ctx) + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(ctx, iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(ctx); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(ctx); err != nil { + return thrift.PrependError("error reading field 9: ", err) + } else { + p.Debug = v + } + return nil +} + +func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 10: ", err) + } else { + p.Timestamp = &v + } + return nil +} + +func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 11: ", err) + } else { + p.Duration = &v + } + return nil +} + +func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(ctx); err != nil { + return thrift.PrependError("error reading field 12: ", err) + } else { + p.TraceIDHigh = &v + } + return nil +} + +func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin(ctx, "Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if p != nil { + if err := p.writeField1(ctx, oprot); err != nil { + return err + } + if err := p.writeField3(ctx, oprot); err != nil { + return err + } + if err := p.writeField4(ctx, oprot); err != nil { + return err + } + if err := p.writeField5(ctx, oprot); err != nil { + return err + } + if err := p.writeField6(ctx, oprot); err != nil { + return err + } + if err := p.writeField8(ctx, oprot); err != nil { + return err + } + if err := p.writeField9(ctx, oprot); err != nil { + return err + } + if err := p.writeField10(ctx, oprot); err != nil { + return err + } + if err := p.writeField11(ctx, oprot); err != nil { + return err + } + if err := p.writeField12(ctx, oprot); err != nil { + return err + } + } + if err := oprot.WriteFieldStop(ctx); err != nil { + return thrift.PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(ctx); err != nil { + return thrift.PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) + } + if err := 
oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) + } + return err +} + +func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) + } + if err := oprot.WriteString(ctx, string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) + } + return err +} + +func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) + } + return err +} + +func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) + } + } + return err +} + +func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) + } + return err +} + +func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) + } + if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(ctx, oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(ctx); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(ctx); 
err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) + } + return err +} + +func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) + } + if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) + } + } + return err +} + +func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) + } + } + return err +} + +func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) + } + } + return err +} + +func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) + } + if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(ctx); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) + } + } + return err +} + +func (p *Span) Equals(other *Span) bool { + if p == other { + return true + } else if p == nil || other == nil { + return false + } + if p.TraceID != other.TraceID { + return false + } + if p.Name != other.Name { + return false + } + if p.ID != other.ID { + return false + } + if p.ParentID != other.ParentID { + if p.ParentID == nil || other.ParentID == nil { + return false + } + if (*p.ParentID) != (*other.ParentID) { + return false + } + } + if len(p.Annotations) != len(other.Annotations) { + return false + } + for i, _tgt := range p.Annotations { + _src2 := other.Annotations[i] + if !_tgt.Equals(_src2) { + return false + } + } + if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { + return false + } + for i, _tgt := range p.BinaryAnnotations { + _src3 := other.BinaryAnnotations[i] + if !_tgt.Equals(_src3) { + return false + } + } + if p.Debug != other.Debug { + return false + } + if p.Timestamp != 
other.Timestamp { + if p.Timestamp == nil || other.Timestamp == nil { + return false + } + if (*p.Timestamp) != (*other.Timestamp) { + return false + } + } + if p.Duration != other.Duration { + if p.Duration == nil || other.Duration == nil { + return false + } + if (*p.Duration) != (*other.Duration) { + return false + } + } + if p.TraceIDHigh != other.TraceIDHigh { + if p.TraceIDHigh == nil || other.TraceIDHigh == nil { + return false + } + if (*p.TraceIDHigh) != (*other.TraceIDHigh) { + return false + } + } + return true +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} diff --git a/plugins/inputs/zipkin/codec/thrift/thrift.go b/plugins/inputs/zipkin/codec/thrift/thrift.go index 65a9e1488c2c4..c2c60a3395d2d 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift.go @@ -1,16 +1,16 @@ package thrift import ( + "context" "encoding/binary" "fmt" "net" "strconv" "time" - "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" - "github.com/apache/thrift/lib/go/thrift" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) // UnmarshalThrift converts raw bytes in thrift format to a slice of spans @@ -20,8 +20,8 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { return nil, err } - transport := thrift.NewTBinaryProtocolTransport(buffer) - _, size, err := transport.ReadListBegin() + transport := thrift.NewTBinaryProtocolConf(buffer, nil) + _, size, err := transport.ReadListBegin(context.Background()) if err != nil { return nil, err } @@ -29,13 +29,13 @@ func UnmarshalThrift(body []byte) ([]*zipkincore.Span, error) { spans := make([]*zipkincore.Span, size) for i := 0; i < size; i++ { zs := &zipkincore.Span{} - if err = zs.Read(transport); err != nil { + if err = zs.Read(context.Background(), transport); err != nil { return nil, err } spans[i] = zs } - if err = transport.ReadListEnd(); err != nil { + if err = transport.ReadListEnd(context.Background()); err != nil { return nil, err } return spans, nil diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 798fc269edf86..ea566e4bfd0c8 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -1,12 +1,12 @@ package thrift import ( - "io/ioutil" + "os" "testing" "github.com/google/go-cmp/cmp" - "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" + "github.com/influxdata/telegraf/plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore" ) func Test_endpointHost(t *testing.T) { @@ -193,7 +193,7 @@ func TestUnmarshalThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - dat, err := ioutil.ReadFile(tt.filename) + dat, err := os.ReadFile(tt.filename) if err != nil { t.Fatalf("Could not find file %s\n", tt.filename) } diff --git a/plugins/inputs/zipkin/handler.go b/plugins/inputs/zipkin/handler.go index 24e7ac12f01be..83288bd6e4b2e 100644 --- a/plugins/inputs/zipkin/handler.go +++ b/plugins/inputs/zipkin/handler.go @@ -3,7 +3,7 @@ package zipkin import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "mime" "net/http" "strings" @@ -88,7 +88,7 @@ func (s *SpanHandler) Spans(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnsupportedMediaType) } 
- octets, err := ioutil.ReadAll(body) + octets, err := io.ReadAll(body) if err != nil { s.recorder.Error(err) w.WriteHeader(http.StatusInternalServerError) diff --git a/plugins/inputs/zipkin/handler_test.go b/plugins/inputs/zipkin/handler_test.go index b0176a22ead3c..f6e8bece80240 100644 --- a/plugins/inputs/zipkin/handler_test.go +++ b/plugins/inputs/zipkin/handler_test.go @@ -2,9 +2,10 @@ package zipkin import ( "bytes" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "os" "strconv" "testing" "time" @@ -28,7 +29,7 @@ func (m *MockRecorder) Error(err error) { } func TestSpanHandler(t *testing.T) { - dat, err := ioutil.ReadFile("testdata/threespans.dat") + dat, err := os.ReadFile("testdata/threespans.dat") if err != nil { t.Fatalf("Could not find file %s\n", "testdata/threespans.dat") } @@ -37,7 +38,7 @@ func TestSpanHandler(t *testing.T) { r := httptest.NewRequest( "POST", "http://server.local/api/v1/spans", - ioutil.NopCloser( + io.NopCloser( bytes.NewReader(dat))) r.Header.Set("Content-Type", "application/x-thrift") diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index 77bef853b7e52..0c0bab279cc7f 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -3,8 +3,8 @@ package zipkin import ( "bytes" "fmt" - "io/ioutil" "net/http" + "os" "testing" "time" @@ -637,7 +637,7 @@ func TestZipkinPlugin(t *testing.T) { } func postThriftData(datafile, address, contentType string) error { - dat, err := ioutil.ReadFile(datafile) + dat, err := os.ReadFile(datafile) if err != nil { return fmt.Errorf("could not read from data file %s", datafile) } diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index f74c7d89a4360..acd2a554462ac 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -15,6 +15,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/discard" _ "github.com/influxdata/telegraf/plugins/outputs/dynatrace" _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/outputs/event_hubs" _ "github.com/influxdata/telegraf/plugins/outputs/exec" _ "github.com/influxdata/telegraf/plugins/outputs/execd" _ "github.com/influxdata/telegraf/plugins/outputs/file" diff --git a/plugins/outputs/azure_data_explorer/README.md b/plugins/outputs/azure_data_explorer/README.md index bb6d0d039b0d2..db2aba469d292 100644 --- a/plugins/outputs/azure_data_explorer/README.md +++ b/plugins/outputs/azure_data_explorer/README.md @@ -1,10 +1,11 @@ # Azure Data Explorer output plugin -This plugin writes metrics collected by any of the input plugins of Telegraf to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +This plugin writes data collected by any of the Telegraf input plugins to [Azure Data Explorer](https://azure.microsoft.com/en-au/services/data-explorer/). +Azure Data Explorer is a distributed, columnar store, purpose built for any type of logs, metrics and time series data. ## Pre-requisites: - [Create Azure Data Explorer cluster and database](https://docs.microsoft.com/en-us/azure/data-explorer/create-cluster-database-portal) -- VM/compute or container to host Telegraf - it could be hosted locally where an app/services to be monitored are deployed or remotely on a dedicated monitoring compute/container. +- VM/compute or container to host Telegraf - it could be hosted locally where an app/service to be monitored is deployed or remotely on a dedicated monitoring compute/container. 
## Configuration:

@@ -21,7 +22,7 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to
   # database = ""

   ## Timeout for Azure Data Explorer operations
-  # timeout = "15s"
+  # timeout = "20s"

   ## Type of metrics grouping used when pushing to Azure Data Explorer.
   ## Default is "TablePerMetric" for one table per different metric.
@@ -31,8 +32,9 @@ This plugin writes metrics collected by any of the input plugins of Telegraf to
   ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
   # table_name = ""

-  # timeout = "20s"
-
+  ## Creates tables and relevant mapping if set to true (default).
+  ## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
+  # create_tables = true
 ```

## Metrics Grouping

@@ -48,12 +50,12 @@ The table name will match the `name` property of the metric, this means that the

### SingleTable

-The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` the config file. If the table doesn't exist the plugin will create the table, if the table exists then the plugin will try to merge the Telegraf metric schema to the existing table. For more information about the merge process check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command).
+The plugin will send all the metrics received to a single Azure Data Explorer table. The name of the table must be supplied via `table_name` in the config file. If the table doesn't exist, the plugin will create it; if the table exists, the plugin will try to merge the Telegraf metric schema into the existing table. For more information about the merge process, check the [`.create-merge` documentation](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/create-merge-table-command).

## Tables Schema

-The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command would be like the following:
+The schema of the Azure Data Explorer table will match the structure of the Telegraf `Metric` object. The corresponding Azure Data Explorer command generated by the plugin would be like the following:
```
.create-merge table ['table-name'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime)
```
@@ -63,7 +65,7 @@ The corresponding table mapping would be like the following:
.create-or-alter table ['table-name'] ingestion json mapping 'table-name_mapping' '[{"column":"fields", "Properties":{"Path":"$[\'fields\']"}},{"column":"name", "Properties":{"Path":"$[\'name\']"}},{"column":"tags", "Properties":{"Path":"$[\'tags\']"}},{"column":"timestamp", "Properties":{"Path":"$[\'timestamp\']"}}]'
```
-**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above mentioned commands. Since the `Metric` object is a complex type, the only output format supported is JSON.
+**Note**: This plugin will automatically create Azure Data Explorer tables and corresponding table mapping as per the above-mentioned commands.
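+As a rough Go sketch (an illustration added for this review, not part of the plugin's README; the helper name and its parameters are hypothetical), these management commands are issued through the Azure Kusto Go SDK roughly as follows, mirroring the `createAzureDataExplorerTable` code later in this diff:
+
+```go
+package adxexample
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-kusto-go/kusto"
+	"github.com/Azure/azure-kusto-go/kusto/unsafe"
+)
+
+// createTable sends the .create-merge management command for one metric table.
+// client, database and tableName would come from the plugin configuration.
+func createTable(ctx context.Context, client *kusto.Client, database, tableName string) error {
+	const createTableCommand = ".create-merge table ['%s'] (['fields']:dynamic, ['name']:string, ['tags']:dynamic, ['timestamp']:datetime)"
+	// The table name is interpolated into the command text, so the statement
+	// has to be built as an "unsafe" statement, as the plugin itself does.
+	stmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).
+		UnsafeAdd(fmt.Sprintf(createTableCommand, tableName))
+	_, err := client.Mgmt(ctx, database, stmt)
+	return err
+}
+```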
## Authentication

@@ -87,7 +89,10 @@ These methods are:

[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects

-Whichever method, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will allow the plugin to create the required tables and ingest data into it.
+Whichever method is used, the designated Principal needs to be assigned the `Database User` role on the Database level in the Azure Data Explorer. This role will
+allow the plugin to create the required tables and ingest data into them.
+If `create_tables=false`, the designated principal only needs the `Database Ingestor` role.
+

### Configurations of the chosen Authentication Method

@@ -95,7 +100,7 @@ The plugin will authenticate using the first available of the
following configurations, **it's important to understand that the assessment, and consequently choosing the authentication method, will happen in order as below**:

1. **Client Credentials**: Azure AD Application ID and Secret.
-
+
   Set the following environment variables:

   - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate.
@@ -126,50 +131,72 @@ following configurations, **it's important to understand that the assessment, an

[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview

-## Querying collected metrics data in Azure Data Explorer
-With all above configurations, you will have data stored in following standard format for each metric type stored as an Azure Data Explorer table -
-
-ColumnName | ColumnType
----------- | ----------
-fields | dynamic
-name | string
-tags | dynamic
-timestamp | datetime
-
-As "fields" and "tags" are of dynamic data type so following multiple ways to query this data -
-
-1. **Query JSON attributes directly**: This is one of the coolest feature of Azure Data Explorer so you can run query like this -
-
-   ```
-   Tablename
-   | where fields.size_kb == 9120
-   ```
-2. **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: to transform data, in this case, to flatten dynamic data type columns. This is the recommended performant way for querying over large data volumes compared to querying directly over JSON attributes.
+## Querying data collected in Azure Data Explorer
+Examples of data transformations and queries that are useful for gaining insights -
+1. **Data collected using SQL input plugin**
+
+   Sample SQL metrics data -
+
+   name | tags | timestamp | fields
+   -----|------|-----------|-------
+   sqlserver_database_io|{"database_name":"azure-sql-db2","file_type":"DATA","host":"adx-vm","logical_filename":"tempdev","measurement_db_type":"AzureSQLDB","physical_filename":"tempdb.mdf","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server"}|2021-09-09T13:51:20Z|{"current_size_mb":16,"database_id":2,"file_id":1,"read_bytes":2965504,"read_latency_ms":68,"reads":47,"rg_read_stall_ms":42,"rg_write_stall_ms":0,"space_used_mb":0,"write_bytes":1220608,"write_latency_ms":103,"writes":149}
+   sqlserver_waitstats|{"database_name":"azure-sql-db2","host":"adx-vm","measurement_db_type":"AzureSQLDB","replica_updateability":"READ_WRITE","sql_instance":"adx-sql-server","wait_category":"Worker Thread","wait_type":"THREADPOOL"}|2021-09-09T13:51:20Z|{"max_wait_time_ms":15,"resource_wait_ms":4469,"signal_wait_time_ms":0,"wait_time_ms":4469,"waiting_tasks_count":1464}
+
+   Since the collected metrics object is a complex type, "fields" and "tags" are stored as dynamic data type. There are multiple ways to query this data -
+
+   - **Query JSON attributes directly**: Azure Data Explorer provides the ability to query JSON data in raw format without parsing it, so JSON attributes can be queried directly in the following way -
+   ```
+   Tablename
+   | where name == "sqlserver_azure_db_resource_stats" and todouble(fields.avg_cpu_percent) > 7
+   ```
+   ```
+   Tablename
+   | distinct tostring(tags.database_name)
+   ```
+   **Note** - This approach could have a performance impact for large volumes of data; use the below-mentioned update policy approach for such cases.
+
+   - **Use [Update policy](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/management/updatepolicy)**: Transform dynamic data type columns using an update policy. This is the recommended, performant way of querying over large volumes of data compared to querying directly over JSON attributes.
+
 ```
 // Function to transform data
 .create-or-alter function Transform_TargetTableName() {
-     SourceTableName
-     | extend clerk_type = tags.clerk_type
-     | extend host = tags.host
+     SourceTableName
+     | mv-apply fields on (extend key = tostring(bag_keys(fields)[0]))
+     | project fieldname=key, value=todouble(fields[key]), name, tags, timestamp
 }

-// Create the destination table (if it doesn't exist already)
+// Create destination table with above query's results schema (if it doesn't exist already)
 .set-or-append TargetTableName <| Transform_TargetTableName() | limit 0

 // Apply update policy on destination table
 .alter table TargetTableName policy update
-@'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": false, "PropagateIngestionProperties": false}]'
-
+@'[{"IsEnabled": true, "Source": "SourceTableName", "Query": "Transform_TargetTableName()", "IsTransactional": true, "PropagateIngestionProperties": false}]'
 ```
-There are two ways to flatten dynamic columns as explained below. You can use either of these ways in above mentioned update policy function - 'Transform_TargetTableName()'
-  - Use [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic columns as shown below. This method will unpack all columns, it could lead to issues in case source schema changes.
-   ```
-   Tablename
-   | evaluate bag_unpack(tags)
-   | evaluate bag_unpack(fields)
-   ```
-
-  - Use [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator as shown below. This is the best way provided you know what columns are needed in the final destination table. Another benefit of this method is even if schema changes, it will not break your queries or dashboards.
-   ```
-   Tablename
-   | extend clerk_type = tags.clerk_type
-   | extend host = tags.host
-   ```
+
+2. **Data collected using syslog input plugin**
+
+   Sample syslog data -
+
+   name | tags | timestamp | fields
+   -----|------|-----------|-------
+   syslog|{"appname":"azsecmond","facility":"user","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:36:44Z|{"facility_code":1,"message":" 2021/09/20 14:36:44.890110 Failed to connect to mdsd: dial unix /var/run/mdsd/default_djson.socket: connect: no such file or directory","procid":"2184","severity_code":6,"timestamp":"1632148604890477000","version":1}
+   syslog|{"appname":"CRON","facility":"authpriv","host":"adx-linux-vm","hostname":"adx-linux-vm","severity":"info"}|2021-09-20T14:37:01Z|{"facility_code":10,"message":" pam_unix(cron:session): session opened for user root by (uid=0)","procid":"26446","severity_code":6,"timestamp":"1632148621120781000","version":1}
+
+   There are multiple ways to flatten dynamic columns using the 'extend' or 'bag_unpack' operators. You can use either of these in the above-mentioned update policy function - 'Transform_TargetTableName()'
+
+   - Use the [extend](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extendoperator) operator - This is the recommended approach compared to 'bag_unpack' as it is faster and more robust. Even if the schema changes, it will not break queries or dashboards.
+   ```
+   Tablename
+   | extend facility_code=toint(fields.facility_code), message=tostring(fields.message), procid=tolong(fields.procid), severity_code=toint(fields.severity_code),
+   SysLogTimestamp=unixtime_nanoseconds_todatetime(tolong(fields.timestamp)), version=todouble(fields.version),
+   appname=tostring(tags.appname), facility=tostring(tags.facility), host=tostring(tags.host), hostname=tostring(tags.hostname), severity=tostring(tags.severity)
+   | project-away fields, tags
+   ```
+   - Use the [bag_unpack plugin](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/bag-unpackplugin) to unpack the dynamic type columns automatically. This method could lead to issues if the source schema changes, since it dynamically expands columns.
+ ``` + Tablename + | evaluate bag_unpack(tags, columnsConflict='replace_source') + | evaluate bag_unpack(fields, columnsConflict='replace_source') + ``` diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer.go b/plugins/outputs/azure_data_explorer/azure_data_explorer.go index 6d411fd05c3b9..1f958d525004d 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer.go @@ -27,6 +27,7 @@ type AzureDataExplorer struct { Timeout config.Duration `toml:"timeout"` MetricsGrouping string `toml:"metrics_grouping_type"` TableName string `toml:"table_name"` + CreateTables bool `toml:"create_tables"` client localClient ingesters map[string]localIngestor serializer serializers.Serializer @@ -57,7 +58,7 @@ func (adx *AzureDataExplorer) Description() string { func (adx *AzureDataExplorer) SampleConfig() string { return ` - ## Azure Data Exlorer cluster endpoint + ## Azure Data Explorer cluster endpoint ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" endpoint_url = "" @@ -77,6 +78,9 @@ func (adx *AzureDataExplorer) SampleConfig() string { ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). # table_name = "" + ## Creates tables and relevant mapping if set to true(default). + ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. + # create_tables = true ` } @@ -198,6 +202,10 @@ func (adx *AzureDataExplorer) getIngestor(ctx context.Context, tableName string) } func (adx *AzureDataExplorer) createAzureDataExplorerTable(ctx context.Context, tableName string) error { + if !adx.CreateTables { + adx.Log.Info("skipped table creation") + return nil + } createStmt := kusto.NewStmt("", kusto.UnsafeStmt(unsafe.Stmt{Add: true, SuppressWarning: true})).UnsafeAdd(fmt.Sprintf(createTableCommand, tableName)) if _, err := adx.client.Mgmt(ctx, adx.Database, createStmt); err != nil { return err @@ -230,7 +238,7 @@ func (adx *AzureDataExplorer) Init() error { return errors.New("Metrics grouping type is not valid") } - serializer, err := json.NewSerializer(time.Second) + serializer, err := json.NewSerializer(time.Second, "") // FIXME: get the json.TimestampFormat from the config file if err != nil { return err } @@ -241,7 +249,8 @@ func (adx *AzureDataExplorer) Init() error { func init() { outputs.Add("azure_data_explorer", func() telegraf.Output { return &AzureDataExplorer{ - Timeout: config.Duration(20 * time.Second), + Timeout: config.Duration(20 * time.Second), + CreateTables: true, } }) } diff --git a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go index f85d074cb1f6f..ce53acf43faf4 100644 --- a/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go +++ b/plugins/outputs/azure_data_explorer/azure_data_explorer_test.go @@ -31,10 +31,12 @@ func TestWrite(t *testing.T) { tableName string expected map[string]interface{} expectedWriteError string + createTables bool }{ { - name: "Valid metric", - inputMetric: testutil.MockMetrics(), + name: "Valid metric", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -56,8 +58,34 @@ func TestWrite(t *testing.T) { }, }, { - 
name: "Error in Mgmt", - inputMetric: testutil.MockMetrics(), + name: "Don't create tables'", + inputMetric: testutil.MockMetrics(), + createTables: false, + client: &fakeClient{ + queries: make([]string, 0), + internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { + require.Fail(t, "Mgmt shouldn't be called when create_tables is false") + f.queries = append(f.queries, query.String()) + return &kusto.RowIterator{}, nil + }, + }, + createIngestor: createFakeIngestor, + metricsGrouping: tablePerMetric, + expected: map[string]interface{}{ + "metricName": "test1", + "fields": map[string]interface{}{ + "value": 1.0, + }, + "tags": map[string]interface{}{ + "tag1": "value1", + }, + "timestamp": float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).UnixNano() / int64(time.Second)), + }, + }, + { + name: "Error in Mgmt", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -79,8 +107,9 @@ func TestWrite(t *testing.T) { expectedWriteError: "creating table for \"test1\" failed: Something went wrong", }, { - name: "SingleTable metric grouping type", - inputMetric: testutil.MockMetrics(), + name: "SingleTable metric grouping type", + inputMetric: testutil.MockMetrics(), + createTables: true, client: &fakeClient{ queries: make([]string, 0), internalMgmt: func(f *fakeClient, ctx context.Context, db string, query kusto.Stmt, options ...kusto.MgmtOption) (*kusto.RowIterator, error) { @@ -105,7 +134,7 @@ func TestWrite(t *testing.T) { for _, tC := range testCases { t.Run(tC.name, func(t *testing.T) { - serializer, err := telegrafJson.NewSerializer(time.Second) + serializer, err := telegrafJson.NewSerializer(time.Second, "") require.NoError(t, err) plugin := AzureDataExplorer{ @@ -114,6 +143,7 @@ func TestWrite(t *testing.T) { Log: testutil.Logger{}, MetricsGrouping: tC.metricsGrouping, TableName: tC.tableName, + CreateTables: tC.createTables, client: tC.client, ingesters: map[string]localIngestor{}, createIngestor: tC.createIngestor, @@ -149,11 +179,15 @@ func TestWrite(t *testing.T) { expectedTime := tC.expected["timestamp"].(float64) require.Equal(t, expectedTime, createdFakeIngestor.actualOutputMetric["timestamp"]) - createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) - require.Equal(t, createTableString, tC.client.queries[0]) + if tC.createTables { + createTableString := fmt.Sprintf(createTableCommandExpected, expectedNameOfTable) + require.Equal(t, createTableString, tC.client.queries[0]) - createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) - require.Equal(t, createTableMappingString, tC.client.queries[1]) + createTableMappingString := fmt.Sprintf(createTableMappingCommandExpected, expectedNameOfTable, expectedNameOfTable) + require.Equal(t, createTableMappingString, tC.client.queries[1]) + } else { + require.Empty(t, tC.client.queries) + } } }) } @@ -185,10 +219,10 @@ type fakeIngestor struct { actualOutputMetric map[string]interface{} } -func createFakeIngestor(client localClient, database string, tableName string) (localIngestor, error) { +func createFakeIngestor(localClient, string, string) (localIngestor, error) { return &fakeIngestor{}, nil } -func (f *fakeIngestor) FromReader(ctx context.Context, reader 
io.Reader, options ...ingest.FileOption) (*ingest.Result, error) { +func (f *fakeIngestor) FromReader(_ context.Context, reader io.Reader, _ ...ingest.FileOption) (*ingest.Result, error) { scanner := bufio.NewScanner(reader) scanner.Scan() firstLine := scanner.Text() diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md index 6f2abb97ec3ed..9d835c1eb6f4b 100644 --- a/plugins/outputs/azure_monitor/README.md +++ b/plugins/outputs/azure_monitor/README.md @@ -40,7 +40,7 @@ written as a dimension on each Azure Monitor metric. ## The Azure Resource ID against which metric will be logged, e.g. ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # resource_id = "" - + ## Optionally, if in Azure US Government, China, or other sovereign ## cloud environment, set the appropriate REST endpoint for receiving ## metrics. (Note: region may be unused in this context) @@ -76,7 +76,7 @@ preferred authentication methods are different from the *order* in which each authentication is checked. Here are the preferred authentication methods: 1. Managed Service Identity (MSI) token - - This is the prefered authentication method. Telegraf will automatically + - This is the preferred authentication method. Telegraf will automatically authenticate using this method when running on Azure VMs. 2. AAD Application Tokens (Service Principals) - Primarily useful if Telegraf is writing metrics for other resources. @@ -132,7 +132,7 @@ authenticate when running Telegraf on Azure VMs. Azure Monitor only accepts values with a numeric type. The plugin will drop fields with a string type by default. The plugin can set all string type fields as extra dimensions in the Azure Monitor custom metric by setting the -configuration option `strings_as_dimensions` to `true`. +configuration option `strings_as_dimensions` to `true`. Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric. 
The plugin will deterministically drop any dimensions that exceed the 10
diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go
index e513dbdca23e9..ca511a5211860 100644
--- a/plugins/outputs/azure_monitor/azure_monitor.go
+++ b/plugins/outputs/azure_monitor/azure_monitor.go
@@ -7,7 +7,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"hash/fnv"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"regexp"
 	"strings"
@@ -221,7 +221,7 @@ func vmInstanceMetadata(c *http.Client) (string, string, error) {
 	}
 	defer resp.Body.Close()

-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return "", "", err
 	}
@@ -356,7 +356,7 @@ func (a *AzureMonitor) send(body []byte) error {
 	}
 	defer resp.Body.Close()

-	_, err = ioutil.ReadAll(resp.Body)
+	_, err = io.ReadAll(resp.Body)
 	if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 {
 		return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status)
 	}
diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go
index c702f46b0e0b5..db8243e82d5ad 100644
--- a/plugins/outputs/azure_monitor/azure_monitor_test.go
+++ b/plugins/outputs/azure_monitor/azure_monitor_test.go
@@ -6,10 +6,12 @@ import (
 	"encoding/json"
 	"net/http"
 	"net/http/httptest"
+	"os"
 	"testing"
 	"time"

 	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/adal"
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/testutil"
 	"github.com/stretchr/testify/require"
@@ -209,7 +211,11 @@ func TestAggregate(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			err := tt.plugin.Connect()
+			msiEndpoint, err := adal.GetMSIVMEndpoint()
+			require.NoError(t, err)
+
+			os.Setenv("MSI_ENDPOINT", msiEndpoint)
+			err = tt.plugin.Connect()
 			require.NoError(t, err)

 			// Reset globals
@@ -367,3 +373,15 @@ func TestWrite(t *testing.T) {
 		})
 	}
 }
+
+func TestMain(m *testing.M) {
+	// Set up a fake environment for adal.getMSIType()
+	// Root cause: https://github.com/Azure/go-autorest/commit/def88ef859fb980eff240c755a70597bc9b490d0
+	err := os.Setenv("MSI_ENDPOINT", "fake.endpoint")
+
+	if err != nil {
+		panic(err)
+	}
+
+	os.Exit(m.Run())
+}
diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go
index ddf3622328ba2..129f014bfb548 100644
--- a/plugins/outputs/cloudwatch/cloudwatch.go
+++ b/plugins/outputs/cloudwatch/cloudwatch.go
@@ -1,13 +1,15 @@
 package cloudwatch

 import (
+	"context"
 	"math"
 	"sort"
 	"strings"
 	"time"

-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/cloudwatch"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/cloudwatch"
+	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"

 	"github.com/influxdata/telegraf"
 	internalaws "github.com/influxdata/telegraf/config/aws"
@@ -17,7 +19,7 @@ import (
 type CloudWatch struct {
 	Namespace             string `toml:"namespace"` // CloudWatch Metrics Namespace
 	HighResolutionMetrics bool   `toml:"high_resolution_metrics"`
-	svc                   *cloudwatch.CloudWatch
+	svc                   *cloudwatch.Client

 	WriteStatistics bool `toml:"write_statistics"`

@@ -38,7 +40,7 @@ const (

 type cloudwatchField interface {
 	addValue(sType statisticType, value float64)
-	buildDatum() []*cloudwatch.MetricDatum
+	buildDatum() []types.MetricDatum
 }

 type statisticField struct {
@@ -56,8 +58,8 @@ func (f *statisticField) addValue(sType statisticType, value float64) {
 	}
 }

-func (f *statisticField) 
buildDatum() []*cloudwatch.MetricDatum { - var datums []*cloudwatch.MetricDatum +func (f *statisticField) buildDatum() []types.MetricDatum { + var datums []types.MetricDatum if f.hasAllFields() { // If we have all required fields, we build datum with StatisticValues @@ -66,24 +68,24 @@ func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { sum := f.values[statisticTypeSum] count := f.values[statisticTypeCount] - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ Minimum: aws.Float64(min), Maximum: aws.Float64(max), Sum: aws.Float64(sum), SampleCount: aws.Float64(count), }, - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: aws.Int32(int32(f.storageResolution)), } datums = append(datums, datum) } else { // If we don't have all required fields, we build each field as independent datum for sType, value := range f.values { - datum := &cloudwatch.MetricDatum{ + datum := types.MetricDatum{ Value: aws.Float64(value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), @@ -134,14 +136,14 @@ func (f *valueField) addValue(sType statisticType, value float64) { } } -func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { - return []*cloudwatch.MetricDatum{ +func (f *valueField) buildDatum() []types.MetricDatum { + return []types.MetricDatum{ { MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), Value: aws.Float64(f.value), Dimensions: BuildDimensions(f.tags), Timestamp: aws.Time(f.timestamp), - StorageResolution: aws.Int64(f.storageResolution), + StorageResolution: aws.Int32(int32(f.storageResolution)), }, } } @@ -177,12 +179,12 @@ var sampleConfig = ` ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" - ## If you have a large amount of metrics, you should consider to send statistic - ## values instead of raw metrics which could not only improve performance but - ## also save AWS API cost. If enable this flag, this plugin would parse the required - ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. - ## You could use basicstats aggregator to calculate those fields. If not all statistic - ## fields are available, all fields would still be sent as raw metrics. + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. 
# write_statistics = false ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) @@ -198,7 +200,12 @@ func (c *CloudWatch) Description() string { } func (c *CloudWatch) Connect() error { - c.svc = cloudwatch.New(c.CredentialConfig.Credentials()) + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + + c.svc = cloudwatch.NewFromConfig(cfg) return nil } @@ -207,7 +214,7 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) error { - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, m := range metrics { d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) datums = append(datums, d...) @@ -225,13 +232,13 @@ func (c *CloudWatch) Write(metrics []telegraf.Metric) error { return nil } -func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { +func (c *CloudWatch) WriteToCloudWatch(datums []types.MetricDatum) error { params := &cloudwatch.PutMetricDataInput{ MetricData: datums, Namespace: aws.String(c.Namespace), } - _, err := c.svc.PutMetricData(params) + _, err := c.svc.PutMetricData(context.Background(), params) if err != nil { c.Log.Errorf("Unable to write to CloudWatch : %+v", err.Error()) @@ -242,13 +249,13 @@ func (c *CloudWatch) WriteToCloudWatch(datums []*cloudwatch.MetricDatum) error { // Partition the MetricDatums into smaller slices of a max size so that are under the limit // for the AWS API calls. -func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch.MetricDatum { +func PartitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum { numberOfPartitions := len(datums) / size if len(datums)%size != 0 { numberOfPartitions++ } - partitions := make([][]*cloudwatch.MetricDatum, numberOfPartitions) + partitions := make([][]types.MetricDatum, numberOfPartitions) for i := 0; i < numberOfPartitions; i++ { start := size * i @@ -266,7 +273,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch // Make a MetricDatum from telegraf.Metric. It would check if all required fields of // cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. // Otherwise, fields would still been built independently. -func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { +func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []types.MetricDatum { fields := make(map[string]cloudwatchField) tags := point.Tags() storageResolution := int64(60) @@ -316,7 +323,7 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel } } - var datums []*cloudwatch.MetricDatum + var datums []types.MetricDatum for _, f := range fields { d := f.buildDatum() datums = append(datums, d...) @@ -328,13 +335,13 @@ func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point tel // Make a list of Dimensions by using a Point's tags. CloudWatch supports up to // 10 dimensions per metric so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. 
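Aside, before the BuildDimensions hunk that follows: PartitionDatums is pure slicing logic, so it is easy to sanity-check in isolation. A hedged usage sketch follows; the batch size of 20 and the end-of-slice clamp are assumptions, since the hunk is cut off after `start := size * i` and the limit constant is not shown here.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
)

// partitionDatums mirrors PartitionDatums from the hunk above; the
// clamp on the final slice is a reconstruction of the elided tail.
func partitionDatums(size int, datums []types.MetricDatum) [][]types.MetricDatum {
	numberOfPartitions := len(datums) / size
	if len(datums)%size != 0 {
		numberOfPartitions++
	}
	partitions := make([][]types.MetricDatum, numberOfPartitions)
	for i := 0; i < numberOfPartitions; i++ {
		start := size * i
		end := start + size
		if end > len(datums) {
			end = len(datums)
		}
		partitions[i] = datums[start:end]
	}
	return partitions
}

func main() {
	// 45 empty datums split into batches of at most 20.
	for i, b := range partitionDatums(20, make([]types.MetricDatum, 45)) {
		fmt.Printf("batch %d: %d datums\n", i, len(b)) // 20, 20, 5
	}
}
```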
-func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { +func BuildDimensions(mTags map[string]string) []types.Dimension { const MaxDimensions = 10 - dimensions := make([]*cloudwatch.Dimension, 0, MaxDimensions) + dimensions := make([]types.Dimension, 0, MaxDimensions) // This is pretty ugly but we always want to include the "host" tag if it exists. if host, ok := mTags["host"]; ok { - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String("host"), Value: aws.String(host), }) @@ -358,7 +365,7 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { continue } - dimensions = append(dimensions, &cloudwatch.Dimension{ + dimensions = append(dimensions, types.Dimension{ Name: aws.String(k), Value: aws.String(mTags[k]), }) diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 95987f591830d..df98381cf3f90 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -2,14 +2,13 @@ package cloudwatch import ( "fmt" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "math" "sort" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - + "github.com/aws/aws-sdk-go-v2/aws" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" @@ -117,8 +116,8 @@ func TestBuildMetricDatums(t *testing.T) { } func TestMetricDatumResolution(t *testing.T) { - const expectedStandardResolutionValue = int64(60) - const expectedHighResolutionValue = int64(1) + const expectedStandardResolutionValue = int32(60) + const expectedHighResolutionValue = int32(1) assert := assert.New(t) @@ -153,19 +152,19 @@ func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { func TestPartitionDatums(t *testing.T) { assert := assert.New(t) - testDatum := cloudwatch.MetricDatum{ + testDatum := types.MetricDatum{ MetricName: aws.String("Foo"), Value: aws.Float64(1), } - zeroDatum := []*cloudwatch.MetricDatum{} - oneDatum := []*cloudwatch.MetricDatum{&testDatum} - twoDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum} - threeDatum := []*cloudwatch.MetricDatum{&testDatum, &testDatum, &testDatum} + zeroDatum := []types.MetricDatum{} + oneDatum := []types.MetricDatum{testDatum} + twoDatum := []types.MetricDatum{testDatum, testDatum} + threeDatum := []types.MetricDatum{testDatum, testDatum, testDatum} - assert.Equal([][]*cloudwatch.MetricDatum{}, PartitionDatums(2, zeroDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) - assert.Equal([][]*cloudwatch.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) + assert.Equal([][]types.MetricDatum{}, PartitionDatums(2, zeroDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{oneDatum}, PartitionDatums(2, oneDatum)) + assert.Equal([][]types.MetricDatum{twoDatum}, PartitionDatums(2, twoDatum)) + assert.Equal([][]types.MetricDatum{twoDatum, oneDatum}, PartitionDatums(2, threeDatum)) } diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go index 79eb5d7722f13..952fea4b2a9a4 100644 --- 
a/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs.go @@ -1,19 +1,21 @@ package cloudwatch_logs import ( + "context" "fmt" "sort" "strings" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" ) type messageBatch struct { - logEvents []*cloudwatchlogs.InputLogEvent + logEvents []types.InputLogEvent messageCount int } type logStreamContainer struct { @@ -25,16 +27,16 @@ type logStreamContainer struct { //Cloudwatch Logs service interface type cloudWatchLogs interface { - DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) - DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) - CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) - PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) + DescribeLogGroups(context.Context, *cloudwatchlogs.DescribeLogGroupsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogGroupsOutput, error) + DescribeLogStreams(context.Context, *cloudwatchlogs.DescribeLogStreamsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.DescribeLogStreamsOutput, error) + CreateLogStream(context.Context, *cloudwatchlogs.CreateLogStreamInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.CreateLogStreamOutput, error) + PutLogEvents(context.Context, *cloudwatchlogs.PutLogEventsInput, ...func(options *cloudwatchlogs.Options)) (*cloudwatchlogs.PutLogEventsOutput, error) } // CloudWatchLogs plugin object definition type CloudWatchLogs struct { - LogGroup string `toml:"log_group"` - lg *cloudwatchlogs.LogGroup //log group data + LogGroup string `toml:"log_group"` + lg *types.LogGroup //log group data LogStream string `toml:"log_stream"` lsKey string //log stream source: tag or field @@ -108,12 +110,12 @@ region = "us-east-1" ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! ## For example, you can specify the name of the k8s cluster here to group logs from all clusters in one place -log_group = "my-group-name" +log_group = "my-group-name" ## Log stream in log group ## Either log group name or reference to metric attribute, from which it can be parsed: ## tag: or field:. If the log stream does not exist, it will be created. -## Since AWS is not automatically delete logs streams with expired logs entries (i.e. empty log stream) +## Since AWS does not automatically delete log streams with expired log entries (i.e. empty log streams) ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) log_stream = "tag:location" @@ -126,7 +128,7 @@ log_data_metric_name = "docker_log" ## Specify from which metric attribute the log data should be retrieved: ## tag: or field:.
## I.e., if you are using docker_log plugin to stream logs from container, then -## specify log_data_source = "field:message" +## specify log_data_source = "field:message" log_data_source = "field:message" ` @@ -187,15 +189,17 @@ func (c *CloudWatchLogs) Connect() error { var logGroupsOutput = &cloudwatchlogs.DescribeLogGroupsOutput{NextToken: &dummyToken} var err error - c.svc = cloudwatchlogs.New(c.CredentialConfig.Credentials()) - if c.svc == nil { - return fmt.Errorf("can't create cloudwatch logs service endpoint") + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err } + c.svc = cloudwatchlogs.NewFromConfig(cfg) //Find log group with name 'c.LogGroup' if c.lg == nil { //In case connection is not retried, first time for logGroupsOutput.NextToken != nil { logGroupsOutput, err = c.svc.DescribeLogGroups( + context.Background(), &cloudwatchlogs.DescribeLogGroupsInput{ LogGroupNamePrefix: &c.LogGroup, NextToken: queryToken}) @@ -208,7 +212,7 @@ func (c *CloudWatchLogs) Connect() error { for _, logGroup := range logGroupsOutput.LogGroups { if *(logGroup.LogGroupName) == c.LogGroup { c.Log.Debugf("Found log group %q", c.LogGroup) - c.lg = logGroup + c.lg = &logGroup //nolint:revive } } } @@ -321,7 +325,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { lsContainer = val } else { lsContainer.messageBatches[0].messageCount = 0 - lsContainer.messageBatches[0].logEvents = []*cloudwatchlogs.InputLogEvent{} + lsContainer.messageBatches[0].logEvents = []types.InputLogEvent{} c.ls[logStream] = lsContainer } @@ -331,7 +335,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { lsContainer.currentBatchIndex++ lsContainer.messageBatches = append(lsContainer.messageBatches, messageBatch{ - logEvents: []*cloudwatchlogs.InputLogEvent{}, + logEvents: []types.InputLogEvent{}, messageCount: 0}) lsContainer.currentBatchSizeBytes = messageSizeInBytesForAWS } else { @@ -345,7 +349,7 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { //Adding metring to batch lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents = append(lsContainer.messageBatches[lsContainer.currentBatchIndex].logEvents, - &cloudwatchlogs.InputLogEvent{ + types.InputLogEvent{ Message: &logData, Timestamp: &metricTime}) } @@ -366,11 +370,11 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { if elem.sequenceToken == "" { //This is the first attempt to write to log stream, //need to check log stream existence and create it if necessary - describeLogStreamOutput, err := c.svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{ + describeLogStreamOutput, err := c.svc.DescribeLogStreams(context.Background(), &cloudwatchlogs.DescribeLogStreamsInput{ LogGroupName: &c.LogGroup, LogStreamNamePrefix: &logStream}) if err == nil && len(describeLogStreamOutput.LogStreams) == 0 { - _, err := c.svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + _, err := c.svc.CreateLogStream(context.Background(), &cloudwatchlogs.CreateLogStreamInput{ LogGroupName: &c.LogGroup, LogStreamName: &logStream}) if err != nil { @@ -400,14 +404,14 @@ func (c *CloudWatchLogs) Write(metrics []telegraf.Metric) error { //There is a quota of 5 requests per second per log stream. Additional //requests are throttled. This quota can't be changed. - putLogEventsOutput, err := c.svc.PutLogEvents(&putLogEvents) + putLogEventsOutput, err := c.svc.PutLogEvents(context.Background(), &putLogEvents) if err != nil { c.Log.Errorf("Can't push logs batch to AWS. 
Reason: %v", err) continue } //Cleanup batch elem.messageBatches[index] = messageBatch{ - logEvents: []*cloudwatchlogs.InputLogEvent{}, + logEvents: []types.InputLogEvent{}, messageCount: 0} elem.sequenceToken = *putLogEventsOutput.NextSequenceToken diff --git a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go index f2f9f0cac7f3c..e103eb53d24e6 100644 --- a/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go +++ b/plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go @@ -1,6 +1,7 @@ package cloudwatch_logs import ( + "context" "fmt" "math/rand" "net/http" @@ -8,7 +9,8 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + cloudwatchlogsV2 "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/testutil" @@ -17,24 +19,24 @@ import ( type mockCloudWatchLogs struct { logStreamName string - pushedLogEvents []cloudwatchlogs.InputLogEvent + pushedLogEvents []types.InputLogEvent } func (c *mockCloudWatchLogs) Init(lsName string) { c.logStreamName = lsName - c.pushedLogEvents = make([]cloudwatchlogs.InputLogEvent, 0) + c.pushedLogEvents = make([]types.InputLogEvent, 0) } -func (c *mockCloudWatchLogs) DescribeLogGroups(*cloudwatchlogs.DescribeLogGroupsInput) (*cloudwatchlogs.DescribeLogGroupsOutput, error) { +func (c *mockCloudWatchLogs) DescribeLogGroups(context.Context, *cloudwatchlogsV2.DescribeLogGroupsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogGroupsOutput, error) { return nil, nil } -func (c *mockCloudWatchLogs) DescribeLogStreams(*cloudwatchlogs.DescribeLogStreamsInput) (*cloudwatchlogs.DescribeLogStreamsOutput, error) { +func (c *mockCloudWatchLogs) DescribeLogStreams(context.Context, *cloudwatchlogsV2.DescribeLogStreamsInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.DescribeLogStreamsOutput, error) { arn := "arn" creationTime := time.Now().Unix() sequenceToken := "arbitraryToken" - output := &cloudwatchlogs.DescribeLogStreamsOutput{ - LogStreams: []*cloudwatchlogs.LogStream{ + output := &cloudwatchlogsV2.DescribeLogStreamsOutput{ + LogStreams: []types.LogStream{ { Arn: &arn, CreationTime: &creationTime, @@ -48,15 +50,15 @@ func (c *mockCloudWatchLogs) DescribeLogStreams(*cloudwatchlogs.DescribeLogStrea } return output, nil } -func (c *mockCloudWatchLogs) CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { +func (c *mockCloudWatchLogs) CreateLogStream(context.Context, *cloudwatchlogsV2.CreateLogStreamInput, ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.CreateLogStreamOutput, error) { return nil, nil } -func (c *mockCloudWatchLogs) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { +func (c *mockCloudWatchLogs) PutLogEvents(_ context.Context, input *cloudwatchlogsV2.PutLogEventsInput, _ ...func(options *cloudwatchlogsV2.Options)) (*cloudwatchlogsV2.PutLogEventsOutput, error) { sequenceToken := "arbitraryToken" - output := &cloudwatchlogs.PutLogEventsOutput{NextSequenceToken: &sequenceToken} + output := &cloudwatchlogsV2.PutLogEventsOutput{NextSequenceToken: &sequenceToken} //Saving messages for _, event := range input.LogEvents { - c.pushedLogEvents = append(c.pushedLogEvents, *event) + c.pushedLogEvents = 
append(c.pushedLogEvents, event) } return output, nil diff --git a/plugins/outputs/dynatrace/README.md b/plugins/outputs/dynatrace/README.md index 666f821f6356c..f25b8708942d6 100644 --- a/plugins/outputs/dynatrace/README.md +++ b/plugins/outputs/dynatrace/README.md @@ -2,10 +2,12 @@ This plugin sends Telegraf metrics to [Dynatrace](https://www.dynatrace.com) via the [Dynatrace Metrics API V2](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/). It may be run alongside the Dynatrace OneAgent for automatic authentication or it may be run standalone on a host without a OneAgent by specifying a URL and API Token. More information on the plugin can be found in the [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/telegraf/). +All metrics are reported as gauges, unless they are specified to be delta counters using the `additional_counters` config option (see below). +See the [Dynatrace Metrics ingestion protocol documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol) for details on the types defined there. ## Requirements -You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. Monotonic counters (e.g. `diskio.reads`, `system.uptime`) require Dynatrace 208 or later. +You will either need a Dynatrace OneAgent (version 1.201 or higher) installed on the same host as Telegraf; or a Dynatrace environment with version 1.202 or higher. - Telegraf minimum version: Telegraf 1.16 @@ -65,7 +67,7 @@ You can learn more about how to use the Dynatrace API [here](https://www.dynatra prefix = "telegraf" ## Flag for skipping the tls certificate check, just for testing purposes, should be false by default insecure_skip_verify = false - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -119,7 +121,7 @@ insecure_skip_verify = false *required*: `false` -If you want to convert values represented as gauges to counters, add the metric names here. +If you want a metric to be treated and reported as a delta counter, add its name to this list. ```toml additional_counters = [ ] diff --git a/plugins/outputs/dynatrace/dynatrace.go b/plugins/outputs/dynatrace/dynatrace.go index c66bc8da2171e..adf74ea48a232 100644 --- a/plugins/outputs/dynatrace/dynatrace.go +++ b/plugins/outputs/dynatrace/dynatrace.go @@ -3,7 +3,7 @@ package dynatrace import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" @@ -69,7 +69,7 @@ const sampleConfig = ` ## Connection timeout, defaults to "5s" if not set. timeout = "5s" - ## If you want to convert values represented as gauges to counters, add the metric names here + ## If you want metrics to be treated and reported as delta counters, add the metric names here additional_counters = [ ] ## Optional dimensions to be added to every metric @@ -122,16 +122,10 @@ func (d *Dynatrace) Write(metrics []telegraf.Metric) error { dims = append(dims, dimensions.NewDimension(tag.Key, tag.Value)) } - metricType := tm.Type() for _, field := range tm.FieldList() { metricName := tm.Name() + "." 
+ field.Key - for _, i := range d.AddCounterMetrics { - if metricName == i { - metricType = telegraf.Counter - } - } - typeOpt := getTypeOption(metricType, field) + typeOpt := d.getTypeOption(tm, field) if typeOpt == nil { // Unsupported type. Log only once per unsupported metric name @@ -215,7 +209,7 @@ func (d *Dynatrace) send(msg string) error { } // print metric line results as info log - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) if err != nil { d.Log.Errorf("Dynatrace error reading response") } @@ -267,15 +261,19 @@ func init() { }) } -func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetric.MetricOption { - if metricType == telegraf.Counter { +func (d *Dynatrace) getTypeOption(metric telegraf.Metric, field *telegraf.Field) dtMetric.MetricOption { + metricName := metric.Name() + "." + field.Key + for _, i := range d.AddCounterMetrics { + if metricName != i { + continue + } switch v := field.Value.(type) { case float64: - return dtMetric.WithFloatCounterValueTotal(v) + return dtMetric.WithFloatCounterValueDelta(v) case uint64: - return dtMetric.WithIntCounterValueTotal(int64(v)) + return dtMetric.WithIntCounterValueDelta(int64(v)) case int64: - return dtMetric.WithIntCounterValueTotal(v) + return dtMetric.WithIntCounterValueDelta(v) default: return nil } @@ -287,7 +285,7 @@ func getTypeOption(metricType telegraf.ValueType, field *telegraf.Field) dtMetri case uint64: return dtMetric.WithIntGaugeValue(int64(v)) case int64: - return dtMetric.WithIntGaugeValue(32) + return dtMetric.WithIntGaugeValue(v) case bool: if v { return dtMetric.WithIntGaugeValue(1) diff --git a/plugins/outputs/dynatrace/dynatrace_test.go b/plugins/outputs/dynatrace/dynatrace_test.go index d9076906c1020..0ed7cf4cf1195 100644 --- a/plugins/outputs/dynatrace/dynatrace_test.go +++ b/plugins/outputs/dynatrace/dynatrace_test.go @@ -2,10 +2,13 @@ package dynatrace import ( "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "net/http/httptest" "regexp" + "sort" + "strings" "testing" "time" @@ -123,26 +126,37 @@ func TestMissingAPIToken(t *testing.T) { } func TestSendMetrics(t *testing.T) { + expected := []string{} + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) - expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000\nmymeasurement.value,dt.metrics.source=telegraf count,3.14 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) + + lines := strings.Split(bodyString, "\n") + + sort.Strings(lines) + sort.Strings(expected) + + expectedString := strings.Join(expected, "\n") + foundString := strings.Join(lines, "\n") + if foundString != expectedString { + t.Errorf("Metric encoding failed. 
expected: %#v but got: %#v", expectedString, foundString) } w.WriteHeader(http.StatusOK) - err = json.NewEncoder(w).Encode(`{"linesOk":10,"linesInvalid":0,"error":null}`) + err = json.NewEncoder(w).Encode(fmt.Sprintf(`{"linesOk":%d,"linesInvalid":0,"error":null}`, len(lines))) require.NoError(t, err) })) defer ts.Close() - d := &Dynatrace{} + d := &Dynatrace{ + URL: ts.URL, + APIToken: "123", + Log: testutil.Logger{}, + AddCounterMetrics: []string{}, + } - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} err := d.Init() require.NoError(t, err) err = d.Connect() @@ -150,22 +164,43 @@ func TestSendMetrics(t *testing.T) { // Init metrics + // Simple metrics are exported as a gauge unless in additional_counters + expected = append(expected, "simple_metric.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "simple_metric.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "simple_metric.counter") m1 := metric.New( - "mymeasurement", + "simple_metric", map[string]string{}, - map[string]interface{}{"myfield": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) + // Even if Type() returns counter, all metrics are treated as a gauge unless explicitly added to additional_counters + expected = append(expected, "counter_type.value,dt.metrics.source=telegraf gauge,3.14 1289430000000") + expected = append(expected, "counter_type.counter,dt.metrics.source=telegraf count,delta=5 1289430000000") + d.AddCounterMetrics = append(d.AddCounterMetrics, "counter_type.counter") m2 := metric.New( - "mymeasurement", + "counter_type", map[string]string{}, - map[string]interface{}{"value": float64(3.14)}, + map[string]interface{}{"value": float64(3.14), "counter": 5}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), telegraf.Counter, ) - metrics := []telegraf.Metric{m1, m2} + expected = append(expected, "complex_metric.int,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.int64,dt.metrics.source=telegraf gauge,2 1289430000000") + expected = append(expected, "complex_metric.float,dt.metrics.source=telegraf gauge,3 1289430000000") + expected = append(expected, "complex_metric.float64,dt.metrics.source=telegraf gauge,4 1289430000000") + expected = append(expected, "complex_metric.true,dt.metrics.source=telegraf gauge,1 1289430000000") + expected = append(expected, "complex_metric.false,dt.metrics.source=telegraf gauge,0 1289430000000") + m3 := metric.New( + "complex_metric", + map[string]string{}, + map[string]interface{}{"int": 1, "int64": int64(2), "float": 3.0, "float64": float64(4.0), "true": true, "false": false}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + metrics := []telegraf.Metric{m1, m2, m3} err = d.Write(metrics) require.NoError(t, err) @@ -174,7 +209,7 @@ func TestSendMetrics(t *testing.T) { func TestSendSingleMetricWithUnorderedTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because dimension order isn't guaranteed @@ -220,7 +255,7 @@ func TestSendMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 
w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) expected := "mymeasurement.myfield,dt.metrics.source=telegraf gauge,3.14 1289430000000" @@ -261,7 +296,7 @@ func TestSendMetricWithUpperCaseTagKeys(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) @@ -308,7 +343,7 @@ func TestSendBooleanMetricWithoutTags(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -349,15 +384,15 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed - require.Equal(t, len(bodyString), 79) + require.Equal(t, len(bodyString), 78) require.Regexp(t, regexp.MustCompile("^mymeasurement.value"), bodyString) require.Regexp(t, regexp.MustCompile("dt.metrics.source=telegraf"), bodyString) require.Regexp(t, regexp.MustCompile("dim=value"), bodyString) - require.Regexp(t, regexp.MustCompile("gauge,32 1289430000000$"), bodyString) + require.Regexp(t, regexp.MustCompile("gauge,2 1289430000000$"), bodyString) err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) require.NoError(t, err) })) @@ -378,7 +413,7 @@ func TestSendMetricWithDefaultDimensions(t *testing.T) { m1 := metric.New( "mymeasurement", map[string]string{}, - map[string]interface{}{"value": 32}, + map[string]interface{}{"value": 2}, time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), ) @@ -392,7 +427,7 @@ func TestMetricDimensionsOverrideDefault(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -435,7 +470,7 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) + bodyBytes, err := io.ReadAll(r.Body) require.NoError(t, err) bodyString := string(bodyBytes) // use regex because field order isn't guaranteed @@ -475,47 +510,6 @@ func TestStaticDimensionsOverrideMetric(t *testing.T) { require.NoError(t, err) } -func TestSendCounterMetricWithoutTags(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - // check the encoded result - bodyBytes, err := ioutil.ReadAll(r.Body) - require.NoError(t, err) - bodyString := string(bodyBytes) - expected := 
"mymeasurement.value,dt.metrics.source=telegraf gauge,32 1289430000000" - if bodyString != expected { - t.Errorf("Metric encoding failed. expected: %#v but got: %#v", expected, bodyString) - } - err = json.NewEncoder(w).Encode(`{"linesOk":1,"linesInvalid":0,"error":null}`) - require.NoError(t, err) - })) - defer ts.Close() - - d := &Dynatrace{} - - d.URL = ts.URL - d.APIToken = "123" - d.Log = testutil.Logger{} - err := d.Init() - require.NoError(t, err) - err = d.Connect() - require.NoError(t, err) - - // Init metrics - - m1 := metric.New( - "mymeasurement", - map[string]string{}, - map[string]interface{}{"value": 32}, - time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), - ) - - metrics := []telegraf.Metric{m1} - - err = d.Write(metrics) - require.NoError(t, err) -} - var warnfCalledTimes int type loggerStub struct { diff --git a/plugins/outputs/event_hubs/README.md b/plugins/outputs/event_hubs/README.md new file mode 100644 index 0000000000000..c71c06f99e160 --- /dev/null +++ b/plugins/outputs/event_hubs/README.md @@ -0,0 +1,25 @@ +# Azure Event Hubs output plugin + +This plugin for [Azure Event Hubs](https://azure.microsoft.com/en-gb/services/event-hubs/) will send metrics to a single Event Hub within an Event Hubs namespace. Metrics are sent as message batches, each message payload containing one metric object. The messages do not specify a partition key, and will thus be automatically load-balanced (round-robin) across all the Event Hub partitions. + +## Metrics + +The plugin uses the Telegraf serializers to format the metric data sent in the message payloads. You can select any of the supported output formats, although JSON is probably the easiest to integrate with downstream components. + +## Configuration + +```toml +[[ outputs.event_hubs ]] +## The full connection string to the Event Hub (required) +## The shared access key must have "Send" permissions on the target Event Hub. +connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" + +## Client timeout (defaults to 30s) +# timeout = "30s" + +## Data format to output. 
+## Each data format has its own unique set of configuration options, read +## more about them here: +## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +data_format = "json" +``` diff --git a/plugins/outputs/event_hubs/event_hubs.go b/plugins/outputs/event_hubs/event_hubs.go new file mode 100644 index 0000000000000..3c87a84fb62ce --- /dev/null +++ b/plugins/outputs/event_hubs/event_hubs.go @@ -0,0 +1,148 @@ +package event_hubs + +import ( + "context" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" +) + +/* +** Wrapper interface for eventhub.Hub + */ + +type EventHubInterface interface { + GetHub(s string) error + Close(ctx context.Context) error + SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error +} + +type eventHub struct { + hub *eventhub.Hub +} + +func (eh *eventHub) GetHub(s string) error { + hub, err := eventhub.NewHubFromConnectionString(s) + + if err != nil { + return err + } + + eh.hub = hub + + return nil +} + +func (eh *eventHub) Close(ctx context.Context) error { + return eh.hub.Close(ctx) +} + +func (eh *eventHub) SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error { + return eh.hub.SendBatch(ctx, iterator, opts...) +} + +/* End wrapper interface */ + +type EventHubs struct { + Log telegraf.Logger `toml:"-"` + ConnectionString string `toml:"connection_string"` + Timeout config.Duration + + Hub EventHubInterface + serializer serializers.Serializer +} + +const ( + defaultRequestTimeout = time.Second * 30 +) + +func (e *EventHubs) Description() string { + return "Configuration for Event Hubs output plugin" +} + +func (e *EventHubs) SampleConfig() string { + return ` + ## The full connection string to the Event Hub (required) + ## The shared access key must have "Send" permissions on the target Event Hub. + connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" + + ## Client timeout (defaults to 30s) + # timeout = "30s" + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" +` +} + +func (e *EventHubs) Init() error { + err := e.Hub.GetHub(e.ConnectionString) + + if err != nil { + return err + } + + return nil +} + +func (e *EventHubs) Connect() error { + return nil +} + +func (e *EventHubs) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + err := e.Hub.Close(ctx) + + if err != nil { + return err + } + + return nil +} + +func (e *EventHubs) SetSerializer(serializer serializers.Serializer) { + e.serializer = serializer +} + +func (e *EventHubs) Write(metrics []telegraf.Metric) error { + var events []*eventhub.Event + + for _, metric := range metrics { + payload, err := e.serializer.Serialize(metric) + + if err != nil { + e.Log.Debugf("Could not serialize metric: %v", err) + continue + } + + events = append(events, eventhub.NewEvent(payload)) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + err := e.Hub.SendBatch(ctx, eventhub.NewEventBatchIterator(events...)) + + if err != nil { + return err + } + + return nil +} + +func init() { + outputs.Add("event_hubs", func() telegraf.Output { + return &EventHubs{ + Hub: &eventHub{}, + Timeout: config.Duration(defaultRequestTimeout), + } + }) +} diff --git a/plugins/outputs/event_hubs/event_hubs_test.go b/plugins/outputs/event_hubs/event_hubs_test.go new file mode 100644 index 0000000000000..9b17aef605833 --- /dev/null +++ b/plugins/outputs/event_hubs/event_hubs_test.go @@ -0,0 +1,162 @@ +package event_hubs + +import ( + "context" + "fmt" + "math/rand" + "os" + "testing" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +/* +** Wrapper interface mock for eventhub.Hub + */ + +type mockEventHub struct { + mock.Mock +} + +func (eh *mockEventHub) GetHub(s string) error { + args := eh.Called(s) + return args.Error(0) +} + +func (eh *mockEventHub) Close(ctx context.Context) error { + args := eh.Called(ctx) + return args.Error(0) +} + +func (eh *mockEventHub) SendBatch(ctx context.Context, iterator eventhub.BatchIterator, opts ...eventhub.BatchOption) error { + args := eh.Called(ctx, iterator, opts) + return args.Error(0) +} + +/* End wrapper interface */ + +func TestInitAndWrite(t *testing.T) { + serializer, _ := json.NewSerializer(time.Second, "") + mockHub := &mockEventHub{} + e := &EventHubs{ + Hub: mockHub, + ConnectionString: "mock", + Timeout: config.Duration(time.Second * 5), + serializer: serializer, + } + + mockHub.On("GetHub", mock.Anything).Return(nil).Once() + err := e.Init() + require.NoError(t, err) + mockHub.AssertExpectations(t) + + metrics := testutil.MockMetrics() + + mockHub.On("SendBatch", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + err = e.Write(metrics) + require.NoError(t, err) + mockHub.AssertExpectations(t) +} + +/* +** Integration test (requires an Event Hubs instance) + */ + +func TestInitAndWriteIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("EVENTHUB_CONNECTION_STRING") == "" { + t.Skip("Missing environment 
variable EVENTHUB_CONNECTION_STRING") + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + + // Create a new, empty Event Hub + // NB: for this to work, the connection string needs to grant "Manage" permissions on the root namespace + mHub, err := eventhub.NewHubManagerFromConnectionString(os.Getenv("EVENTHUB_CONNECTION_STRING")) + require.NoError(t, err) + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + name := fmt.Sprintf("testmetrics%05d", r.Intn(10000)) + + entity, err := mHub.Put(ctx, name, eventhub.HubWithPartitionCount(1)) + require.NoError(t, err) + + // Delete the test hub + defer func() { + err := mHub.Delete(ctx, entity.Name) + require.NoError(t, err) + }() + + testHubCS := os.Getenv("EVENTHUB_CONNECTION_STRING") + ";EntityPath=" + entity.Name + + // Configure the plugin to target the newly created hub + serializer, _ := json.NewSerializer(time.Second, "") + + e := &EventHubs{ + Hub: &eventHub{}, + ConnectionString: testHubCS, + Timeout: config.Duration(time.Second * 5), + serializer: serializer, + } + + // Verify that we can connect to Event Hubs + err = e.Init() + require.NoError(t, err) + + // Verify that we can successfully write data to Event Hubs + metrics := testutil.MockMetrics() + err = e.Write(metrics) + require.NoError(t, err) + + /* + ** Verify we can read data back from the test hub + */ + + exit := make(chan string) + + // Create a hub client for receiving + hub, err := eventhub.NewHubFromConnectionString(testHubCS) + require.NoError(t, err) + + // The handler function will pass received messages via the channel + handler := func(ctx context.Context, event *eventhub.Event) error { + exit <- string(event.Data) + return nil + } + + // Set up the receivers + runtimeInfo, err := hub.GetRuntimeInformation(ctx) + require.NoError(t, err) + + for _, partitionID := range runtimeInfo.PartitionIDs { + _, err := hub.Receive(ctx, partitionID, handler, eventhub.ReceiveWithStartingOffset("-1")) + require.NoError(t, err) + } + + // Wait to receive the same number of messages sent, with timeout + received := 0 +wait: + for _, metric := range metrics { + select { + case m := <-exit: + t.Logf("Received for %s: %s", metric.Name(), m) + received = received + 1 + case <-time.After(10 * time.Second): + t.Logf("Timeout") + break wait + } + } + + // Make sure received == sent + require.Equal(t, received, len(metrics)) +} diff --git a/plugins/outputs/file/file_test.go b/plugins/outputs/file/file_test.go index f1e87853d6153..5fcdc511972ac 100644 --- a/plugins/outputs/file/file_test.go +++ b/plugins/outputs/file/file_test.go @@ -3,7 +3,6 @@ package file import ( "bytes" "io" - "io/ioutil" "os" "testing" @@ -181,7 +180,7 @@ func TestFileStdout(t *testing.T) { } func createFile() *os.File { - f, err := ioutil.TempFile("", "") + f, err := os.CreateTemp("", "") if err != nil { panic(err) } @@ -190,7 +189,7 @@ func createFile() *os.File { } func tmpFile() string { - d, err := ioutil.TempDir("", "") + d, err := os.MkdirTemp("", "") if err != nil { panic(err) } @@ -198,7 +197,7 @@ func tmpFile() string { } func validateFile(fname, expS string, t *testing.T) { - buf, err := ioutil.ReadFile(fname) + buf, err := os.ReadFile(fname) if err != nil { panic(err) } diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 4945ce46f84f0..96e290b09f5a6 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -8,11 +8,29 @@ This plugin writes to a Graylog instance using the "[GELF][]" format. 
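Aside, before the Graylog configuration block below: the graylog.go changes further down rework GELF framing, so it helps to recall the wire format. Per the GELF spec, each UDP chunk carries a 12-byte header ahead of its slice of the zlib-compressed payload. A minimal sketch of that framing (the plugin's own createChunkedMessage, later in this diff, is the real implementation; this standalone version only illustrates the header layout):

```go
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
)

// gelfChunk frames one slice of a compressed GELF payload.
// Header layout per the GELF spec: 2 magic bytes, an 8-byte random
// message id (shared by all chunks of one message), then the chunk
// index and the total chunk count as single bytes.
func gelfChunk(id []byte, index, count byte, payload []byte) []byte {
	var packet bytes.Buffer
	packet.Write([]byte{0x1e, 0x0f}) // chunked-message magic bytes
	packet.Write(id)                 // 8-byte message id
	packet.WriteByte(index)
	packet.WriteByte(count)
	packet.Write(payload)
	return packet.Bytes()
}

func main() {
	id := make([]byte, 8)
	_, _ = rand.Read(id)
	chunk := gelfChunk(id, 0, 2, []byte("first half of compressed payload"))
	fmt.Printf("header: % x\n", chunk[:12])
}
```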
```toml [[outputs.graylog]] - ## UDP endpoint for your graylog instances. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. + servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. + # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. ## example: short_message_field = "message" # short_message_field = "" + + ## According to GELF payload specification, additional fields names must be prefixed + ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. + ## Set to true for backward compatibility. + # name_field_no_prefix = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` + +Server endpoint may be specified without UDP or TCP scheme (eg. "127.0.0.1:12201"). +In such case, UDP protocol is assumed. TLS config is ignored for UDP endpoints. diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 05feafe9effc1..16b744f35ccdc 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/zlib" "crypto/rand" + "crypto/tls" "encoding/binary" ejson "encoding/json" "fmt" @@ -11,55 +12,94 @@ import ( "math" "net" "os" + "strings" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/outputs" ) const ( - defaultGraylogEndpoint = "127.0.0.1:12201" + defaultEndpoint = "127.0.0.1:12201" defaultConnection = "wan" defaultMaxChunkSizeWan = 1420 defaultMaxChunkSizeLan = 8154 + defaultScheme = "udp" + defaultTimeout = 5 * time.Second ) -type GelfConfig struct { - GraylogEndpoint string +type gelfConfig struct { + Endpoint string Connection string MaxChunkSizeWan int MaxChunkSizeLan int } -type Gelf struct { - GelfConfig +type gelf interface { + io.WriteCloser + Connect() error } -func NewGelfWriter(config GelfConfig) *Gelf { - if config.GraylogEndpoint == "" { - config.GraylogEndpoint = defaultGraylogEndpoint +type gelfCommon struct { + gelfConfig + dialer *net.Dialer + conn net.Conn +} + +type gelfUDP struct { + gelfCommon +} + +type gelfTCP struct { + gelfCommon + tlsConfig *tls.Config +} + +func newGelfWriter(cfg gelfConfig, dialer *net.Dialer, tlsConfig *tls.Config) gelf { + if cfg.Endpoint == "" { + cfg.Endpoint = defaultEndpoint } - if config.Connection == "" { - config.Connection = defaultConnection + if cfg.Connection == "" { + cfg.Connection = defaultConnection } - if config.MaxChunkSizeWan == 0 { - config.MaxChunkSizeWan = defaultMaxChunkSizeWan + if cfg.MaxChunkSizeWan == 0 { + cfg.MaxChunkSizeWan = defaultMaxChunkSizeWan } - if config.MaxChunkSizeLan == 0 { - config.MaxChunkSizeLan = defaultMaxChunkSizeLan + if cfg.MaxChunkSizeLan == 0 { + cfg.MaxChunkSizeLan = defaultMaxChunkSizeLan } - g := &Gelf{GelfConfig: config} + scheme := defaultScheme + parts := strings.SplitN(cfg.Endpoint, "://", 2) + if len(parts) == 2 { + scheme = strings.ToLower(parts[0]) + cfg.Endpoint = parts[1] + } + common := gelfCommon{ + gelfConfig: cfg, + dialer: dialer, + } + + var g gelf + switch scheme { + case "tcp": + g = &gelfTCP{gelfCommon: common, tlsConfig: tlsConfig} + default: + g = &gelfUDP{gelfCommon: common} + } return g } -func (g *Gelf) Write(message []byte) (n 
int, err error) { +func (g *gelfUDP) Write(message []byte) (n int, err error) { compressed := g.compress(message) - chunksize := g.GelfConfig.MaxChunkSizeWan + chunksize := g.gelfConfig.MaxChunkSizeWan length := compressed.Len() if length > chunksize { @@ -84,10 +124,19 @@ func (g *Gelf) Write(message []byte) (n int, err error) { n = len(message) - return + return n, nil +} + +func (g *gelfUDP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err } -func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { +func (g *gelfUDP) createChunkedMessage(index int, chunkCountInt int, id []byte, compressed *bytes.Buffer) bytes.Buffer { var packet bytes.Buffer chunksize := g.getChunksize() @@ -104,26 +153,26 @@ func (g *Gelf) createChunkedMessage(index int, chunkCountInt int, id []byte, com return packet } -func (g *Gelf) getChunksize() int { - if g.GelfConfig.Connection == "wan" { - return g.GelfConfig.MaxChunkSizeWan +func (g *gelfUDP) getChunksize() int { + if g.gelfConfig.Connection == "wan" { + return g.gelfConfig.MaxChunkSizeWan } - if g.GelfConfig.Connection == "lan" { - return g.GelfConfig.MaxChunkSizeLan + if g.gelfConfig.Connection == "lan" { + return g.gelfConfig.MaxChunkSizeLan } - return g.GelfConfig.MaxChunkSizeWan + return g.gelfConfig.MaxChunkSizeWan } -func (g *Gelf) intToBytes(i int) []byte { +func (g *gelfUDP) intToBytes(i int) []byte { buf := new(bytes.Buffer) binary.Write(buf, binary.LittleEndian, int8(i)) return buf.Bytes() } -func (g *Gelf) compress(b []byte) bytes.Buffer { +func (g *gelfUDP) compress(b []byte) bytes.Buffer { var buf bytes.Buffer comp := zlib.NewWriter(&buf) @@ -133,47 +182,147 @@ func (g *Gelf) compress(b []byte) bytes.Buffer { return buf } -func (g *Gelf) send(b []byte) error { - udpAddr, err := net.ResolveUDPAddr("udp", g.GelfConfig.GraylogEndpoint) +func (g *gelfUDP) Connect() error { + conn, err := g.dialer.Dial("udp", g.gelfConfig.Endpoint) if err != nil { return err } + g.conn = conn + return nil +} + +func (g *gelfUDP) send(b []byte) error { + if g.conn == nil { + err := g.Connect() + if err != nil { + return err + } + } - conn, err := net.DialUDP("udp", nil, udpAddr) + _, err := g.conn.Write(b) + if err != nil { + _ = g.conn.Close() + g.conn = nil + } + + return err +} + +func (g *gelfTCP) Write(message []byte) (n int, err error) { + err = g.send(message) + if err != nil { + return 0, err + } + + n = len(message) + + return n, nil +} + +func (g *gelfTCP) Close() (err error) { + if g.conn != nil { + err = g.conn.Close() + g.conn = nil + } + + return err +} + +func (g *gelfTCP) Connect() error { + var err error + var conn net.Conn + if g.tlsConfig == nil { + conn, err = g.dialer.Dial("tcp", g.gelfConfig.Endpoint) + } else { + conn, err = tls.DialWithDialer(g.dialer, "tcp", g.gelfConfig.Endpoint, g.tlsConfig) + } if err != nil { return err } + g.conn = conn + return nil +} + +func (g *gelfTCP) send(b []byte) error { + if g.conn == nil { + err := g.Connect() + if err != nil { + return err + } + } + + _, err := g.conn.Write(b) + if err != nil { + _ = g.conn.Close() + g.conn = nil + } else { + _, err = g.conn.Write([]byte{0}) // message delimiter + if err != nil { + _ = g.conn.Close() + g.conn = nil + } + } - _, err = conn.Write(b) return err } type Graylog struct { - Servers []string `toml:"servers"` - ShortMessageField string `toml:"short_message_field"` - writer io.Writer + Servers []string `toml:"servers"` + ShortMessageField string 
`toml:"short_message_field"` + NameFieldNoPrefix bool `toml:"name_field_noprefix"` + Timeout config.Duration `toml:"timeout"` + tlsint.ClientConfig + + writer io.Writer + closers []io.WriteCloser } var sampleConfig = ` - ## UDP endpoint for your graylog instance. - servers = ["127.0.0.1:12201"] + ## Endpoints for your graylog instances. + servers = ["udp://127.0.0.1:12201"] + + ## Connection timeout. + # timeout = "5s" ## The field to use as the GELF short_message, if unset the static string ## "telegraf" will be used. ## example: short_message_field = "message" # short_message_field = "" + + ## According to GELF payload specification, additional fields names must be prefixed + ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. + ## Set to true for backward compatibility. + # name_field_no_prefix = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (g *Graylog) Connect() error { - writers := []io.Writer{} + var writers []io.Writer + dialer := &net.Dialer{Timeout: time.Duration(g.Timeout)} if len(g.Servers) == 0 { g.Servers = append(g.Servers, "localhost:12201") } + tlsCfg, err := g.ClientConfig.TLSConfig() + if err != nil { + return err + } + for _, server := range g.Servers { - w := NewGelfWriter(GelfConfig{GraylogEndpoint: server}) + w := newGelfWriter(gelfConfig{Endpoint: server}, dialer, tlsCfg) + err := w.Connect() + if err != nil { + return fmt.Errorf("failed to connect to server [%s]: %v", server, err) + } writers = append(writers, w) + g.closers = append(g.closers, w) } g.writer = io.MultiWriter(writers...) @@ -181,6 +330,9 @@ func (g *Graylog) Connect() error { } func (g *Graylog) Close() error { + for _, closer := range g.closers { + _ = closer.Close() + } return nil } @@ -214,9 +366,13 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { m := make(map[string]interface{}) m["version"] = "1.1" - m["timestamp"] = metric.Time().UnixNano() / 1000000000 + m["timestamp"] = float64(metric.Time().UnixNano()) / 1_000_000_000 m["short_message"] = "telegraf" - m["name"] = metric.Name() + if g.NameFieldNoPrefix { + m["name"] = metric.Name() + } else { + m["_name"] = metric.Name() + } if host, ok := metric.GetTag("host"); ok { m["host"] = host @@ -253,6 +409,8 @@ func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { func init() { outputs.Add("graylog", func() telegraf.Output { - return &Graylog{} + return &Graylog{ + Timeout: config.Duration(defaultTimeout), + } }) } diff --git a/plugins/outputs/graylog/graylog_test.go b/plugins/outputs/graylog/graylog_test.go index 37816a7a2c4b3..3932c736c2aff 100644 --- a/plugins/outputs/graylog/graylog_test.go +++ b/plugins/outputs/graylog/graylog_test.go @@ -3,56 +3,258 @@ package graylog import ( "bytes" "compress/zlib" + "crypto/tls" "encoding/json" "io" "net" "sync" "testing" + "time" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestWrite(t *testing.T) { - var wg sync.WaitGroup - var wg2 sync.WaitGroup - wg.Add(1) - wg2.Add(1) - go UDPServer(t, &wg, &wg2) - wg2.Wait() +func TestWriteUDP(t *testing.T) { + tests := []struct { + name string + instance Graylog + }{ + { + name: "default without scheme", + instance: Graylog{ + Servers: 
[]string{"127.0.0.1:12201"}, + }, + }, + { + name: "UDP", + instance: Graylog{ + Servers: []string{"udp://127.0.0.1:12201"}, + }, + }, + { + name: "UDP non-standard name field", + instance: Graylog{ + Servers: []string{"udp://127.0.0.1:12201"}, + NameFieldNoPrefix: true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var wg sync.WaitGroup + var wg2 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + go UDPServer(t, &wg, &wg2, &tt.instance) + wg2.Wait() + + i := tt.instance + err := i.Connect() + require.NoError(t, err) + defer i.Close() + defer wg.Wait() + + metrics := testutil.MockMetrics() + + // UDP scenario: + // 4 messages are send + + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + }) + } +} + +func TestWriteTCP(t *testing.T) { + pki := testutil.NewPKI("../../../testutil/pki") + tlsClientConfig := pki.TLSClientConfig() + tlsServerConfig, err := pki.TLSServerConfig().TLSConfig() + require.NoError(t, err) - i := Graylog{ - Servers: []string{"127.0.0.1:12201"}, + tests := []struct { + name string + instance Graylog + tlsServerConfig *tls.Config + }{ + { + name: "TCP", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + }, + }, + { + name: "TLS", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + ClientConfig: tlsint.ClientConfig{ + ServerName: "localhost", + TLSCA: tlsClientConfig.TLSCA, + TLSKey: tlsClientConfig.TLSKey, + TLSCert: tlsClientConfig.TLSCert, + }, + }, + tlsServerConfig: tlsServerConfig, + }, + { + name: "TLS no validation", + instance: Graylog{ + Servers: []string{"tcp://127.0.0.1:12201"}, + ClientConfig: tlsint.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "localhost", + TLSKey: tlsClientConfig.TLSKey, + TLSCert: tlsClientConfig.TLSCert, + }, + }, + tlsServerConfig: tlsServerConfig, + }, } - i.Connect() - metrics := testutil.MockMetrics() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var wg sync.WaitGroup + var wg2 sync.WaitGroup + var wg3 sync.WaitGroup + wg.Add(1) + wg2.Add(1) + wg3.Add(1) + go TCPServer(t, &wg, &wg2, &wg3, tt.tlsServerConfig) + wg2.Wait() + + i := tt.instance + err = i.Connect() + require.NoError(t, err) + defer i.Close() + defer wg.Wait() + + metrics := testutil.MockMetrics() - i.Write(metrics) + // TCP scenario: + // 4 messages are send + // -> connection gets forcefully broken after the 2nd message (server closes connection) + // -> the 3rd write fails with error + // -> during the 4th write connection is restored and write is successful - wg.Wait() - i.Close() + err = i.Write(metrics) + require.NoError(t, err) + err = i.Write(metrics) + require.NoError(t, err) + wg3.Wait() + err = i.Write(metrics) + require.Error(t, err) + err = i.Write(metrics) + require.NoError(t, err) + }) + } } type GelfObject map[string]interface{} -func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup) { - serverAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:12201") - udpServer, _ := net.ListenUDP("udp", serverAddr) +func UDPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, config *Graylog) { + serverAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:12201") + require.NoError(t, err) + udpServer, err := net.ListenUDP("udp", serverAddr) + require.NoError(t, err) + defer udpServer.Close() defer wg.Done() + wg2.Done() + + recv := func() { + bufR := make([]byte, 1024) + n, _, err := 
udpServer.ReadFromUDP(bufR) + require.NoError(t, err) - bufR := make([]byte, 1024) + b := bytes.NewReader(bufR[0:n]) + r, _ := zlib.NewReader(b) + + bufW := bytes.NewBuffer(nil) + _, _ = io.Copy(bufW, r) + _ = r.Close() + + var obj GelfObject + _ = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["short_message"], "telegraf") + if config.NameFieldNoPrefix { + assert.Equal(t, obj["name"], "test1") + } else { + assert.Equal(t, obj["_name"], "test1") + } + assert.Equal(t, obj["_tag1"], "value1") + assert.Equal(t, obj["_value"], float64(1)) + } + + // in UDP scenario all 4 messages are received + + recv() + recv() + recv() + recv() +} + +func TCPServer(t *testing.T, wg *sync.WaitGroup, wg2 *sync.WaitGroup, wg3 *sync.WaitGroup, tlsConfig *tls.Config) { + tcpServer, err := net.Listen("tcp", "127.0.0.1:12201") + require.NoError(t, err) + defer tcpServer.Close() + defer wg.Done() wg2.Done() - n, _, _ := udpServer.ReadFromUDP(bufR) - b := bytes.NewReader(bufR[0:n]) - r, _ := zlib.NewReader(b) + accept := func() net.Conn { + conn, err := tcpServer.Accept() + require.NoError(t, err) + if tcpConn, ok := conn.(*net.TCPConn); ok { + _ = tcpConn.SetLinger(0) + } + _ = conn.SetDeadline(time.Now().Add(15 * time.Second)) + if tlsConfig != nil { + conn = tls.Server(conn, tlsConfig) + } + return conn + } + + recv := func(conn net.Conn) { + bufR := make([]byte, 1) + bufW := bytes.NewBuffer(nil) + for { + n, err := conn.Read(bufR) + require.NoError(t, err) + if n > 0 { + if bufR[0] == 0 { // message delimiter found + break + } + _, _ = bufW.Write(bufR) + } + } + + var obj GelfObject + err = json.Unmarshal(bufW.Bytes(), &obj) + require.NoError(t, err) + assert.Equal(t, obj["short_message"], "telegraf") + assert.Equal(t, obj["_name"], "test1") + assert.Equal(t, obj["_tag1"], "value1") + assert.Equal(t, obj["_value"], float64(1)) + } + + conn := accept() + defer conn.Close() - bufW := bytes.NewBuffer(nil) - io.Copy(bufW, r) - r.Close() + // in TCP scenario only 3 messages are received, the 3rd is lost due to simulated connection break after the 2nd - var obj GelfObject - json.Unmarshal(bufW.Bytes(), &obj) - assert.Equal(t, obj["_value"], float64(1)) + recv(conn) + recv(conn) + _ = conn.Close() + wg3.Done() + conn = accept() + defer conn.Close() + recv(conn) } diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go index f03cfcacba7a6..03a08fca21e7b 100644 --- a/plugins/outputs/health/health_test.go +++ b/plugins/outputs/health/health_test.go @@ -1,7 +1,7 @@ package health_test import ( - "io/ioutil" + "io" "net/http" "testing" "time" @@ -121,7 +121,7 @@ func TestHealth(t *testing.T) { require.NoError(t, err) require.Equal(t, tt.expectedCode, resp.StatusCode) - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) require.NoError(t, err) err = output.Close() diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 83faef0dae241..c94052ea92c1c 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -1,11 +1,11 @@ package http import ( + "bufio" "bytes" "context" "fmt" "io" - "io/ioutil" "net/http" "strings" "time" @@ -18,7 +18,8 @@ import ( ) const ( - defaultURL = "http://127.0.0.1:8080/telegraf" + maxErrMsgLen = 1024 + defaultURL = "http://127.0.0.1:8080/telegraf" ) var sampleConfig = ` @@ -182,11 +183,18 @@ func (h *HTTP) write(reqBody []byte) error { return err } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return 
fmt.Errorf("when writing to [%s] received status code: %d", h.URL, resp.StatusCode) + errorLine := "" + scanner := bufio.NewScanner(io.LimitReader(resp.Body, maxErrMsgLen)) + if scanner.Scan() { + errorLine = scanner.Text() + } + + return fmt.Errorf("when writing to [%s] received status code: %d. body: %s", h.URL, resp.StatusCode, errorLine) } + + _, err = io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("when writing to [%s] received error: %v", h.URL, err) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 8089f45f59f2e..d6803eed3211d 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -3,7 +3,7 @@ package http import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -272,7 +272,7 @@ func TestContentEncodingGzip(t *testing.T) { require.NoError(t, err) } - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) require.Contains(t, string(payload), "cpu value=42") diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index cd1b36a723aeb..36fde827e176a 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -85,8 +85,7 @@ The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP ser ``` ### Metrics - Reference the [influx serializer][] for details about metric production. - + [InfluxDB v1.x]: https://github.com/influxdata/influxdb [influx serializer]: /plugins/serializers/influx/README.md#Metrics diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 5c11d2821d2f1..ac85814db1f34 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -489,7 +488,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { @@ -503,13 +502,13 @@ func (c *httpClient) addHeaders(req *http.Request) { } func (c *httpClient) validateResponse(response io.ReadCloser) (io.ReadCloser, error) { - bodyBytes, err := ioutil.ReadAll(response) + bodyBytes, err := io.ReadAll(response) if err != nil { return nil, err } defer response.Close() - originalResponse := ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + originalResponse := io.NopCloser(bytes.NewBuffer(bodyBytes)) // Empty response is valid. 
if response == http.NoBody || len(bodyBytes) == 0 || bodyBytes == nil { diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index e19d8d2e580c9..ba4dd2d81b12a 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -6,7 +6,7 @@ import ( "compress/gzip" "context" "fmt" - "io/ioutil" + "io" "log" "net" "net/http" @@ -284,7 +284,7 @@ func TestHTTP_Write(t *testing.T) { }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -573,7 +573,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -618,7 +618,7 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { } func TestHTTP_UnixSocket(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf-test") + tmpdir, err := os.MkdirTemp("", "telegraf-test") if err != nil { require.NoError(t, err) } @@ -700,7 +700,7 @@ func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["db"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") @@ -835,7 +835,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) @@ -917,7 +917,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") w.WriteHeader(http.StatusNoContent) @@ -948,7 +948,7 @@ func TestDBRPTags(t *testing.T) { handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") require.Equal(t, r.FormValue("rp"), "foo") - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu,rp=foo value=42") w.WriteHeader(http.StatusNoContent) diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go index e62919cf43b13..c076580255740 100644 --- a/plugins/outputs/influxdb_v2/http.go +++ b/plugins/outputs/influxdb_v2/http.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "log" "math" "net" @@ -36,8 +35,9 @@ func (e APIError) Error() string { } const ( - defaultRequestTimeout = time.Second * 5 - defaultMaxWait = 60 // seconds + defaultRequestTimeout = time.Second * 5 + defaultMaxWaitSeconds = 60 + defaultMaxWaitRetryAfterSeconds = 10 * 60 ) type HTTPConfig struct { @@ -306,8 +306,9 @@ func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []te // retryDuration takes the longer of the Retry-After header and our own back-off 
calculation func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // basic exponential backoff (x^2)/40 (denominator to widen the slope) - // at 40 denominator, it'll take 35 retries to hit the max defaultMaxWait of 30s + // at 40 denominator, it'll take 49 retries to hit the max defaultMaxWait of 60s backoff := math.Pow(float64(c.retryCount), 2) / 40 + backoff = math.Min(backoff, defaultMaxWaitSeconds) // get any value from the header, if available retryAfterHeader := float64(0) @@ -319,11 +320,12 @@ func (c *httpClient) getRetryDuration(headers http.Header) time.Duration { // there was a value but we couldn't parse it? guess minimum 10 sec retryAfterHeader = 10 } + // protect against excessively large retry-after + retryAfterHeader = math.Min(retryAfterHeader, defaultMaxWaitRetryAfterSeconds) } - // take the highest value from both, but not over the max wait. + // take the highest value of backoff and retry-after. retry := math.Max(backoff, retryAfterHeader) - retry = math.Min(retry, defaultMaxWait) - return time.Duration(retry) * time.Second + return time.Duration(retry*1000) * time.Millisecond } func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { @@ -358,7 +360,7 @@ func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser return rc, nil } - return ioutil.NopCloser(reader), nil + return io.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go index 2ff4990fa8a3b..10e2a4e133eeb 100644 --- a/plugins/outputs/influxdb_v2/http_internal_test.go +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -56,12 +56,12 @@ func TestExponentialBackoffCalculation(t *testing.T) { expected time.Duration }{ {retryCount: 0, expected: 0}, - {retryCount: 1, expected: 0}, - {retryCount: 5, expected: 0}, - {retryCount: 10, expected: 2 * time.Second}, - {retryCount: 30, expected: 22 * time.Second}, + {retryCount: 1, expected: 25 * time.Millisecond}, + {retryCount: 5, expected: 625 * time.Millisecond}, + {retryCount: 10, expected: 2500 * time.Millisecond}, + {retryCount: 30, expected: 22500 * time.Millisecond}, {retryCount: 40, expected: 40 * time.Second}, - {retryCount: 50, expected: 60 * time.Second}, + {retryCount: 50, expected: 60 * time.Second}, // max hit {retryCount: 100, expected: 60 * time.Second}, {retryCount: 1000, expected: 60 * time.Second}, } @@ -72,3 +72,29 @@ func TestExponentialBackoffCalculation(t *testing.T) { }) } } + +func TestExponentialBackoffCalculationWithRetryAfter(t *testing.T) { + c := &httpClient{} + tests := []struct { + retryCount int + retryAfter string + expected time.Duration + }{ + {retryCount: 0, retryAfter: "0", expected: 0}, + {retryCount: 0, retryAfter: "10", expected: 10 * time.Second}, + {retryCount: 0, retryAfter: "60", expected: 60 * time.Second}, + {retryCount: 0, retryAfter: "600", expected: 600 * time.Second}, + {retryCount: 0, retryAfter: "601", expected: 600 * time.Second}, // max hit + {retryCount: 40, retryAfter: "39", expected: 40 * time.Second}, // retryCount wins + {retryCount: 40, retryAfter: "41", expected: 41 * time.Second}, // retryAfter wins + {retryCount: 100, retryAfter: "100", expected: 100 * time.Second}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_retries", test.retryCount), func(t *testing.T) { + c.retryCount = test.retryCount + hdr := http.Header{} + hdr.Add("Retry-After", test.retryAfter) + 
require.EqualValues(t, test.expected, c.getRetryDuration(hdr)) + }) + } +} diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go index 23c3ff05e17b6..0637cd8060bd0 100644 --- a/plugins/outputs/influxdb_v2/http_test.go +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -2,7 +2,7 @@ package influxdb_v2_test import ( "context" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -63,7 +63,7 @@ func TestWriteBucketTagWorksOnRetry(t *testing.T) { r.ParseForm() require.Equal(t, r.Form["bucket"], []string{"foo"}) - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Contains(t, string(body), "cpu value=42") diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index e76522018fb4a..54108d8be4398 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -136,6 +136,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index d30c730cfac18..2972427001ef5 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -212,6 +212,9 @@ var sampleConfig = ` ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index 2e75788400ae0..56858340887f5 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -1,11 +1,12 @@ package kinesis import ( + "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -26,7 +27,7 @@ type ( Log telegraf.Logger `toml:"-"` serializer serializers.Serializer - svc kinesisiface.KinesisAPI + svc kinesisClient internalaws.CredentialConfig } @@ -38,6 +39,10 @@ type ( } ) +type kinesisClient interface { + PutRecords(context.Context, *kinesis.PutRecordsInput, ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) +} + var sampleConfig = ` ## Amazon REGION of kinesis endpoint. 
region = "ap-southeast-2" @@ -126,9 +131,14 @@ func (k *KinesisOutput) Connect() error { k.Log.Infof("Establishing a connection to Kinesis in %s", k.Region) } - svc := kinesis.New(k.CredentialConfig.Credentials()) + cfg, err := k.CredentialConfig.Credentials() + if err != nil { + return err + } + + svc := kinesis.NewFromConfig(cfg) - _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + _, err = svc.DescribeStreamSummary(context.Background(), &kinesis.DescribeStreamSummaryInput{ StreamName: aws.String(k.StreamName), }) k.svc = svc @@ -143,14 +153,14 @@ func (k *KinesisOutput) SetSerializer(serializer serializers.Serializer) { k.serializer = serializer } -func (k *KinesisOutput) writeKinesis(r []*kinesis.PutRecordsRequestEntry) time.Duration { +func (k *KinesisOutput) writeKinesis(r []types.PutRecordsRequestEntry) time.Duration { start := time.Now() payload := &kinesis.PutRecordsInput{ Records: r, StreamName: aws.String(k.StreamName), } - resp, err := k.svc.PutRecords(payload) + resp, err := k.svc.PutRecords(context.Background(), payload) if err != nil { k.Log.Errorf("Unable to write to Kinesis : %s", err.Error()) return time.Since(start) @@ -210,7 +220,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { return nil } - r := []*kinesis.PutRecordsRequestEntry{} + r := []types.PutRecordsRequestEntry{} for _, metric := range metrics { sz++ @@ -223,12 +233,12 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { partitionKey := k.getPartitionKey(metric) - d := kinesis.PutRecordsRequestEntry{ + d := types.PutRecordsRequestEntry{ Data: values, PartitionKey: aws.String(partitionKey), } - r = append(r, &d) + r = append(r, d) if sz == maxRecordsPerRequest { elapsed := k.writeKinesis(r) diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 22b8e83e48e24..89724ef1805d2 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -1,13 +1,13 @@ package kinesis import ( + "context" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/gofrs/uuid" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/serializers" @@ -110,7 +110,7 @@ func TestPartitionKey(t *testing.T) { func TestWriteKinesis_WhenSuccess(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{0x65}, @@ -120,7 +120,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupResponse( 0, - []*kinesis.PutRecordsResultEntry{ + []types.PutRecordsResultEntry{ { SequenceNumber: aws.String(testSequenceNumber), ShardId: aws.String(testShardID), @@ -148,7 +148,7 @@ func TestWriteKinesis_WhenSuccess(t *testing.T) { func TestWriteKinesis_WhenRecordErrors(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{0x66}, @@ -158,7 +158,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupResponse( 1, - 
[]*kinesis.PutRecordsResultEntry{ + []types.PutRecordsResultEntry{ { ErrorCode: aws.String("InternalFailure"), ErrorMessage: aws.String("Internal Service Failure"), @@ -186,7 +186,7 @@ func TestWriteKinesis_WhenRecordErrors(t *testing.T) { func TestWriteKinesis_WhenServiceError(t *testing.T) { assert := assert.New(t) - records := []*kinesis.PutRecordsRequestEntry{ + records := []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: []byte{}, @@ -195,7 +195,7 @@ func TestWriteKinesis_WhenServiceError(t *testing.T) { svc := &mockKinesisPutRecords{} svc.SetupErrorResponse( - awserr.New("InvalidArgumentException", "Invalid record", nil), + &types.InvalidArgumentException{Message: aws.String("Invalid record")}, ) k := KinesisOutput{ @@ -262,7 +262,7 @@ func TestWrite_SingleMetric(t *testing.T) { svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { StreamName: aws.String(testStreamName), - Records: []*kinesis.PutRecordsRequestEntry{ + Records: []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: metricData, @@ -449,7 +449,7 @@ func TestWrite_SerializerError(t *testing.T) { svc.AssertRequests(t, []*kinesis.PutRecordsInput{ { StreamName: aws.String(testStreamName), - Records: []*kinesis.PutRecordsRequestEntry{ + Records: []types.PutRecordsRequestEntry{ { PartitionKey: aws.String(testPartitionKey), Data: metric1Data, @@ -469,20 +469,18 @@ type mockKinesisPutRecordsResponse struct { } type mockKinesisPutRecords struct { - kinesisiface.KinesisAPI - requests []*kinesis.PutRecordsInput responses []*mockKinesisPutRecordsResponse } func (m *mockKinesisPutRecords) SetupResponse( - failedRecordCount int64, - records []*kinesis.PutRecordsResultEntry, + failedRecordCount int32, + records []types.PutRecordsResultEntry, ) { m.responses = append(m.responses, &mockKinesisPutRecordsResponse{ Err: nil, Output: &kinesis.PutRecordsOutput{ - FailedRecordCount: aws.Int64(failedRecordCount), + FailedRecordCount: aws.Int32(failedRecordCount), Records: records, }, }) @@ -490,25 +488,25 @@ func (m *mockKinesisPutRecords) SetupResponse( func (m *mockKinesisPutRecords) SetupGenericResponse( successfulRecordCount uint32, - failedRecordCount uint32, + failedRecordCount int32, ) { - records := []*kinesis.PutRecordsResultEntry{} + records := []types.PutRecordsResultEntry{} for i := uint32(0); i < successfulRecordCount; i++ { - records = append(records, &kinesis.PutRecordsResultEntry{ + records = append(records, types.PutRecordsResultEntry{ SequenceNumber: aws.String(testSequenceNumber), ShardId: aws.String(testShardID), }) } - for i := uint32(0); i < failedRecordCount; i++ { - records = append(records, &kinesis.PutRecordsResultEntry{ + for i := int32(0); i < failedRecordCount; i++ { + records = append(records, types.PutRecordsResultEntry{ ErrorCode: aws.String("InternalFailure"), ErrorMessage: aws.String("Internal Service Failure"), }) } - m.SetupResponse(int64(failedRecordCount), records) + m.SetupResponse(failedRecordCount, records) } func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { @@ -518,7 +516,7 @@ func (m *mockKinesisPutRecords) SetupErrorResponse(err error) { }) } -func (m *mockKinesisPutRecords) PutRecords(input *kinesis.PutRecordsInput) (*kinesis.PutRecordsOutput, error) { +func (m *mockKinesisPutRecords) PutRecords(_ context.Context, input *kinesis.PutRecordsInput, _ ...func(*kinesis.Options)) (*kinesis.PutRecordsOutput, error) { reqNum := len(m.requests) if reqNum > len(m.responses) { return nil, fmt.Errorf("Response for request %+v not setup", 
reqNum)
@@ -612,12 +610,12 @@ func createTestMetrics(
 func createPutRecordsRequestEntries(
 	metricsData [][]byte,
-) []*kinesis.PutRecordsRequestEntry {
+) []types.PutRecordsRequestEntry {
 	count := len(metricsData)
-	records := make([]*kinesis.PutRecordsRequestEntry, count)
+	records := make([]types.PutRecordsRequestEntry, count)
 	for i := 0; i < count; i++ {
-		records[i] = &kinesis.PutRecordsRequestEntry{
+		records[i] = types.PutRecordsRequestEntry{
 			PartitionKey: aws.String(testPartitionKey),
 			Data:         metricsData[i],
 		}
 	}
diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go
index d4aa3e6e92bb7..dc1e9b6fa7856 100644
--- a/plugins/outputs/librato/librato.go
+++ b/plugins/outputs/librato/librato.go
@@ -4,7 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"regexp"
 	"time"
@@ -151,7 +151,7 @@ func (l *Librato) Write(metrics []telegraf.Metric) error {
 	defer resp.Body.Close()
 
 	if resp.StatusCode != 200 || l.Debug {
-		htmlData, err := ioutil.ReadAll(resp.Body)
+		htmlData, err := io.ReadAll(resp.Body)
 		if err != nil {
 			l.Log.Debugf("Couldn't get response! (%v)", err)
 		}
diff --git a/plugins/outputs/loki/loki.go b/plugins/outputs/loki/loki.go
index 2f920ec829e3b..07d4d473bf396 100644
--- a/plugins/outputs/loki/loki.go
+++ b/plugins/outputs/loki/loki.go
@@ -57,7 +57,7 @@ type Loki struct {
 	Timeout      config.Duration   `toml:"timeout"`
 	Username     string            `toml:"username"`
 	Password     string            `toml:"password"`
-	Headers      map[string]string `toml:"headers"`
+	Headers      map[string]string `toml:"http_headers"`
 	ClientID     string            `toml:"client_id"`
 	ClientSecret string            `toml:"client_secret"`
 	TokenURL     string            `toml:"token_url"`
diff --git a/plugins/outputs/loki/loki_test.go b/plugins/outputs/loki/loki_test.go
index efe31728218d7..ba6d0808fabaa 100644
--- a/plugins/outputs/loki/loki_test.go
+++ b/plugins/outputs/loki/loki_test.go
@@ -4,14 +4,15 @@ import (
 	"compress/gzip"
 	"encoding/json"
 	"fmt"
-	"github.com/influxdata/telegraf/testutil"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"testing"
 	"time"
 
+	"github.com/influxdata/telegraf/testutil"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/internal"
 	"github.com/stretchr/testify/require"
@@ -215,7 +216,7 @@ func TestContentEncodingGzip(t *testing.T) {
 			require.NoError(t, err)
 		}
 
-		payload, err := ioutil.ReadAll(body)
+		payload, err := io.ReadAll(body)
 		require.NoError(t, err)
 
 		var s Request
@@ -394,7 +395,7 @@ func TestMetricSorting(t *testing.T) {
 		body := r.Body
 		var err error
 
-		payload, err := ioutil.ReadAll(body)
+		payload, err := io.ReadAll(body)
 		require.NoError(t, err)
 
 		var s Request
diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md
index abb770f068d4f..f82d7597c5bea 100644
--- a/plugins/outputs/mqtt/README.md
+++ b/plugins/outputs/mqtt/README.md
@@ -40,6 +40,12 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt
   ## When true, messages will have RETAIN flag set.
   # retain = false
 
+  ## Defines the maximum length of time that the broker and client may not communicate.
+  ## Defaults to 0 which turns the feature off. For version v2.0.12 of mosquitto there is a
+  ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set.
+  ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30.
+  # keep_alive = 0
+
   ## Data format to output.
# data_format = "influx" ``` @@ -62,3 +68,4 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt * `batch`: When true, metrics will be sent in one MQTT message per flush. Otherwise, metrics are written one metric per MQTT message. * `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) +* `keep_alive`: Defines the maximum length of time that the broker and client may not communicate with each other. Defaults to 0 which deactivates this feature. diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 584a79ffd2ef1..54203ee0dba66 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -16,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/serializers" ) +const ( + defaultKeepAlive = 0 +) + var sampleConfig = ` servers = ["localhost:1883"] # required. @@ -55,6 +59,12 @@ var sampleConfig = ` ## actually reads it # retain = false + ## Defines the maximum length of time that the broker and client may not communicate. + ## Defaults to 0 which turns the feature off. For version v2.0.12 of eclipse/mosquitto there is a + ## [bug](https://github.com/eclipse/mosquitto/issues/2117) which requires keep_alive to be set. + ## As a reference eclipse/paho.mqtt.golang v1.3.0 defaults to 30. + # keep_alive = 0 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -72,8 +82,9 @@ type MQTT struct { QoS int `toml:"qos"` ClientID string `toml:"client_id"` tls.ClientConfig - BatchMessage bool `toml:"batch"` - Retain bool `toml:"retain"` + BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` + KeepAlive int64 `toml:"keep_alive"` client paho.Client opts *paho.ClientOptions @@ -190,7 +201,7 @@ func (m *MQTT) publish(topic string, body []byte) error { func (m *MQTT) createOpts() (*paho.ClientOptions, error) { opts := paho.NewClientOptions() - opts.KeepAlive = 0 + opts.KeepAlive = m.KeepAlive if m.Timeout < config.Duration(time.Second) { m.Timeout = config.Duration(5 * time.Second) @@ -237,6 +248,8 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { func init() { outputs.Add("mqtt", func() telegraf.Output { - return &MQTT{} + return &MQTT{ + KeepAlive: defaultKeepAlive, + } }) } diff --git a/plugins/outputs/mqtt/mqtt_test.go b/plugins/outputs/mqtt/mqtt_test.go index 8affce1c93ddf..fd36d6d0577ac 100644 --- a/plugins/outputs/mqtt/mqtt_test.go +++ b/plugins/outputs/mqtt/mqtt_test.go @@ -19,6 +19,7 @@ func TestConnectAndWriteIntegration(t *testing.T) { m := &MQTT{ Servers: []string{url}, serializer: s, + KeepAlive: 30, } // Verify that we can connect to the MQTT broker diff --git a/plugins/outputs/opentelemetry/opentelemetry.go b/plugins/outputs/opentelemetry/opentelemetry.go index ea68fbae6323a..e1bbc9322e759 100644 --- a/plugins/outputs/opentelemetry/opentelemetry.go +++ b/plugins/outputs/opentelemetry/opentelemetry.go @@ -13,6 +13,9 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + // This causes the gRPC library to register gzip compression. 
+ _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" ) type OpenTelemetry struct { @@ -159,7 +162,19 @@ func (o *OpenTelemetry) Write(metrics []telegraf.Metric) error { return nil } + if len(o.Attributes) > 0 { + for i := 0; i < md.ResourceMetrics().Len(); i++ { + for k, v := range o.Attributes { + md.ResourceMetrics().At(i).Resource().Attributes().UpsertString(k, v) + } + } + } + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.Timeout)) + + if len(o.Headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(o.Headers)) + } defer cancel() _, err := o.metricsServiceClient.Export(ctx, md, o.callOptions...) return err diff --git a/plugins/outputs/opentelemetry/opentelemetry_test.go b/plugins/outputs/opentelemetry/opentelemetry_test.go index b61f480978ee4..6ebf1829bd540 100644 --- a/plugins/outputs/opentelemetry/opentelemetry_test.go +++ b/plugins/outputs/opentelemetry/opentelemetry_test.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/model/otlpgrpc" "go.opentelemetry.io/collector/model/pdata" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) func TestOpenTelemetry(t *testing.T) { @@ -25,15 +26,16 @@ func TestOpenTelemetry(t *testing.T) { { rm := expect.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("host.name", "potato") + rm.Resource().Attributes().InsertString("attr-key", "attr-val") ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() ilm.InstrumentationLibrary().SetName("My Library Name") m := ilm.Metrics().AppendEmpty() m.SetName("cpu_temp") m.SetDataType(pdata.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() - dp.LabelsMap().Insert("foo", "bar") + dp.Attributes().InsertString("foo", "bar") dp.SetTimestamp(pdata.Timestamp(1622848686000000000)) - dp.SetValue(87.332) + dp.SetDoubleVal(87.332) } m := newMockOtelService(t) t.Cleanup(m.Cleanup) @@ -43,6 +45,8 @@ func TestOpenTelemetry(t *testing.T) { plugin := &OpenTelemetry{ ServiceAddress: m.Address(), Timeout: config.Duration(time.Second), + Headers: map[string]string{"test": "header1"}, + Attributes: map[string]string{"attr-key": "attr-val"}, metricsConverter: metricsConverter, grpcClientConn: m.GrpcClient(), metricsServiceClient: otlpgrpc.NewMetricsClient(m.GrpcClient()), @@ -131,5 +135,8 @@ func (m *mockOtelService) Address() string { func (m *mockOtelService) Export(ctx context.Context, request pdata.Metrics) (otlpgrpc.MetricsResponse, error) { m.metrics = request.Clone() + ctxMetadata, ok := metadata.FromIncomingContext(ctx) + assert.Equal(m.t, []string{"header1"}, ctxMetadata.Get("test")) + assert.True(m.t, ok) return otlpgrpc.MetricsResponse{}, nil } diff --git a/plugins/outputs/opentsdb/opentsdb_http.go b/plugins/outputs/opentsdb/opentsdb_http.go index b164765850578..582a9bb85fc9a 100644 --- a/plugins/outputs/opentsdb/opentsdb_http.go +++ b/plugins/outputs/opentsdb/opentsdb_http.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "net/http" "net/http/httputil" @@ -163,7 +162,7 @@ func (o *openTSDBHttp) flush() error { fmt.Printf("Received response\n%s\n\n", dump) } else { // Important so http client reuse connection for next request if need be. 
- io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) } if resp.StatusCode/100 != 2 { diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go index 39b8fec262095..95fa97fb688b7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -261,7 +261,7 @@ rpc_duration_seconds_count 2693 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -392,7 +392,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -422,7 +422,7 @@ func TestLandingPage(t *testing.T) { resp, err := http.Get(u.String()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, expected, strings.TrimSpace(string(actual))) diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go index 27be9103b28bd..c5ff76d4017a7 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -321,7 +321,7 @@ cpu_usage_idle_count{cpu="cpu1"} 20 require.NoError(t, err) require.Equal(t, http.StatusOK, resp.StatusCode) defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, @@ -452,7 +452,7 @@ rpc_duration_seconds_count 2693 resp, err := http.Get(output.URL()) require.NoError(t, err) - actual, err := ioutil.ReadAll(resp.Body) + actual, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, diff --git a/plugins/outputs/sensu/sensu.go b/plugins/outputs/sensu/sensu.go index 568f8f7a144e4..3cd8b2274e52a 100644 --- a/plugins/outputs/sensu/sensu.go +++ b/plugins/outputs/sensu/sensu.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math" "net/http" "net/url" @@ -336,7 +335,7 @@ func (s *Sensu) write(reqBody []byte) error { defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { - bodyData, err := ioutil.ReadAll(resp.Body) + bodyData, err := io.ReadAll(resp.Body) if err != nil { s.Log.Debugf("Couldn't read response body: %v", err) } diff --git a/plugins/outputs/sensu/sensu_test.go b/plugins/outputs/sensu/sensu_test.go index 249775727a481..e7a272ed5e149 100644 --- a/plugins/outputs/sensu/sensu_test.go +++ b/plugins/outputs/sensu/sensu_test.go @@ -3,7 +3,7 @@ package sensu import ( "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" "net/http/httptest" @@ -118,7 +118,7 @@ func TestConnectAndWrite(t *testing.T) { require.Equal(t, expectedURL, r.URL.String()) require.Equal(t, expectedAuthHeader, r.Header.Get("Authorization")) // let's make sure what we received is a valid Sensu event that contains all of the expected data - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) 
receivedEvent := &corev2.Event{} err = json.Unmarshal(body, receivedEvent) diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 3c20583e15e20..0decb644cccab 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "io/ioutil" "net" "os" "path/filepath" @@ -46,7 +45,7 @@ func TestSocketWriter_udp(t *testing.T) { } func TestSocketWriter_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") @@ -71,7 +70,7 @@ func TestSocketWriter_unixgram(t *testing.T) { t.Skip("Skipping on Windows, as unixgram sockets are not supported") } - tmpdir, err := ioutil.TempDir("", "telegraf") + tmpdir, err := os.MkdirTemp("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") diff --git a/plugins/outputs/sql/README.md b/plugins/outputs/sql/README.md index 6fb215612ecaf..77b89762a7a87 100644 --- a/plugins/outputs/sql/README.md +++ b/plugins/outputs/sql/README.md @@ -104,6 +104,7 @@ through the convert settings. # timestamp = "TIMESTAMP" # defaultvalue = "TEXT" # unsigned = "UNSIGNED" + # bool = "BOOL" ``` ## Driver-specific information diff --git a/plugins/outputs/sql/sql.go b/plugins/outputs/sql/sql.go index 3e003d3309873..fecaf2f6e7661 100644 --- a/plugins/outputs/sql/sql.go +++ b/plugins/outputs/sql/sql.go @@ -22,6 +22,7 @@ type ConvertStruct struct { Timestamp string Defaultvalue string Unsigned string + Bool string } type SQL struct { @@ -103,6 +104,8 @@ func (p *SQL) deriveDatatype(value interface{}) string { datatype = p.Convert.Real case string: datatype = p.Convert.Text + case bool: + datatype = p.Convert.Bool default: datatype = p.Convert.Defaultvalue p.Log.Errorf("Unknown datatype: '%T' %v", value, value) @@ -272,6 +275,7 @@ func newSQL() *SQL { Timestamp: "TIMESTAMP", Defaultvalue: "TEXT", Unsigned: "UNSIGNED", + Bool: "BOOL", }, } } diff --git a/plugins/outputs/sql/sql_test.go b/plugins/outputs/sql/sql_test.go index c57570442c617..ef02c89b11fad 100644 --- a/plugins/outputs/sql/sql_test.go +++ b/plugins/outputs/sql/sql_test.go @@ -3,7 +3,6 @@ package sql import ( "context" "fmt" - "io/ioutil" "math/rand" "os" "path/filepath" @@ -100,6 +99,14 @@ var ( Key: "int64_two", Value: int64(2345), }, + { + Key: "bool_one", + Value: true, + }, + { + Key: "bool_two", + Value: false, + }, }, ts, ), @@ -154,7 +161,7 @@ func TestMysqlIntegration(t *testing.T) { const username = "root" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-mysql-*") + outDir, err := os.MkdirTemp("", "tg-mysql-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -222,9 +229,9 @@ func TestMysqlIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/mariadb/expected.sql") + expected, err := os.ReadFile("testdata/mariadb/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } @@ -244,7 +251,7 @@ func TestPostgresIntegration(t *testing.T) { const username = "postgres" password := pwgen(32) - outDir, err := ioutil.TempDir("", "tg-postgres-*") + outDir, err := os.MkdirTemp("", 
"tg-postgres-*") require.NoError(t, err) defer os.RemoveAll(outDir) @@ -321,9 +328,9 @@ func TestPostgresIntegration(t *testing.T) { require.FileExists(t, dumpfile) //compare the dump to what we expected - expected, err := ioutil.ReadFile("testdata/postgres/expected.sql") + expected, err := os.ReadFile("testdata/postgres/expected.sql") require.NoError(t, err) - actual, err := ioutil.ReadFile(dumpfile) + actual, err := os.ReadFile(dumpfile) require.NoError(t, err) require.Equal(t, string(expected), string(actual)) } diff --git a/plugins/outputs/sql/sqlite.go b/plugins/outputs/sql/sqlite.go index 3703f42923ac1..15666101a957d 100644 --- a/plugins/outputs/sql/sqlite.go +++ b/plugins/outputs/sql/sqlite.go @@ -1,4 +1,7 @@ -// +build linux,freebsd,darwin +//go:build linux && freebsd && darwin && (!mips || !mips64) +// +build linux +// +build freebsd +// +build darwin // +build !mips !mips64 package sql diff --git a/plugins/outputs/sql/sqlite_test.go b/plugins/outputs/sql/sqlite_test.go index 6ed08a2570662..7707f9d085e7e 100644 --- a/plugins/outputs/sql/sqlite_test.go +++ b/plugins/outputs/sql/sqlite_test.go @@ -1,11 +1,12 @@ -// +build linux,freebsd +//go:build linux && freebsd && (!mips || !mips64) +// +build linux +// +build freebsd // +build !mips !mips64 package sql import ( gosql "database/sql" - "io/ioutil" "os" "path/filepath" "testing" @@ -16,7 +17,7 @@ import ( ) func TestSqlite(t *testing.T) { - outDir, err := ioutil.TempDir("", "tg-sqlite-*") + outDir, err := os.MkdirTemp("", "tg-sqlite-*") require.NoError(t, err) defer os.RemoveAll(outDir) diff --git a/plugins/outputs/sql/testdata/mariadb/expected.sql b/plugins/outputs/sql/testdata/mariadb/expected.sql index 49a3095db4da2..43e0fa5e545b0 100644 --- a/plugins/outputs/sql/testdata/mariadb/expected.sql +++ b/plugins/outputs/sql/testdata/mariadb/expected.sql @@ -21,10 +21,12 @@ CREATE TABLE `metric_one` ( `tag_one` text DEFAULT NULL, `tag_two` text DEFAULT NULL, `int64_one` int(11) DEFAULT NULL, - `int64_two` int(11) DEFAULT NULL + `int64_two` int(11) DEFAULT NULL, + `bool_one` tinyint(1) DEFAULT NULL, + `bool_two` tinyint(1) DEFAULT NULL ); /*!40101 SET character_set_client = @saved_cs_client */; -INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345,1,0); /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `metric_two` ( diff --git a/plugins/outputs/sql/testdata/postgres/expected.sql b/plugins/outputs/sql/testdata/postgres/expected.sql index 8bc2b2fc83018..c1ee733ac12d4 100644 --- a/plugins/outputs/sql/testdata/postgres/expected.sql +++ b/plugins/outputs/sql/testdata/postgres/expected.sql @@ -21,7 +21,9 @@ CREATE TABLE public.metric_one ( tag_one text, tag_two text, int64_one integer, - int64_two integer + int64_two integer, + bool_one boolean, + bool_two boolean ); ALTER TABLE public.metric_one OWNER TO postgres; CREATE TABLE public.metric_two ( @@ -33,8 +35,8 @@ ALTER TABLE public.metric_two OWNER TO postgres; COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; 2021-05-17 22:04:45 tag4 string2 \. -COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; -2021-05-17 22:04:45 tag1 tag2 1234 2345 +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two, bool_one, bool_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 t f \. 
COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; 2021-05-17 22:04:45 tag3 string1 diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index 4d561a27b5007..d4f660ff7c569 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. + monitoring "cloud.google.com/go/monitoring/apiv3/v2" // Imports the Stackdriver Monitoring client package. googlepb "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" @@ -218,7 +218,7 @@ func (s *Stackdriver) Write(metrics []telegraf.Metric) error { // Prepare time series request. timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), TimeSeries: timeSeries, } diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go index 7ddaa44854620..8af553b374c53 100644 --- a/plugins/outputs/stackdriver/stackdriver_test.go +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" "github.com/golang/protobuf/proto" emptypb "github.com/golang/protobuf/ptypes/empty" googlepb "github.com/golang/protobuf/ptypes/timestamp" diff --git a/plugins/outputs/sumologic/sumologic_test.go b/plugins/outputs/sumologic/sumologic_test.go index 5ce502bab2c0e..5629defa4506e 100644 --- a/plugins/outputs/sumologic/sumologic_test.go +++ b/plugins/outputs/sumologic/sumologic_test.go @@ -6,7 +6,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -300,7 +299,7 @@ func TestContentEncodingGzip(t *testing.T) { body, err := gzip.NewReader(r.Body) require.NoError(t, err) - payload, err := ioutil.ReadAll(body) + payload, err := io.ReadAll(body) require.NoError(t, err) assert.Equal(t, string(payload), "metric=cpu field=value 42 0\n") diff --git a/plugins/outputs/timestream/timestream.go b/plugins/outputs/timestream/timestream.go index 2c77c408e7357..6478563b6b245 100644 --- a/plugins/outputs/timestream/timestream.go +++ b/plugins/outputs/timestream/timestream.go @@ -1,7 +1,9 @@ package timestream import ( + "context" "encoding/binary" + "errors" "fmt" "hash/fnv" "reflect" @@ -11,9 +13,10 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" + "github.com/aws/smithy-go" internalaws "github.com/influxdata/telegraf/config/aws" ) @@ -38,9 +41,9 @@ type ( } WriteClient interface { - CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) - WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) - DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) + CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) 
(*timestreamwrite.CreateTableOutput, error) + WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) + DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) } ) @@ -57,7 +60,7 @@ const MaxRecordsPerCall = 100 var sampleConfig = ` ## Amazon Region region = "us-east-1" - + ## Amazon Credentials ## Credentials are loaded in the following order: ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified @@ -75,7 +78,7 @@ var sampleConfig = ` #role_session_name = "" #profile = "" #shared_credential_file = "" - + ## Endpoint to make request against, the correct endpoint is automatically ## determined and this option should only be set if you wish to override the ## default. @@ -88,7 +91,7 @@ var sampleConfig = ` ## Specifies if the plugin should describe the Timestream database upon starting ## to validate if it has access necessary permissions, connection, etc., as a safety check. - ## If the describe operation fails, the plugin will not start + ## If the describe operation fails, the plugin will not start ## and therefore the Telegraf agent will not start. describe_database_on_start = false @@ -97,17 +100,17 @@ var sampleConfig = ` ## For example, consider the following data in line protocol format: ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200 ## airquality,location=us-west no2=5,pm25=16 1465839830100400200 - ## where weather and airquality are the measurement names, location and season are tags, + ## where weather and airquality are the measurement names, location and season are tags, ## and temperature, humidity, no2, pm25 are fields. ## In multi-table mode: ## - first line will be ingested to table named weather ## - second line will be ingested to table named airquality ## - the tags will be represented as dimensions ## - first table (weather) will have two records: - ## one with measurement name equals to temperature, + ## one with measurement name equals to temperature, ## another with measurement name equals to humidity ## - second table (airquality) will have two records: - ## one with measurement name equals to no2, + ## one with measurement name equals to no2, ## another with measurement name equals to pm25 ## - the Timestream tables from the example will look like this: ## TABLE "weather": @@ -141,7 +144,7 @@ var sampleConfig = ` ## Specifies the Timestream table where the metrics will be uploaded. # single_table_name = "yourTableNameHere" - ## Only valid and required for mapping_mode = "single-table" + ## Only valid and required for mapping_mode = "single-table" ## Describes what will be the Timestream dimension name for the Telegraf ## measurement name. # single_table_dimension_name_for_telegraf_measurement_name = "namespace" @@ -169,9 +172,12 @@ var sampleConfig = ` ` // WriteFactory function provides a way to mock the client instantiation for testing purposes. 
-var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) WriteClient {
-	configProvider := credentialConfig.Credentials()
-	return timestreamwrite.New(configProvider)
+var WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) {
+	cfg, err := credentialConfig.Credentials()
+	if err != nil {
+		return &timestreamwrite.Client{}, err
+	}
+	return timestreamwrite.NewFromConfig(cfg), nil
 }
 
 func (t *Timestream) Connect() error {
@@ -221,7 +227,10 @@ func (t *Timestream) Connect() error {
 	t.Log.Infof("Constructing Timestream client for '%s' mode", t.MappingMode)
 
-	svc := WriteFactory(&t.CredentialConfig)
+	svc, err := WriteFactory(&t.CredentialConfig)
+	if err != nil {
+		return err
+	}
 
 	if t.DescribeDatabaseOnStart {
 		t.Log.Infof("Describing database '%s' in region '%s'", t.DatabaseName, t.Region)
@@ -229,7 +238,7 @@
 		describeDatabaseInput := &timestreamwrite.DescribeDatabaseInput{
 			DatabaseName: aws.String(t.DatabaseName),
 		}
-		describeDatabaseOutput, err := svc.DescribeDatabase(describeDatabaseInput)
+		describeDatabaseOutput, err := svc.DescribeDatabase(context.Background(), describeDatabaseInput)
 		if err != nil {
 			t.Log.Errorf("Couldn't describe database '%s'. Check error, fix permissions, connectivity, create database.", t.DatabaseName)
 			return err
@@ -272,33 +281,45 @@ func (t *Timestream) Write(metrics []telegraf.Metric) error {
 func (t *Timestream) writeToTimestream(writeRecordsInput *timestreamwrite.WriteRecordsInput, resourceNotFoundRetry bool) error {
 	t.Log.Debugf("Writing to Timestream: '%v' with ResourceNotFoundRetry: '%t'", writeRecordsInput, resourceNotFoundRetry)
 
-	_, err := t.svc.WriteRecords(writeRecordsInput)
+	_, err := t.svc.WriteRecords(context.Background(), writeRecordsInput)
 	if err != nil {
 		// Telegraf will retry ingesting the metrics if an error is returned from the plugin.
 		// Therefore, return error only for retryable exceptions: ThrottlingException and 5xx exceptions.
-		if e, ok := err.(awserr.Error); ok {
-			switch e.Code() {
-			case timestreamwrite.ErrCodeResourceNotFoundException:
-				if resourceNotFoundRetry {
-					t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'",
-						t.DatabaseName, *writeRecordsInput.TableName, e)
-					return t.createTableAndRetry(writeRecordsInput)
-				}
-				t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
-			case timestreamwrite.ErrCodeThrottlingException:
-				return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
-					t.DatabaseName, *writeRecordsInput.TableName, err)
-			case timestreamwrite.ErrCodeInternalServerException:
-				return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
-					t.DatabaseName, *writeRecordsInput.TableName, err)
-			default:
-				t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
+		var notFound *types.ResourceNotFoundException
+		if errors.As(err, &notFound) {
+			if resourceNotFoundRetry {
+				t.Log.Warnf("Failed to write to Timestream database '%s' table '%s'. Error: '%s'",
+					t.DatabaseName, *writeRecordsInput.TableName, notFound)
+				return t.createTableAndRetry(writeRecordsInput)
 			}
-		} else {
+			t.logWriteToTimestreamError(notFound, writeRecordsInput.TableName)
+		}
+
+		var rejected *types.RejectedRecordsException
+		if errors.As(err, &rejected) {
+			t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
+			return nil
+		}
+
+		var throttling *types.ThrottlingException
+		if errors.As(err, &throttling) {
+			return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
+				t.DatabaseName, *writeRecordsInput.TableName, throttling)
+		}
+
+		var internal *types.InternalServerException
+		if errors.As(err, &internal) {
+			return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
+				t.DatabaseName, *writeRecordsInput.TableName, internal)
+		}
+
+		var operation *smithy.OperationError
+		if !errors.As(err, &operation) {
 			// Retry other, non-aws errors.
 			return fmt.Errorf("unable to write to Timestream database '%s' table '%s'. Error: %s",
 				t.DatabaseName, *writeRecordsInput.TableName, err)
 		}
+		t.logWriteToTimestreamError(err, writeRecordsInput.TableName)
 	}
 	return nil
 }
@@ -328,27 +349,25 @@ func (t *Timestream) createTable(tableName *string) error {
 	createTableInput := &timestreamwrite.CreateTableInput{
 		DatabaseName: aws.String(t.DatabaseName),
 		TableName:    aws.String(*tableName),
-		RetentionProperties: &timestreamwrite.RetentionProperties{
-			MagneticStoreRetentionPeriodInDays: aws.Int64(t.CreateTableMagneticStoreRetentionPeriodInDays),
-			MemoryStoreRetentionPeriodInHours:  aws.Int64(t.CreateTableMemoryStoreRetentionPeriodInHours),
+		RetentionProperties: &types.RetentionProperties{
+			MagneticStoreRetentionPeriodInDays: t.CreateTableMagneticStoreRetentionPeriodInDays,
+			MemoryStoreRetentionPeriodInHours:  t.CreateTableMemoryStoreRetentionPeriodInHours,
 		},
 	}
-	var tags []*timestreamwrite.Tag
+	var tags []types.Tag
 	for key, val := range t.CreateTableTags {
-		tags = append(tags, &timestreamwrite.Tag{
+		tags = append(tags, types.Tag{
 			Key:   aws.String(key),
 			Value: aws.String(val),
 		})
 	}
-	createTableInput.SetTags(tags)
+	createTableInput.Tags = tags
 
-	_, err := t.svc.CreateTable(createTableInput)
+	_, err := t.svc.CreateTable(context.Background(), createTableInput)
 	if err != nil {
-		if e, ok := err.(awserr.Error); ok {
+		if _, ok := err.(*types.ConflictException); ok {
 			// if the table was created in the meantime, it's ok.
-			if e.Code() == timestreamwrite.ErrCodeConflictException {
-				return nil
-			}
+			return nil
 		}
 		return err
 	}
@@ -374,17 +393,17 @@ func (t *Timestream) TransformMetrics(metrics []telegraf.Metric) []*timestreamwr
 		newWriteRecord := &timestreamwrite.WriteRecordsInput{
 			DatabaseName: aws.String(t.DatabaseName),
 			Records:      records,
-			CommonAttributes: &timestreamwrite.Record{
+			CommonAttributes: &types.Record{
 				Dimensions: dimensions,
 				Time:       aws.String(timeValue),
-				TimeUnit:   aws.String(timeUnit),
+				TimeUnit:   timeUnit,
 			},
 		}
 		if t.MappingMode == MappingModeSingleTable {
-			newWriteRecord.SetTableName(t.SingleTableName)
+			newWriteRecord.TableName = &t.SingleTableName
 		}
 		if t.MappingMode == MappingModeMultiTable {
-			newWriteRecord.SetTableName(m.Name())
+			newWriteRecord.TableName = aws.String(m.Name())
 		}
 
 		writeRequests[id] = newWriteRecord
@@ -434,17 +453,17 @@ func hashFromMetricTimeNameTagKeys(m telegraf.Metric) uint64 {
 	return h.Sum64()
 }
 
-func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.Dimension {
-	var dimensions []*timestreamwrite.Dimension
+func (t *Timestream) buildDimensions(point telegraf.Metric) []types.Dimension {
+	var dimensions []types.Dimension
 	for tagName, tagValue := range point.Tags() {
-		dimension := &timestreamwrite.Dimension{
+		dimension := types.Dimension{
 			Name:  aws.String(tagName),
 			Value: aws.String(tagValue),
 		}
 		dimensions = append(dimensions, dimension)
 	}
 	if t.MappingMode == MappingModeSingleTable {
-		dimension := &timestreamwrite.Dimension{
+		dimension := types.Dimension{
 			Name:  aws.String(t.SingleTableDimensionNameForTelegrafMeasurementName),
 			Value: aws.String(point.Name()),
 		}
@@ -457,8 +476,8 @@ func (t *Timestream) buildDimensions(point telegraf.Metric) []*timestreamwrite.D
 // Tags and time are not included - common attributes are built separately.
 // Records with unsupported Metric Field type are skipped.
 // It returns an array of Timestream write records.
-func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite.Record {
-	var records []*timestreamwrite.Record
+func (t *Timestream) buildWriteRecords(point telegraf.Metric) []types.Record {
+	var records []types.Record
 	for fieldName, fieldValue := range point.Fields() {
 		stringFieldValue, stringFieldValueType, ok := convertValue(fieldValue)
 		if !ok {
@@ -467,9 +486,9 @@
 				fieldName, reflect.TypeOf(fieldValue))
 			continue
 		}
-		record := &timestreamwrite.Record{
+		record := types.Record{
 			MeasureName:      aws.String(fieldName),
-			MeasureValueType: aws.String(stringFieldValueType),
+			MeasureValueType: stringFieldValueType,
 			MeasureValue:     aws.String(stringFieldValue),
 		}
 		records = append(records, record)
@@ -480,13 +499,13 @@ func (t *Timestream) buildWriteRecords(point telegraf.Metric) []*timestreamwrite
 // partitionRecords splits the Timestream records into smaller slices of a max size
 // so that are under the limit for the Timestream API call.
 // It returns the array of array of records.
-func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestreamwrite.Record { +func partitionRecords(size int, records []types.Record) [][]types.Record { numberOfPartitions := len(records) / size if len(records)%size != 0 { numberOfPartitions++ } - partitions := make([][]*timestreamwrite.Record, numberOfPartitions) + partitions := make([][]types.Record, numberOfPartitions) for i := 0; i < numberOfPartitions; i++ { start := size * i @@ -503,25 +522,19 @@ func partitionRecords(size int, records []*timestreamwrite.Record) [][]*timestre // getTimestreamTime produces Timestream TimeUnit and TimeValue with minimum possible granularity // while maintaining the same information. -func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) { - const ( - TimeUnitS = "SECONDS" - TimeUnitMS = "MILLISECONDS" - TimeUnitUS = "MICROSECONDS" - TimeUnitNS = "NANOSECONDS" - ) - nanosTime := time.UnixNano() +func getTimestreamTime(t time.Time) (timeUnit types.TimeUnit, timeValue string) { + nanosTime := t.UnixNano() if nanosTime%1e9 == 0 { - timeUnit = TimeUnitS + timeUnit = types.TimeUnitSeconds timeValue = strconv.FormatInt(nanosTime/1e9, 10) } else if nanosTime%1e6 == 0 { - timeUnit = TimeUnitMS + timeUnit = types.TimeUnitMilliseconds timeValue = strconv.FormatInt(nanosTime/1e6, 10) } else if nanosTime%1e3 == 0 { - timeUnit = TimeUnitUS + timeUnit = types.TimeUnitMicroseconds timeValue = strconv.FormatInt(nanosTime/1e3, 10) } else { - timeUnit = TimeUnitNS + timeUnit = types.TimeUnitNanoseconds timeValue = strconv.FormatInt(nanosTime, 10) } return @@ -529,61 +542,55 @@ func getTimestreamTime(time time.Time) (timeUnit string, timeValue string) { // convertValue converts single Field value from Telegraf Metric and produces // value, valueType Timestream representation. 
-func convertValue(v interface{}) (value string, valueType string, ok bool) { - const ( - TypeBigInt = "BIGINT" - TypeDouble = "DOUBLE" - TypeBoolean = "BOOLEAN" - TypeVarchar = "VARCHAR" - ) +func convertValue(v interface{}) (value string, valueType types.MeasureValueType, ok bool) { ok = true switch t := v.(type) { case int: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int8: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int16: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int32: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(int64(t), 10) case int64: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatInt(t, 10) case uint: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint8: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint16: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint32: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(uint64(t), 10) case uint64: - valueType = TypeBigInt + valueType = types.MeasureValueTypeBigint value = strconv.FormatUint(t, 10) case float32: - valueType = TypeDouble + valueType = types.MeasureValueTypeDouble value = strconv.FormatFloat(float64(t), 'f', -1, 32) case float64: - valueType = TypeDouble + valueType = types.MeasureValueTypeDouble value = strconv.FormatFloat(t, 'f', -1, 64) case bool: - valueType = TypeBoolean + valueType = types.MeasureValueTypeBoolean if t { value = "true" } else { value = "false" } case string: - valueType = TypeVarchar + valueType = types.MeasureValueTypeVarchar value = t default: // Skip unsupported type. 
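That completes the plugin source: `convertValue` drops its local `BIGINT`/`DOUBLE`/`BOOLEAN`/`VARCHAR` string constants in favor of the SDK's `types.MeasureValueType` enum while the mapping itself is unchanged. A condensed sketch of that mapping, with one representative case per Timestream type (`toMeasure` is illustrative; the real function also handles every integer and float width separately):

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types"
)

// toMeasure condenses the convertValue mapping above: Go integers collapse
// to BIGINT, floats to DOUBLE, bool to BOOLEAN, string to VARCHAR; anything
// else is reported as unsupported so the caller can skip the field.
func toMeasure(v interface{}) (string, types.MeasureValueType, bool) {
	switch t := v.(type) {
	case int64:
		return strconv.FormatInt(t, 10), types.MeasureValueTypeBigint, true
	case float64:
		return strconv.FormatFloat(t, 'f', -1, 64), types.MeasureValueTypeDouble, true
	case bool:
		return strconv.FormatBool(t), types.MeasureValueTypeBoolean, true
	case string:
		return t, types.MeasureValueTypeVarchar, true
	default:
		return "", "", false // e.g. []byte or nil: skipped, as in the plugin
	}
}

func main() {
	for _, v := range []interface{}{int64(-5), 22.1234, true, "foo", []byte("skip")} {
		s, vt, ok := toMeasure(v)
		fmt.Println(s, vt, ok)
	}
}
```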
diff --git a/plugins/outputs/timestream/timestream_internal_test.go b/plugins/outputs/timestream/timestream_internal_test.go index 1f1194d707b69..d151c10d4b146 100644 --- a/plugins/outputs/timestream/timestream_internal_test.go +++ b/plugins/outputs/timestream/timestream_internal_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" "github.com/stretchr/testify/assert" ) @@ -19,57 +19,57 @@ func TestGetTimestreamTime(t *testing.T) { tOnlySeconds := time.Date(2020, time.November, 10, 23, 44, 20, 0, time.UTC) tUnitNanos, tValueNanos := getTimestreamTime(tWithNanos) - assertions.Equal("NANOSECONDS", tUnitNanos) + assertions.Equal(types.TimeUnitNanoseconds, tUnitNanos) assertions.Equal("1605051860000000123", tValueNanos) tUnitMicros, tValueMicros := getTimestreamTime(tWithMicros) - assertions.Equal("MICROSECONDS", tUnitMicros) + assertions.Equal(types.TimeUnitMicroseconds, tUnitMicros) assertions.Equal("1605051860000123", tValueMicros) tUnitMillis, tValueMillis := getTimestreamTime(tWithMillis) - assertions.Equal("MILLISECONDS", tUnitMillis) + assertions.Equal(types.TimeUnitMilliseconds, tUnitMillis) assertions.Equal("1605051860123", tValueMillis) tUnitSeconds, tValueSeconds := getTimestreamTime(tOnlySeconds) - assertions.Equal("SECONDS", tUnitSeconds) + assertions.Equal(types.TimeUnitSeconds, tUnitSeconds) assertions.Equal("1605051860", tValueSeconds) } func TestPartitionRecords(t *testing.T) { assertions := assert.New(t) - testDatum := timestreamwrite.Record{ + testDatum := types.Record{ MeasureName: aws.String("Foo"), - MeasureValueType: aws.String("DOUBLE"), + MeasureValueType: types.MeasureValueTypeDouble, MeasureValue: aws.String("123"), } - var zeroDatum []*timestreamwrite.Record - oneDatum := []*timestreamwrite.Record{&testDatum} - twoDatum := []*timestreamwrite.Record{&testDatum, &testDatum} - threeDatum := []*timestreamwrite.Record{&testDatum, &testDatum, &testDatum} + var zeroDatum []types.Record + oneDatum := []types.Record{testDatum} + twoDatum := []types.Record{testDatum, testDatum} + threeDatum := []types.Record{testDatum, testDatum, testDatum} - assertions.Equal([][]*timestreamwrite.Record{}, partitionRecords(2, zeroDatum)) - assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]*timestreamwrite.Record{oneDatum}, partitionRecords(2, oneDatum)) - assertions.Equal([][]*timestreamwrite.Record{twoDatum}, partitionRecords(2, twoDatum)) - assertions.Equal([][]*timestreamwrite.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) + assertions.Equal([][]types.Record{}, partitionRecords(2, zeroDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{oneDatum}, partitionRecords(2, oneDatum)) + assertions.Equal([][]types.Record{twoDatum}, partitionRecords(2, twoDatum)) + assertions.Equal([][]types.Record{twoDatum, oneDatum}, partitionRecords(2, threeDatum)) } func TestConvertValueSupported(t *testing.T) { intInputValues := []interface{}{-1, int8(-2), int16(-3), int32(-4), int64(-5)} intOutputValues := []string{"-1", "-2", "-3", "-4", "-5"} - intOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + intOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, 
types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} testConvertValueSupportedCases(t, intInputValues, intOutputValues, intOutputValueTypes) uintInputValues := []interface{}{uint(1), uint8(2), uint16(3), uint32(4), uint64(5)} uintOutputValues := []string{"1", "2", "3", "4", "5"} - uintOutputValueTypes := []string{"BIGINT", "BIGINT", "BIGINT", "BIGINT", "BIGINT"} + uintOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint, types.MeasureValueTypeBigint} testConvertValueSupportedCases(t, uintInputValues, uintOutputValues, uintOutputValueTypes) otherInputValues := []interface{}{"foo", float32(22.123), 22.1234, true} otherOutputValues := []string{"foo", "22.123", "22.1234", "true"} - otherOutputValueTypes := []string{"VARCHAR", "DOUBLE", "DOUBLE", "BOOLEAN"} + otherOutputValueTypes := []types.MeasureValueType{types.MeasureValueTypeVarchar, types.MeasureValueTypeDouble, types.MeasureValueTypeDouble, types.MeasureValueTypeBoolean} testConvertValueSupportedCases(t, otherInputValues, otherOutputValues, otherOutputValueTypes) } @@ -80,7 +80,7 @@ func TestConvertValueUnsupported(t *testing.T) { } func testConvertValueSupportedCases(t *testing.T, - inputValues []interface{}, outputValues []string, outputValueTypes []string) { + inputValues []interface{}, outputValues []string, outputValueTypes []types.MeasureValueType) { assertions := assert.New(t) for i, inputValue := range inputValues { v, vt, ok := convertValue(inputValue) diff --git a/plugins/outputs/timestream/timestream_test.go b/plugins/outputs/timestream/timestream_test.go index 67cdb4495c1d8..be61a06a15358 100644 --- a/plugins/outputs/timestream/timestream_test.go +++ b/plugins/outputs/timestream/timestream_test.go @@ -1,8 +1,8 @@ -package timestream_test +package timestream import ( + "context" "fmt" - "github.com/aws/aws-sdk-go/aws/awserr" "reflect" "sort" "strconv" @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite" + "github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" - ts "github.com/influxdata/telegraf/plugins/outputs/timestream" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -33,41 +33,37 @@ var time2 = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) const time2Epoch = "1257894000" -const timeUnit = "SECONDS" - const metricName1 = "metricName1" const metricName2 = "metricName2" -type mockTimestreamClient struct { -} +type mockTimestreamClient struct{} -func (m *mockTimestreamClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { +func (m *mockTimestreamClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { return nil, nil } -func (m *mockTimestreamClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { +func (m *mockTimestreamClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { return nil, nil } -func (m *mockTimestreamClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) 
(*timestreamwrite.DescribeDatabaseOutput, error) { +func (m *mockTimestreamClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { return nil, fmt.Errorf("hello from DescribeDatabase") } func TestConnectValidatesConfigParameters(t *testing.T) { assertions := assert.New(t) - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { - return &mockTimestreamClient{} + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { + return &mockTimestreamClient{}, nil } - // checking base arguments - noDatabaseName := ts.Timestream{Log: testutil.Logger{}} + noDatabaseName := Timestream{Log: testutil.Logger{}} assertions.Contains(noDatabaseName.Connect().Error(), "DatabaseName") - noMappingMode := ts.Timestream{ + noMappingMode := Timestream{ DatabaseName: tsDbName, Log: testutil.Logger{}, } assertions.Contains(noMappingMode.Connect().Error(), "MappingMode") - incorrectMappingMode := ts.Timestream{ + incorrectMappingMode := Timestream{ DatabaseName: tsDbName, MappingMode: "foo", Log: testutil.Logger{}, @@ -75,24 +71,24 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Contains(incorrectMappingMode.Connect().Error(), "single-table") // multi-table arguments - validMappingModeMultiTable := ts.Timestream{ + validMappingModeMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, Log: testutil.Logger{}, } assertions.Nil(validMappingModeMultiTable.Connect()) - singleTableNameWithMultiTable := ts.Timestream{ + singleTableNameWithMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, SingleTableName: testSingleTableName, Log: testutil.Logger{}, } assertions.Contains(singleTableNameWithMultiTable.Connect().Error(), "SingleTableName") - singleTableDimensionWithMultiTable := ts.Timestream{ + singleTableDimensionWithMultiTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } @@ -100,25 +96,25 @@ func TestConnectValidatesConfigParameters(t *testing.T) { "SingleTableDimensionNameForTelegrafMeasurementName") // single-table arguments - noTableNameMappingModeSingleTable := ts.Timestream{ + noTableNameMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, Log: testutil.Logger{}, } assertions.Contains(noTableNameMappingModeSingleTable.Connect().Error(), "SingleTableName") - noDimensionNameMappingModeSingleTable := ts.Timestream{ + noDimensionNameMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, SingleTableName: testSingleTableName, Log: testutil.Logger{}, } assertions.Contains(noDimensionNameMappingModeSingleTable.Connect().Error(), "SingleTableDimensionNameForTelegrafMeasurementName") - validConfigurationMappingModeSingleTable := ts.Timestream{ + validConfigurationMappingModeSingleTable := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeSingleTable, + MappingMode: MappingModeSingleTable, SingleTableName: testSingleTableName, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, 
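The remaining test hunks below exercise aws-sdk-go-v2's typed service errors: instead of building errors with v1's `awserr.New` and comparing code strings, the mock clients return concrete `types.ThrottlingException` and `types.RejectedRecordsException` values. Callers match such errors with `errors.As`; a hedged sketch of that idiom (the `classify` helper and its labels are illustrative, not the plugin's actual handling):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/timestreamwrite/types"
)

// classify shows the v2 error idiom: service errors are concrete generated
// types matched with errors.As, replacing v1's awserr.Code() comparisons.
func classify(err error) string {
	var throttle *types.ThrottlingException
	if errors.As(err, &throttle) {
		return "retryable: " + aws.ToString(throttle.Message)
	}
	var rejected *types.RejectedRecordsException
	if errors.As(err, &rejected) {
		return "skip batch: " + aws.ToString(rejected.Message)
	}
	return "unknown"
}

func main() {
	err := &types.ThrottlingException{Message: aws.String("Throttling Test")}
	fmt.Println(classify(err)) // retryable: Throttling Test
}
```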
@@ -126,18 +122,18 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Nil(validConfigurationMappingModeSingleTable.Connect()) // create table arguments - createTableNoMagneticRetention := ts.Timestream{ + createTableNoMagneticRetention := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, Log: testutil.Logger{}, } assertions.Contains(createTableNoMagneticRetention.Connect().Error(), "CreateTableMagneticStoreRetentionPeriodInDays") - createTableNoMemoryRetention := ts.Timestream{ + createTableNoMemoryRetention := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, CreateTableMagneticStoreRetentionPeriodInDays: 3, Log: testutil.Logger{}, @@ -145,9 +141,9 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Contains(createTableNoMemoryRetention.Connect().Error(), "CreateTableMemoryStoreRetentionPeriodInHours") - createTableValid := ts.Timestream{ + createTableValid := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, CreateTableIfNotExists: true, CreateTableMagneticStoreRetentionPeriodInDays: 3, CreateTableMemoryStoreRetentionPeriodInHours: 3, @@ -156,9 +152,9 @@ func TestConnectValidatesConfigParameters(t *testing.T) { assertions.Nil(createTableValid.Connect()) // describe table on start arguments - describeTableInvoked := ts.Timestream{ + describeTableInvoked := Timestream{ DatabaseName: tsDbName, - MappingMode: ts.MappingModeMultiTable, + MappingMode: MappingModeMultiTable, DescribeDatabaseOnStart: true, Log: testutil.Logger{}, } @@ -169,31 +165,30 @@ type mockTimestreamErrorClient struct { ErrorToReturnOnWriteRecords error } -func (m *mockTimestreamErrorClient) CreateTable(*timestreamwrite.CreateTableInput) (*timestreamwrite.CreateTableOutput, error) { +func (m *mockTimestreamErrorClient) CreateTable(context.Context, *timestreamwrite.CreateTableInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.CreateTableOutput, error) { return nil, nil } -func (m *mockTimestreamErrorClient) WriteRecords(*timestreamwrite.WriteRecordsInput) (*timestreamwrite.WriteRecordsOutput, error) { +func (m *mockTimestreamErrorClient) WriteRecords(context.Context, *timestreamwrite.WriteRecordsInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.WriteRecordsOutput, error) { return nil, m.ErrorToReturnOnWriteRecords } -func (m *mockTimestreamErrorClient) DescribeDatabase(*timestreamwrite.DescribeDatabaseInput) (*timestreamwrite.DescribeDatabaseOutput, error) { +func (m *mockTimestreamErrorClient) DescribeDatabase(context.Context, *timestreamwrite.DescribeDatabaseInput, ...func(*timestreamwrite.Options)) (*timestreamwrite.DescribeDatabaseOutput, error) { return nil, nil } func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { assertions := assert.New(t) - - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ - awserr.New(timestreamwrite.ErrCodeThrottlingException, - "Throttling Test", nil), - } + ErrorToReturnOnWriteRecords: &types.ThrottlingException{Message: aws.String("Throttling Test")}, + }, nil } - plugin := ts.Timestream{ - MappingMode: ts.MappingModeMultiTable, + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, DatabaseName: 
tsDbName, Log: testutil.Logger{}, } - plugin.Connect() + assertions.NoError(plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -209,19 +204,18 @@ func TestThrottlingErrorIsReturnedToTelegraf(t *testing.T) { func TestRejectedRecordsErrorResultsInMetricsBeingSkipped(t *testing.T) { assertions := assert.New(t) - - ts.WriteFactory = func(credentialConfig *internalaws.CredentialConfig) ts.WriteClient { + WriteFactory = func(credentialConfig *internalaws.CredentialConfig) (WriteClient, error) { return &mockTimestreamErrorClient{ - awserr.New(timestreamwrite.ErrCodeRejectedRecordsException, - "RejectedRecords Test", nil), - } + ErrorToReturnOnWriteRecords: &types.RejectedRecordsException{Message: aws.String("RejectedRecords Test")}, + }, nil } - plugin := ts.Timestream{ - MappingMode: ts.MappingModeMultiTable, + + plugin := Timestream{ + MappingMode: MappingModeMultiTable, DatabaseName: tsDbName, Log: testutil.Logger{}, } - plugin.Connect() + assertions.NoError(plugin.Connect()) input := testutil.MustMetric( metricName1, map[string]string{"tag1": "value1"}, @@ -271,7 +265,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) { dimensions: map[string]string{testSingleTableDim: metricName1}, measureValues: map[string]string{"value": "20"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2, input3}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -287,7 +281,7 @@ func TestTransformMetricsSkipEmptyMetric(t *testing.T) { dimensions: map[string]string{}, measureValues: map[string]string{"value": "20"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2, input3}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -326,7 +320,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { dimensions: map[string]string{"tag1": "value1", testSingleTableDim: metricName1}, measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, inputs, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -342,7 +336,7 @@ func TestTransformMetricsRequestsAboveLimitAreSplit(t *testing.T) { dimensions: map[string]string{"tag1": "value1"}, measureValues: map[string]string{"value_supported" + strconv.Itoa(maxRecordsInWriteRecordsCall+1): "10"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, inputs, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -378,7 +372,7 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -395,7 +389,7 @@ func TestTransformMetricsDifferentDimensionsSameTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, 
[]*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -431,7 +425,7 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat measureValues: map[string]string{"value_supported1": "20"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -448,7 +442,7 @@ func TestTransformMetricsSameDimensionsDifferentDimensionValuesAreWrittenSeparat measureValues: map[string]string{"value_supported1": "20"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -484,7 +478,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -501,7 +495,7 @@ func TestTransformMetricsSameDimensionsDifferentTimestampsAreWrittenSeparate(t * measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -531,7 +525,7 @@ func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) @@ -542,7 +536,7 @@ func TestTransformMetricsSameDimensionsSameTimestampsAreWrittenTogether(t *testi measureValues: map[string]string{"value_supported1": "10", "value_supported2": "20", "value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } @@ -578,7 +572,7 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1SingleTable, expectedResult2SingleTable}) @@ -595,7 +589,7 @@ func TestTransformMetricsDifferentMetricsAreWrittenToDifferentTablesInMultiTable measureValues: map[string]string{"value_supported3": "30"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{input1, input2}, []*timestreamwrite.WriteRecordsInput{expectedResult1MultiTable, expectedResult2MultiTable}) } @@ -616,7 +610,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { measureValues: map[string]string{"value_supported1": "10"}, }) - comparisonTest(t, ts.MappingModeSingleTable, + comparisonTest(t, MappingModeSingleTable, []telegraf.Metric{metricWithUnsupportedField}, 
[]*timestreamwrite.WriteRecordsInput{expectedResultSingleTable}) @@ -627,7 +621,7 @@ func TestTransformMetricsUnsupportedFieldsAreSkipped(t *testing.T) { measureValues: map[string]string{"value_supported1": "10"}, }) - comparisonTest(t, ts.MappingModeMultiTable, + comparisonTest(t, MappingModeMultiTable, []telegraf.Metric{metricWithUnsupportedField}, []*timestreamwrite.WriteRecordsInput{expectedResultMultiTable}) } @@ -637,10 +631,10 @@ func comparisonTest(t *testing.T, telegrafMetrics []telegraf.Metric, timestreamRecords []*timestreamwrite.WriteRecordsInput, ) { - var plugin ts.Timestream + var plugin Timestream switch mappingMode { - case ts.MappingModeSingleTable: - plugin = ts.Timestream{ + case MappingModeSingleTable: + plugin = Timestream{ MappingMode: mappingMode, DatabaseName: tsDbName, @@ -648,8 +642,8 @@ func comparisonTest(t *testing.T, SingleTableDimensionNameForTelegrafMeasurementName: testSingleTableDim, Log: testutil.Logger{}, } - case ts.MappingModeMultiTable: - plugin = ts.Timestream{ + case MappingModeMultiTable: + plugin = Timestream{ MappingMode: mappingMode, DatabaseName: tsDbName, Log: testutil.Logger{}, @@ -710,20 +704,20 @@ type SimpleInput struct { } func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { - var tsDimensions []*timestreamwrite.Dimension + var tsDimensions []types.Dimension for k, v := range i.dimensions { - tsDimensions = append(tsDimensions, &timestreamwrite.Dimension{ + tsDimensions = append(tsDimensions, types.Dimension{ Name: aws.String(k), Value: aws.String(v), }) } - var tsRecords []*timestreamwrite.Record + var tsRecords []types.Record for k, v := range i.measureValues { - tsRecords = append(tsRecords, &timestreamwrite.Record{ + tsRecords = append(tsRecords, types.Record{ MeasureName: aws.String(k), MeasureValue: aws.String(v), - MeasureValueType: aws.String("DOUBLE"), + MeasureValueType: types.MeasureValueTypeDouble, }) } @@ -731,10 +725,10 @@ func buildExpectedRecords(i SimpleInput) *timestreamwrite.WriteRecordsInput { DatabaseName: aws.String(tsDbName), TableName: aws.String(i.tableName), Records: tsRecords, - CommonAttributes: &timestreamwrite.Record{ + CommonAttributes: &types.Record{ Dimensions: tsDimensions, Time: aws.String(i.t), - TimeUnit: aws.String(timeUnit), + TimeUnit: types.TimeUnitSeconds, }, } diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go index 7826047d7873d..4d3027b1b5331 100644 --- a/plugins/outputs/warp10/warp10.go +++ b/plugins/outputs/warp10/warp10.go @@ -3,7 +3,7 @@ package warp10 import ( "bytes" "fmt" - "io/ioutil" + "io" "log" "math" "net/http" @@ -154,7 +154,7 @@ func (w *Warp10) Write(metrics []telegraf.Metric) error { if resp.StatusCode != http.StatusOK { if w.PrintErrorBody { - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) } diff --git a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go index c6eb9db2ae5b5..dc097da45ac2a 100644 --- a/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go +++ b/plugins/outputs/yandex_cloud_monitoring/yandex_cloud_monitoring.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -172,7 +172,7 @@ func getResponseFromMetadata(c *http.Client, metadataURL string) ([]byte, error) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != 
nil { return nil, err } @@ -242,7 +242,7 @@ func (a *YandexCloudMonitoring) send(body []byte) error { } defer resp.Body.Close() - _, err = ioutil.ReadAll(resp.Body) + _, err = io.ReadAll(resp.Body) if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) } diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index 5d715af1c4aaf..4bbf8c079476b 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -1,4 +1,3 @@ - //line plugins/parsers/influx/machine.go.rl:1 package influx @@ -16,19 +15,16 @@ func (e *readErr) Error() string { } var ( - ErrNameParse = errors.New("expected measurement name") - ErrFieldParse = errors.New("expected field") - ErrTagParse = errors.New("expected tag") + ErrNameParse = errors.New("expected measurement name") + ErrFieldParse = errors.New("expected field") + ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") - ErrParse = errors.New("parse error") - EOF = errors.New("EOF") + ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) - //line plugins/parsers/influx/machine.go.rl:318 - - //line plugins/parsers/influx/machine.go:33 const LineProtocol_start int = 46 const LineProtocol_first_final int = 46 @@ -39,7 +35,6 @@ const LineProtocol_en_discard_line int = 34 const LineProtocol_en_align int = 85 const LineProtocol_en_series int = 37 - //line plugins/parsers/influx/machine.go.rl:321 type Handler interface { @@ -69,26 +64,25 @@ type machine struct { func NewMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_align, } - //line plugins/parsers/influx/machine.go.rl:354 - + //line plugins/parsers/influx/machine.go.rl:355 - + //line plugins/parsers/influx/machine.go.rl:356 - + //line plugins/parsers/influx/machine.go.rl:357 - + //line plugins/parsers/influx/machine.go.rl:358 - + //line plugins/parsers/influx/machine.go.rl:359 - + //line plugins/parsers/influx/machine.go:90 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:360 @@ -98,24 +92,23 @@ func NewMachine(handler Handler) *machine { func NewSeriesMachine(handler Handler) *machine { m := &machine{ - handler: handler, + handler: handler, initState: LineProtocol_en_series, } - //line plugins/parsers/influx/machine.go.rl:371 - + //line plugins/parsers/influx/machine.go.rl:372 - + //line plugins/parsers/influx/machine.go.rl:373 - + //line plugins/parsers/influx/machine.go.rl:374 - + //line plugins/parsers/influx/machine.go.rl:375 - + //line plugins/parsers/influx/machine.go:117 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:376 @@ -135,10 +128,9 @@ func (m *machine) SetData(data []byte) { m.beginMetric = false m.finishMetric = false - //line plugins/parsers/influx/machine.go:140 { - ( m.cs) = LineProtocol_start + (m.cs) = LineProtocol_start } //line plugins/parsers/influx/machine.go.rl:393 @@ -163,391 +155,391 @@ func (m *machine) Next() error { func (m *machine) exec() error { var err error - + //line plugins/parsers/influx/machine.go:168 { - if ( m.p) == ( m.pe) { - goto _test_eof - } - goto _resume + if (m.p) == (m.pe) { + goto _test_eof + } + goto _resume -_again: - switch ( m.cs) { - case 46: - goto st46 - case 1: - goto st1 - case 2: - goto st2 - case 3: - goto st3 - case 0: - goto st0 - case 4: - goto st4 - case 5: - goto st5 - 
case 6: - goto st6 - case 47: - goto st47 - case 48: - goto st48 - case 49: - goto st49 - case 7: - goto st7 - case 8: - goto st8 - case 9: - goto st9 - case 10: - goto st10 - case 50: - goto st50 - case 51: - goto st51 - case 52: - goto st52 - case 53: - goto st53 - case 54: - goto st54 - case 55: - goto st55 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 - case 59: - goto st59 - case 60: - goto st60 - case 61: - goto st61 - case 62: - goto st62 - case 63: - goto st63 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 - case 67: - goto st67 - case 68: - goto st68 - case 69: - goto st69 - case 11: - goto st11 - case 12: - goto st12 - case 13: - goto st13 - case 14: - goto st14 - case 15: - goto st15 - case 70: - goto st70 - case 16: - goto st16 - case 17: - goto st17 - case 71: - goto st71 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 - case 75: - goto st75 - case 76: - goto st76 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 18: - goto st18 - case 19: - goto st19 - case 20: - goto st20 - case 80: - goto st80 - case 21: - goto st21 - case 22: - goto st22 - case 23: - goto st23 - case 81: - goto st81 - case 24: - goto st24 - case 25: - goto st25 - case 82: - goto st82 - case 83: - goto st83 - case 26: - goto st26 - case 27: - goto st27 - case 28: - goto st28 - case 29: - goto st29 - case 30: - goto st30 - case 31: - goto st31 - case 32: - goto st32 - case 33: - goto st33 - case 34: - goto st34 - case 84: - goto st84 - case 37: - goto st37 - case 86: - goto st86 - case 87: - goto st87 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 88: - goto st88 - case 42: - goto st42 - case 89: - goto st89 - case 43: - goto st43 - case 44: - goto st44 - case 45: - goto st45 - case 85: - goto st85 - case 35: - goto st35 - case 36: - goto st36 - } + _again: + switch m.cs { + case 46: + goto st46 + case 1: + goto st1 + case 2: + goto st2 + case 3: + goto st3 + case 0: + goto st0 + case 4: + goto st4 + case 5: + goto st5 + case 6: + goto st6 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 7: + goto st7 + case 8: + goto st8 + case 9: + goto st9 + case 10: + goto st10 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 + case 53: + goto st53 + case 54: + goto st54 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 + case 59: + goto st59 + case 60: + goto st60 + case 61: + goto st61 + case 62: + goto st62 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 + case 67: + goto st67 + case 68: + goto st68 + case 69: + goto st69 + case 11: + goto st11 + case 12: + goto st12 + case 13: + goto st13 + case 14: + goto st14 + case 15: + goto st15 + case 70: + goto st70 + case 16: + goto st16 + case 17: + goto st17 + case 71: + goto st71 + case 72: + goto st72 + case 73: + goto st73 + case 74: + goto st74 + case 75: + goto st75 + case 76: + goto st76 + case 77: + goto st77 + case 78: + goto st78 + case 79: + goto st79 + case 18: + goto st18 + case 19: + goto st19 + case 20: + goto st20 + case 80: + goto st80 + case 21: + goto st21 + case 22: + goto st22 + case 23: + goto st23 + case 81: + goto st81 + case 24: + goto st24 + case 25: + goto st25 + case 82: + goto st82 + case 83: + goto st83 + case 26: + goto st26 + case 27: + goto st27 + case 28: + goto st28 + case 29: + goto st29 + case 30: + goto st30 + case 31: + goto st31 + case 32: + goto st32 + case 33: + 
goto st33 + case 34: + goto st34 + case 84: + goto st84 + case 37: + goto st37 + case 86: + goto st86 + case 87: + goto st87 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 + case 88: + goto st88 + case 42: + goto st42 + case 89: + goto st89 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 85: + goto st85 + case 35: + goto st35 + case 36: + goto st36 + } - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof - } -_resume: - switch ( m.cs) { - case 46: - goto st_case_46 - case 1: - goto st_case_1 - case 2: - goto st_case_2 - case 3: - goto st_case_3 - case 0: - goto st_case_0 - case 4: - goto st_case_4 - case 5: - goto st_case_5 - case 6: - goto st_case_6 - case 47: - goto st_case_47 - case 48: - goto st_case_48 - case 49: - goto st_case_49 - case 7: - goto st_case_7 - case 8: - goto st_case_8 - case 9: - goto st_case_9 - case 10: - goto st_case_10 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 53: - goto st_case_53 - case 54: - goto st_case_54 - case 55: - goto st_case_55 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 - case 59: - goto st_case_59 - case 60: - goto st_case_60 - case 61: - goto st_case_61 - case 62: - goto st_case_62 - case 63: - goto st_case_63 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 - case 67: - goto st_case_67 - case 68: - goto st_case_68 - case 69: - goto st_case_69 - case 11: - goto st_case_11 - case 12: - goto st_case_12 - case 13: - goto st_case_13 - case 14: - goto st_case_14 - case 15: - goto st_case_15 - case 70: - goto st_case_70 - case 16: - goto st_case_16 - case 17: - goto st_case_17 - case 71: - goto st_case_71 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 - case 75: - goto st_case_75 - case 76: - goto st_case_76 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 18: - goto st_case_18 - case 19: - goto st_case_19 - case 20: - goto st_case_20 - case 80: - goto st_case_80 - case 21: - goto st_case_21 - case 22: - goto st_case_22 - case 23: - goto st_case_23 - case 81: - goto st_case_81 - case 24: - goto st_case_24 - case 25: - goto st_case_25 - case 82: - goto st_case_82 - case 83: - goto st_case_83 - case 26: - goto st_case_26 - case 27: - goto st_case_27 - case 28: - goto st_case_28 - case 29: - goto st_case_29 - case 30: - goto st_case_30 - case 31: - goto st_case_31 - case 32: - goto st_case_32 - case 33: - goto st_case_33 - case 34: - goto st_case_34 - case 84: - goto st_case_84 - case 37: - goto st_case_37 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 88: - goto st_case_88 - case 42: - goto st_case_42 - case 89: - goto st_case_89 - case 43: - goto st_case_43 - case 44: - goto st_case_44 - case 45: - goto st_case_45 - case 85: - goto st_case_85 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - } - goto st_out + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof + } + _resume: + switch m.cs { + case 46: + goto st_case_46 + case 1: + goto st_case_1 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 0: + goto st_case_0 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 7: + goto st_case_7 + case 8: + goto 
st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 + case 59: + goto st_case_59 + case 60: + goto st_case_60 + case 61: + goto st_case_61 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 70: + goto st_case_70 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 71: + goto st_case_71 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 + case 75: + goto st_case_75 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 80: + goto st_case_80 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 81: + goto st_case_81 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 84: + goto st_case_84 + case 37: + goto st_case_37 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 + case 88: + goto st_case_88 + case 42: + goto st_case_42 + case 89: + goto st_case_89 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 85: + goto st_case_85 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + } + goto st_out st46: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof46 } st_case_46: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -561,33 +553,33 @@ _resume: case 92: goto tr81 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr80 } goto tr79 -tr29: + tr29: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 -tr79: + goto st1 + tr79: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st1 + goto st1 st1: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof1 } st_case_1: //line plugins/parsers/influx/machine.go:590 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -599,43 +591,49 @@ tr79: case 92: goto st8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr1 } goto st1 -tr1: - ( m.cs) = 2 + tr1: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + 
if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr56: - ( m.cs) = 2 + goto _again + tr56: + (m.cs) = 2 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st2: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof2 } st_case_2: //line plugins/parsers/influx/machine.go:638 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr7 case 13: @@ -649,23 +647,23 @@ tr56: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st2 } goto tr5 -tr5: + tr5: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st3 + goto st3 st3: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof3 } st_case_3: //line plugins/parsers/influx/machine.go:668 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -675,99 +673,119 @@ tr5: case 92: goto st12 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr2: - ( m.cs) = 0 + tr2: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr7: - ( m.cs) = 0 + goto _again + tr7: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr31: - ( m.cs) = 0 + goto _again + tr31: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr35: - ( m.cs) = 0 + goto _again + tr35: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr82: - ( m.cs) = 0 + goto _again + tr82: + (m.cs) = 0 //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } + (m.cs) = 34 + { + (m.p)++ + goto _out + } - goto _again -tr135: + goto _again + tr135: //line plugins/parsers/influx/machine.go.rl:73 - ( m.p)-- + (m.p)-- - {goto st46 } + { + goto st46 + } - goto st0 + goto st0 //line plugins/parsers/influx/machine.go:754 -st_case_0: + st_case_0: st0: - ( m.cs) = 0 + (m.cs) = 0 goto _out -tr10: + tr10: //line plugins/parsers/influx/machine.go.rl:108 - m.key = m.text() + m.key = m.text() - goto st4 + goto st4 st4: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof4 } st_case_4: //line plugins/parsers/influx/machine.go:770 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto 
st5 case 45: @@ -785,16 +803,16 @@ tr10: case 116: goto tr20 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr16 } goto tr7 st5: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof5 } st_case_5: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr22 case 34: @@ -803,39 +821,39 @@ tr10: goto tr24 } goto tr21 -tr21: + tr21: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st6 -tr22: + goto st6 + tr22: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 -tr26: + goto st6 + tr26: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st6 + goto st6 st6: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof6 } st_case_6: //line plugins/parsers/influx/machine.go:838 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr26 case 34: @@ -844,43 +862,49 @@ tr26: goto st13 } goto st6 -tr23: - ( m.cs) = 47 + tr23: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr27: - ( m.cs) = 47 + goto _again + tr27: + (m.cs) = 47 //line plugins/parsers/influx/machine.go.rl:148 - err = m.handler.AddString(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddString(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st47: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof47 } st_case_47: //line plugins/parsers/influx/machine.go:883 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -890,69 +914,81 @@ tr27: case 44: goto st11 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st48 } goto tr82 -tr110: - ( m.cs) = 48 + tr110: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr117: - ( m.cs) = 48 + goto _again + tr117: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr122: - ( m.cs) = 48 + goto _again + tr122: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != 
nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr127: - ( m.cs) = 48 + goto _again + tr127: + (m.cs) = 48 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st48: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof48 } st_case_48: //line plugins/parsers/influx/machine.go:955 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -963,130 +999,148 @@ tr127: goto tr86 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto tr87 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto st48 } goto tr35 -tr34: + tr34: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st49 -tr89: - ( m.cs) = 49 + goto st49 + tr89: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr111: - ( m.cs) = 49 + goto _again + tr111: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr118: - ( m.cs) = 49 + goto _again + tr118: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr123: - ( m.cs) = 49 + goto _again + tr123: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next 
char will be the first column in the line - goto _again -tr128: - ( m.cs) = 49 + goto _again + tr128: + (m.cs) = 49 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st49: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof49 } st_case_49: //line plugins/parsers/influx/machine.go:1089 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1100,23 +1154,23 @@ tr128: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr80: + tr80: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true - goto st7 + goto st7 st7: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof7 } st_case_7: //line plugins/parsers/influx/machine.go:1119 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr31 case 13: @@ -1130,140 +1184,155 @@ tr80: case 92: goto tr32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st7 } goto tr29 -tr32: + tr32: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 -tr81: + goto st8 + tr81: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st8 + goto st8 st8: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof8 } st_case_8: //line plugins/parsers/influx/machine.go:1159 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st1 -tr90: - ( m.cs) = 9 + tr90: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr112: - ( m.cs) = 9 + goto _again + tr112: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr119: - ( m.cs) = 9 + goto _again + tr119: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr124: - ( m.cs) = 9 + goto _again + tr124: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:121 - err = 
m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr129: - ( m.cs) = 9 + goto _again + tr129: + (m.cs) = 9 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st9: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof9 } st_case_9: //line plugins/parsers/influx/machine.go:1234 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr34 } goto st0 -tr86: + tr86: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st10 + goto st10 st10: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof10 } st_case_10: //line plugins/parsers/influx/machine.go:1250 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st50 } goto tr35 -tr87: + tr87: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st50 + goto st50 st50: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof50 } st_case_50: //line plugins/parsers/influx/machine.go:1266 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1272,34 +1341,37 @@ tr87: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st52 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 -tr88: - ( m.cs) = 51 + tr88: + (m.cs) = 51 //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st51: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof51 } st_case_51: //line plugins/parsers/influx/machine.go:1302 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr34 case 13: @@ -1307,16 +1379,16 @@ tr88: case 32: goto st51 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st51 } goto st0 st52: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof52 } st_case_52: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1325,20 +1397,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st53 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st53: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof53 } st_case_53: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1347,20 +1419,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st54 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st54: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof54 } st_case_54: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1369,20 +1441,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st55 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st55: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof55 } st_case_55: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1391,20 +1463,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st56 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st56: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof56 } st_case_56: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1413,20 +1485,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st57 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st57: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof57 } st_case_57: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1435,20 +1507,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st58 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st58: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof58 } st_case_58: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1457,20 +1529,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st59 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st59: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof59 } st_case_59: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1479,20 +1551,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st60 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st60: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof60 } st_case_60: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1501,20 +1573,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 57 { goto st61 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st61: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof61 } st_case_61: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1523,20 +1595,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st62 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st62: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof62 } st_case_62: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1545,20 +1617,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st63 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st63: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof63 } st_case_63: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1567,20 +1639,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st64 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st64: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof64 } st_case_64: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1589,20 +1661,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st65 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st65: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof65 } st_case_65: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1611,20 +1683,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st66 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st66: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof66 } st_case_66: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1633,20 +1705,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st67 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st67: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof67 } st_case_67: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1655,20 +1727,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 
57 { goto st68 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st68: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof68 } st_case_68: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1677,20 +1749,20 @@ tr88: goto tr88 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st69 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr88 } goto tr35 st69: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof69 } st_case_69: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr89 case 13: @@ -1698,69 +1770,81 @@ tr88: case 32: goto tr88 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr88 } goto tr35 -tr113: - ( m.cs) = 11 + tr113: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr120: - ( m.cs) = 11 + goto _again + tr120: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr125: - ( m.cs) = 11 + goto _again + tr125: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr130: - ( m.cs) = 11 + goto _again + tr130: + (m.cs) = 11 //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st11: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof11 } st_case_11: //line plugins/parsers/influx/machine.go:1763 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr7 case 44: @@ -1770,89 +1854,89 @@ tr130: case 92: goto tr8 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto tr5 -tr8: + tr8: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st12 + goto st12 st12: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof12 } st_case_12: //line plugins/parsers/influx/machine.go:1789 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr7 } goto st3 -tr24: + tr24: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st13 + goto st13 st13: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof13 } st_case_13: //line plugins/parsers/influx/machine.go:1805 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st6 case 
92: goto st6 } goto tr7 -tr13: + tr13: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st14 + goto st14 st14: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof14 } st_case_14: //line plugins/parsers/influx/machine.go:1824 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 46: goto st15 case 48: goto st72 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } goto tr7 -tr14: + tr14: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st15 + goto st15 st15: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof15 } st_case_15: //line plugins/parsers/influx/machine.go:1846 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } goto tr7 st70: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof70 } st_case_70: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1867,20 +1951,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st70 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st16: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof16 } st_case_16: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 34: goto st17 case 43: @@ -1888,25 +1972,25 @@ tr14: case 45: goto st17 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st17: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof17 } st_case_17: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } goto tr7 st71: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof71 } st_case_71: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1917,20 +2001,20 @@ tr14: goto tr113 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st71 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st72: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof72 } st_case_72: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1949,20 +2033,20 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st73: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof73 } st_case_73: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -1979,20 +2063,20 @@ tr14: goto st16 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st74: 
- if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof74 } st_case_74: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr118 case 13: @@ -2002,16 +2086,16 @@ tr14: case 44: goto tr120 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr117 } goto tr82 st75: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof75 } st_case_75: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2030,27 +2114,27 @@ tr14: goto st74 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st75 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr15: + tr15: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st76 + goto st76 st76: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof76 } st_case_76: //line plugins/parsers/influx/machine.go:2053 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2071,20 +2155,20 @@ tr15: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st73 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 st77: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof77 } st_case_77: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr123 case 13: @@ -2094,23 +2178,23 @@ tr15: case 44: goto tr125 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr122 } goto tr82 -tr16: + tr16: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st78 + goto st78 st78: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof78 } st_case_78: //line plugins/parsers/influx/machine.go:2113 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr111 case 13: @@ -2131,27 +2215,27 @@ tr16: goto st77 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + case (m.data)[(m.p)] > 12: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { goto st78 } - case ( m.data)[( m.p)] >= 9: + case (m.data)[(m.p)] >= 9: goto tr110 } goto tr82 -tr17: + tr17: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st79 + goto st79 st79: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof79 } st_case_79: //line plugins/parsers/influx/machine.go:2154 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2165,43 +2249,43 @@ tr17: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st18: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof18 } st_case_18: - if ( m.data)[( m.p)] == 76 { + if (m.data)[(m.p)] == 76 { goto st19 } goto tr7 st19: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof19 } st_case_19: - if ( m.data)[( m.p)] == 83 { + if (m.data)[(m.p)] == 83 { goto st20 } goto tr7 st20: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { 
goto _test_eof20 } st_case_20: - if ( m.data)[( m.p)] == 69 { + if (m.data)[(m.p)] == 69 { goto st80 } goto tr7 st80: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof80 } st_case_80: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2211,50 +2295,50 @@ tr17: case 44: goto tr130 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st21: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof21 } st_case_21: - if ( m.data)[( m.p)] == 108 { + if (m.data)[(m.p)] == 108 { goto st22 } goto tr7 st22: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof22 } st_case_22: - if ( m.data)[( m.p)] == 115 { + if (m.data)[(m.p)] == 115 { goto st23 } goto tr7 st23: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof23 } st_case_23: - if ( m.data)[( m.p)] == 101 { + if (m.data)[(m.p)] == 101 { goto st80 } goto tr7 -tr18: + tr18: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st81 + goto st81 st81: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof81 } st_case_81: //line plugins/parsers/influx/machine.go:2257 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2268,41 +2352,41 @@ tr18: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 st24: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof24 } st_case_24: - if ( m.data)[( m.p)] == 85 { + if (m.data)[(m.p)] == 85 { goto st20 } goto tr7 st25: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof25 } st_case_25: - if ( m.data)[( m.p)] == 117 { + if (m.data)[(m.p)] == 117 { goto st23 } goto tr7 -tr19: + tr19: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st82 + goto st82 st82: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof82 } st_case_82: //line plugins/parsers/influx/machine.go:2305 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2314,23 +2398,23 @@ tr19: case 97: goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr20: + tr20: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st83 + goto st83 st83: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof83 } st_case_83: //line plugins/parsers/influx/machine.go:2333 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr128 case 13: @@ -2342,43 +2426,49 @@ tr20: case 114: goto st25 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr127 } goto tr82 -tr3: - ( m.cs) = 26 + tr3: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr57: - ( m.cs) = 26 + goto _again + tr57: + (m.cs) = 26 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = 
m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st26: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof26 } st_case_26: //line plugins/parsers/influx/machine.go:2381 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2388,23 +2478,23 @@ tr57: case 92: goto tr49 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr48 -tr48: + tr48: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st27 + goto st27 st27: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof27 } st_case_27: //line plugins/parsers/influx/machine.go:2407 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2414,23 +2504,23 @@ tr48: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 -tr51: + tr51: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st28 + goto st28 st28: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof28 } st_case_28: //line plugins/parsers/influx/machine.go:2433 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2440,23 +2530,23 @@ tr51: case 92: goto tr54 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr53 -tr53: + tr53: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st29 + goto st29 st29: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof29 } st_case_29: //line plugins/parsers/influx/machine.go:2459 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2470,39 +2560,39 @@ tr53: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr54: + tr54: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st30 + goto st30 st30: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof30 } st_case_30: //line plugins/parsers/influx/machine.go:2489 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st31 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st29 st31: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof31 } st_case_31: //line plugins/parsers/influx/machine.go:2505 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr2 case 13: @@ -2516,39 +2606,39 @@ tr54: case 92: goto st30 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr56 } goto st29 -tr49: + tr49: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st32 + goto st32 st32: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof32 } st_case_32: //line plugins/parsers/influx/machine.go:2535 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st33 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st33: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof33 } st_case_33: //line plugins/parsers/influx/machine.go:2551 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2558,44 +2648,46 @@ tr49: case 92: goto st32 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st27 st34: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof34 } st_case_34: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr62 } goto st34 -tr62: + tr62: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line //line plugins/parsers/influx/machine.go.rl:78 - {goto st85 } + { + goto st85 + } - goto st84 + goto st84 st84: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof84 } st_case_84: //line plugins/parsers/influx/machine.go:2592 goto st0 st37: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof37 } st_case_37: - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr31 case 35: @@ -2605,27 +2697,27 @@ tr62: case 92: goto tr66 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr31 } goto tr65 -tr65: + tr65: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st86 + goto st86 st86: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof86 } st_case_86: //line plugins/parsers/influx/machine.go:2628 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr138 case 13: @@ -2637,138 +2729,159 @@ tr65: case 92: goto st45 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st86 -tr67: + tr67: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st87 -tr138: - ( m.cs) = 87 + goto st87 + tr138: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again -tr142: - ( m.cs) = 87 + goto _again + tr142: + (m.cs) = 87 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + 
m.sol = m.p + m.sol++ // next char will be the first column in the line - goto _again + goto _again st87: //line plugins/parsers/influx/machine.go.rl:172 - m.finishMetric = true - ( m.cs) = 85; - {( m.p)++; goto _out } + m.finishMetric = true + (m.cs) = 85 + { + (m.p)++ + goto _out + } - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof87 } st_case_87: //line plugins/parsers/influx/machine.go:2702 goto st0 -tr139: - ( m.cs) = 38 + tr139: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr143: - ( m.cs) = 38 + goto _again + tr143: + (m.cs) = 38 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st38: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof38 } st_case_38: //line plugins/parsers/influx/machine.go:2735 - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr67 } goto st0 -tr140: - ( m.cs) = 39 + tr140: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again -tr144: - ( m.cs) = 39 + goto _again + tr144: + (m.cs) = 39 //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + goto _out + } + } - goto _again + goto _again st39: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof39 } st_case_39: //line plugins/parsers/influx/machine.go:2771 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2778,23 +2891,23 @@ tr144: case 92: goto tr69 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto tr68 -tr68: + tr68: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st40 + goto st40 st40: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof40 } st_case_40: //line plugins/parsers/influx/machine.go:2797 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2804,23 +2917,23 @@ tr68: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr71: + tr71: //line plugins/parsers/influx/machine.go.rl:95 - m.key = m.text() + m.key = m.text() - goto st41 + goto st41 st41: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof41 } st_case_41: //line plugins/parsers/influx/machine.go:2823 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2830,23 +2943,23 @@ tr71: case 92: goto tr74 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 
{ goto tr2 } goto tr73 -tr73: + tr73: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st88 + goto st88 st88: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof88 } st_case_88: //line plugins/parsers/influx/machine.go:2849 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2860,39 +2973,39 @@ tr73: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr74: + tr74: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st42 + goto st42 st42: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof42 } st_case_42: //line plugins/parsers/influx/machine.go:2879 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st89 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st88 st89: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof89 } st_case_89: //line plugins/parsers/influx/machine.go:2895 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr142 case 13: @@ -2906,39 +3019,39 @@ tr74: case 92: goto st42 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto tr2 } goto st88 -tr69: + tr69: //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st43 + goto st43 st43: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof43 } st_case_43: //line plugins/parsers/influx/machine.go:2925 - if ( m.data)[( m.p)] == 92 { + if (m.data)[(m.p)] == 92 { goto st44 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 st44: //line plugins/parsers/influx/machine.go.rl:248 - ( m.p)-- - - if ( m.p)++; ( m.p) == ( m.pe) { + (m.p)-- + + if (m.p)++; (m.p) == (m.pe) { goto _test_eof44 } st_case_44: //line plugins/parsers/influx/machine.go:2941 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 32: goto tr2 case 44: @@ -2948,45 +3061,45 @@ tr69: case 92: goto st43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto tr2 } goto st40 -tr66: + tr66: //line plugins/parsers/influx/machine.go.rl:82 - m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:28 - m.pb = m.p + m.pb = m.p - goto st45 + goto st45 st45: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof45 } st_case_45: //line plugins/parsers/influx/machine.go:2971 - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 13 { goto st0 } goto st86 -tr63: + tr63: //line plugins/parsers/influx/machine.go.rl:166 - m.lineno++ - m.sol = m.p - m.sol++ // next char will be the first column in the line + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line - goto st85 + goto st85 st85: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof85 } st_case_85: //line plugins/parsers/influx/machine.go:2989 - switch ( m.data)[( m.p)] { + switch (m.data)[(m.p)] { case 10: goto tr63 case 13: @@ -2996,312 +3109,554 @@ tr63: case 35: goto st36 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + 
if 9 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 12 { goto st85 } goto tr135 st35: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof35 } st_case_35: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st0 st36: - if ( m.p)++; ( m.p) == ( m.pe) { + if (m.p)++; (m.p) == (m.pe) { goto _test_eof36 } st_case_36: - if ( m.data)[( m.p)] == 10 { + if (m.data)[(m.p)] == 10 { goto tr63 } goto st36 st_out: - _test_eof46: ( m.cs) = 46; goto _test_eof - _test_eof1: ( m.cs) = 1; goto _test_eof - _test_eof2: ( m.cs) = 2; goto _test_eof - _test_eof3: ( m.cs) = 3; goto _test_eof - _test_eof4: ( m.cs) = 4; goto _test_eof - _test_eof5: ( m.cs) = 5; goto _test_eof - _test_eof6: ( m.cs) = 6; goto _test_eof - _test_eof47: ( m.cs) = 47; goto _test_eof - _test_eof48: ( m.cs) = 48; goto _test_eof - _test_eof49: ( m.cs) = 49; goto _test_eof - _test_eof7: ( m.cs) = 7; goto _test_eof - _test_eof8: ( m.cs) = 8; goto _test_eof - _test_eof9: ( m.cs) = 9; goto _test_eof - _test_eof10: ( m.cs) = 10; goto _test_eof - _test_eof50: ( m.cs) = 50; goto _test_eof - _test_eof51: ( m.cs) = 51; goto _test_eof - _test_eof52: ( m.cs) = 52; goto _test_eof - _test_eof53: ( m.cs) = 53; goto _test_eof - _test_eof54: ( m.cs) = 54; goto _test_eof - _test_eof55: ( m.cs) = 55; goto _test_eof - _test_eof56: ( m.cs) = 56; goto _test_eof - _test_eof57: ( m.cs) = 57; goto _test_eof - _test_eof58: ( m.cs) = 58; goto _test_eof - _test_eof59: ( m.cs) = 59; goto _test_eof - _test_eof60: ( m.cs) = 60; goto _test_eof - _test_eof61: ( m.cs) = 61; goto _test_eof - _test_eof62: ( m.cs) = 62; goto _test_eof - _test_eof63: ( m.cs) = 63; goto _test_eof - _test_eof64: ( m.cs) = 64; goto _test_eof - _test_eof65: ( m.cs) = 65; goto _test_eof - _test_eof66: ( m.cs) = 66; goto _test_eof - _test_eof67: ( m.cs) = 67; goto _test_eof - _test_eof68: ( m.cs) = 68; goto _test_eof - _test_eof69: ( m.cs) = 69; goto _test_eof - _test_eof11: ( m.cs) = 11; goto _test_eof - _test_eof12: ( m.cs) = 12; goto _test_eof - _test_eof13: ( m.cs) = 13; goto _test_eof - _test_eof14: ( m.cs) = 14; goto _test_eof - _test_eof15: ( m.cs) = 15; goto _test_eof - _test_eof70: ( m.cs) = 70; goto _test_eof - _test_eof16: ( m.cs) = 16; goto _test_eof - _test_eof17: ( m.cs) = 17; goto _test_eof - _test_eof71: ( m.cs) = 71; goto _test_eof - _test_eof72: ( m.cs) = 72; goto _test_eof - _test_eof73: ( m.cs) = 73; goto _test_eof - _test_eof74: ( m.cs) = 74; goto _test_eof - _test_eof75: ( m.cs) = 75; goto _test_eof - _test_eof76: ( m.cs) = 76; goto _test_eof - _test_eof77: ( m.cs) = 77; goto _test_eof - _test_eof78: ( m.cs) = 78; goto _test_eof - _test_eof79: ( m.cs) = 79; goto _test_eof - _test_eof18: ( m.cs) = 18; goto _test_eof - _test_eof19: ( m.cs) = 19; goto _test_eof - _test_eof20: ( m.cs) = 20; goto _test_eof - _test_eof80: ( m.cs) = 80; goto _test_eof - _test_eof21: ( m.cs) = 21; goto _test_eof - _test_eof22: ( m.cs) = 22; goto _test_eof - _test_eof23: ( m.cs) = 23; goto _test_eof - _test_eof81: ( m.cs) = 81; goto _test_eof - _test_eof24: ( m.cs) = 24; goto _test_eof - _test_eof25: ( m.cs) = 25; goto _test_eof - _test_eof82: ( m.cs) = 82; goto _test_eof - _test_eof83: ( m.cs) = 83; goto _test_eof - _test_eof26: ( m.cs) = 26; goto _test_eof - _test_eof27: ( m.cs) = 27; goto _test_eof - _test_eof28: ( m.cs) = 28; goto _test_eof - _test_eof29: ( m.cs) = 29; goto _test_eof - _test_eof30: ( m.cs) = 30; goto _test_eof - _test_eof31: ( m.cs) = 31; goto _test_eof - _test_eof32: ( m.cs) = 32; goto _test_eof - _test_eof33: ( m.cs) = 33; goto 
_test_eof - _test_eof34: ( m.cs) = 34; goto _test_eof - _test_eof84: ( m.cs) = 84; goto _test_eof - _test_eof37: ( m.cs) = 37; goto _test_eof - _test_eof86: ( m.cs) = 86; goto _test_eof - _test_eof87: ( m.cs) = 87; goto _test_eof - _test_eof38: ( m.cs) = 38; goto _test_eof - _test_eof39: ( m.cs) = 39; goto _test_eof - _test_eof40: ( m.cs) = 40; goto _test_eof - _test_eof41: ( m.cs) = 41; goto _test_eof - _test_eof88: ( m.cs) = 88; goto _test_eof - _test_eof42: ( m.cs) = 42; goto _test_eof - _test_eof89: ( m.cs) = 89; goto _test_eof - _test_eof43: ( m.cs) = 43; goto _test_eof - _test_eof44: ( m.cs) = 44; goto _test_eof - _test_eof45: ( m.cs) = 45; goto _test_eof - _test_eof85: ( m.cs) = 85; goto _test_eof - _test_eof35: ( m.cs) = 35; goto _test_eof - _test_eof36: ( m.cs) = 36; goto _test_eof - - _test_eof: {} - if ( m.p) == ( m.eof) { - switch ( m.cs) { - case 7, 37: + _test_eof46: + (m.cs) = 46 + goto _test_eof + _test_eof1: + (m.cs) = 1 + goto _test_eof + _test_eof2: + (m.cs) = 2 + goto _test_eof + _test_eof3: + (m.cs) = 3 + goto _test_eof + _test_eof4: + (m.cs) = 4 + goto _test_eof + _test_eof5: + (m.cs) = 5 + goto _test_eof + _test_eof6: + (m.cs) = 6 + goto _test_eof + _test_eof47: + (m.cs) = 47 + goto _test_eof + _test_eof48: + (m.cs) = 48 + goto _test_eof + _test_eof49: + (m.cs) = 49 + goto _test_eof + _test_eof7: + (m.cs) = 7 + goto _test_eof + _test_eof8: + (m.cs) = 8 + goto _test_eof + _test_eof9: + (m.cs) = 9 + goto _test_eof + _test_eof10: + (m.cs) = 10 + goto _test_eof + _test_eof50: + (m.cs) = 50 + goto _test_eof + _test_eof51: + (m.cs) = 51 + goto _test_eof + _test_eof52: + (m.cs) = 52 + goto _test_eof + _test_eof53: + (m.cs) = 53 + goto _test_eof + _test_eof54: + (m.cs) = 54 + goto _test_eof + _test_eof55: + (m.cs) = 55 + goto _test_eof + _test_eof56: + (m.cs) = 56 + goto _test_eof + _test_eof57: + (m.cs) = 57 + goto _test_eof + _test_eof58: + (m.cs) = 58 + goto _test_eof + _test_eof59: + (m.cs) = 59 + goto _test_eof + _test_eof60: + (m.cs) = 60 + goto _test_eof + _test_eof61: + (m.cs) = 61 + goto _test_eof + _test_eof62: + (m.cs) = 62 + goto _test_eof + _test_eof63: + (m.cs) = 63 + goto _test_eof + _test_eof64: + (m.cs) = 64 + goto _test_eof + _test_eof65: + (m.cs) = 65 + goto _test_eof + _test_eof66: + (m.cs) = 66 + goto _test_eof + _test_eof67: + (m.cs) = 67 + goto _test_eof + _test_eof68: + (m.cs) = 68 + goto _test_eof + _test_eof69: + (m.cs) = 69 + goto _test_eof + _test_eof11: + (m.cs) = 11 + goto _test_eof + _test_eof12: + (m.cs) = 12 + goto _test_eof + _test_eof13: + (m.cs) = 13 + goto _test_eof + _test_eof14: + (m.cs) = 14 + goto _test_eof + _test_eof15: + (m.cs) = 15 + goto _test_eof + _test_eof70: + (m.cs) = 70 + goto _test_eof + _test_eof16: + (m.cs) = 16 + goto _test_eof + _test_eof17: + (m.cs) = 17 + goto _test_eof + _test_eof71: + (m.cs) = 71 + goto _test_eof + _test_eof72: + (m.cs) = 72 + goto _test_eof + _test_eof73: + (m.cs) = 73 + goto _test_eof + _test_eof74: + (m.cs) = 74 + goto _test_eof + _test_eof75: + (m.cs) = 75 + goto _test_eof + _test_eof76: + (m.cs) = 76 + goto _test_eof + _test_eof77: + (m.cs) = 77 + goto _test_eof + _test_eof78: + (m.cs) = 78 + goto _test_eof + _test_eof79: + (m.cs) = 79 + goto _test_eof + _test_eof18: + (m.cs) = 18 + goto _test_eof + _test_eof19: + (m.cs) = 19 + goto _test_eof + _test_eof20: + (m.cs) = 20 + goto _test_eof + _test_eof80: + (m.cs) = 80 + goto _test_eof + _test_eof21: + (m.cs) = 21 + goto _test_eof + _test_eof22: + (m.cs) = 22 + goto _test_eof + _test_eof23: + (m.cs) = 23 + goto _test_eof + _test_eof81: + (m.cs) = 
81 + goto _test_eof + _test_eof24: + (m.cs) = 24 + goto _test_eof + _test_eof25: + (m.cs) = 25 + goto _test_eof + _test_eof82: + (m.cs) = 82 + goto _test_eof + _test_eof83: + (m.cs) = 83 + goto _test_eof + _test_eof26: + (m.cs) = 26 + goto _test_eof + _test_eof27: + (m.cs) = 27 + goto _test_eof + _test_eof28: + (m.cs) = 28 + goto _test_eof + _test_eof29: + (m.cs) = 29 + goto _test_eof + _test_eof30: + (m.cs) = 30 + goto _test_eof + _test_eof31: + (m.cs) = 31 + goto _test_eof + _test_eof32: + (m.cs) = 32 + goto _test_eof + _test_eof33: + (m.cs) = 33 + goto _test_eof + _test_eof34: + (m.cs) = 34 + goto _test_eof + _test_eof84: + (m.cs) = 84 + goto _test_eof + _test_eof37: + (m.cs) = 37 + goto _test_eof + _test_eof86: + (m.cs) = 86 + goto _test_eof + _test_eof87: + (m.cs) = 87 + goto _test_eof + _test_eof38: + (m.cs) = 38 + goto _test_eof + _test_eof39: + (m.cs) = 39 + goto _test_eof + _test_eof40: + (m.cs) = 40 + goto _test_eof + _test_eof41: + (m.cs) = 41 + goto _test_eof + _test_eof88: + (m.cs) = 88 + goto _test_eof + _test_eof42: + (m.cs) = 42 + goto _test_eof + _test_eof89: + (m.cs) = 89 + goto _test_eof + _test_eof43: + (m.cs) = 43 + goto _test_eof + _test_eof44: + (m.cs) = 44 + goto _test_eof + _test_eof45: + (m.cs) = 45 + goto _test_eof + _test_eof85: + (m.cs) = 85 + goto _test_eof + _test_eof35: + (m.cs) = 35 + goto _test_eof + _test_eof36: + (m.cs) = 36 + goto _test_eof + + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 7, 37: //line plugins/parsers/influx/machine.go.rl:32 - err = ErrNameParse - ( m.p)-- + err = ErrNameParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: + case 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25: //line plugins/parsers/influx/machine.go.rl:39 - err = ErrFieldParse - ( m.p)-- + err = ErrFieldParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: + case 26, 27, 28, 30, 32, 33, 39, 40, 41, 42, 43, 44: //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 10: + case 10: //line plugins/parsers/influx/machine.go.rl:53 - err = ErrTimestampParse - ( m.p)-- + err = ErrTimestampParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 86: + case 86: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 88, 89: + case 88, 89: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } - case 47, 48, 49, 51: + case 47, 48, 49, 51: //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 46: + case 46: //line plugins/parsers/influx/machine.go.rl:82 - 
m.beginMetric = true + m.beginMetric = true //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 1: + case 1: //line plugins/parsers/influx/machine.go.rl:86 - err = m.handler.SetMeasurement(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetMeasurement(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 29, 31: + case 29, 31: //line plugins/parsers/influx/machine.go.rl:99 - err = m.handler.AddTag(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:46 - err = ErrTagParse - ( m.p)-- + err = ErrTagParse + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } - case 74: + case 74: //line plugins/parsers/influx/machine.go.rl:112 - err = m.handler.AddInt(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 77: + case 77: //line plugins/parsers/influx/machine.go.rl:121 - err = m.handler.AddUint(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 70, 71, 72, 73, 75, 76, 78: + case 70, 71, 72, 73, 75, 76, 78: //line plugins/parsers/influx/machine.go.rl:130 - err = m.handler.AddFloat(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 79, 80, 81, 82, 83: + case 79, 80, 81, 82, 83: //line plugins/parsers/influx/machine.go.rl:139 - err = m.handler.AddBool(m.key, m.text()) - if err != nil { - ( m.p)-- + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true - case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: + case 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69: //line plugins/parsers/influx/machine.go.rl:157 - err = m.handler.SetTimestamp(m.text()) - if err != nil { - ( m.p)-- + err = m.handler.SetTimestamp(m.text()) + if err != nil { + (m.p)-- - ( m.cs) = 34; - {( m.p)++; ( m.cs) = 0; goto _out } - } + (m.cs) = 34 + { + (m.p)++ + (m.cs) = 0 + goto _out + } + } //line 
plugins/parsers/influx/machine.go.rl:178 - m.finishMetric = true + m.finishMetric = true //line plugins/parsers/influx/machine.go:3301 + } } - } - _out: {} + _out: + { + } } //line plugins/parsers/influx/machine.go.rl:415 @@ -3364,7 +3719,7 @@ type streamMachine struct { func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { m := &streamMachine{ machine: NewMachine(handler), - reader: r, + reader: r, } m.machine.SetData(make([]byte, 1024)) @@ -3392,13 +3747,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -3409,6 +3757,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2*len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index 29f4307860ea2..d6b5d949e4065 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -499,13 +499,6 @@ func (m *streamMachine) Next() error { m.machine.finishMetric = false for { - // Expand the buffer if it is full - if m.machine.pe == len(m.machine.data) { - expanded := make([]byte, 2 * len(m.machine.data)) - copy(expanded, m.machine.data) - m.machine.data = expanded - } - err := m.machine.exec() if err != nil { return err @@ -516,6 +509,13 @@ func (m *streamMachine) Next() error { break } + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) if n == 0 && err == io.EOF { m.machine.eof = m.machine.pe diff --git a/plugins/parsers/json_v2/README.md b/plugins/parsers/json_v2/README.md index a1effd5940614..d1e2e9c407255 100644 --- a/plugins/parsers/json_v2/README.md +++ b/plugins/parsers/json_v2/README.md @@ -1,10 +1,10 @@ # JSON Parser - Version 2 -This parser takes valid JSON input and turns it into metrics. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. +This parser takes valid JSON input and turns it into line protocol. The query syntax supported is [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md), you can go to this playground to test out your GJSON path here: https://gjson.dev/. You can find multiple examples under the `testdata` folder. ## Configuration -You configure this parser by describing the metric you want by defining the fields and tags from the input. The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. +You configure this parser by describing the line protocol you want by defining the fields and tags from the input. 
The configuration is divided into config sub-tables called `field`, `tag`, and `object`. In the example below you can see all the possible configuration keys you can define for each config table. In the sections that follow these configuration keys are defined in more detail. **Example configuration:** @@ -19,27 +19,45 @@ You configure this parser by describing the line protocol you want by defining the fiel timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) [[inputs.file.json_v2.tag]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key [[inputs.file.json_v2.field]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax to a non-array/non-object value rename = "new name" # A string with a new name for the tag key type = "int" # A string specifying the type (int,uint,float,string,bool) [[inputs.file.json_v2.object]] - path = "" # A string with valid GJSON path syntax + path = "" # A string with valid GJSON path syntax, can include arrays and objects + + ## Configuration to define what JSON keys should be used as timestamps ## timestamp_key = "" # A JSON key (for a nested key, prepend the parent keys with underscores) to a valid timestamp timestamp_format = "" # A string with a valid timestamp format (see below for possible values) timestamp_timezone = "" # A string with with a valid timezone (see below for possible values) - disable_prepend_keys = false (or true, just not both) + + ### Configuration to define what JSON keys should be included and how (field/tag) ### + tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field; when adding a JSON key to this list you don't have to define it in the included_keys list included_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result excluded_keys = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that shouldn't be included in result - tags = [] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + # When tag/field sub-tables are defined, they will be the only fields/tags in the result, along with any keys defined in the included_keys list. + # If the resulting values aren't included in the object/array returned by the root object path, they won't be included. + # You can define as many tag/field sub-tables as you want. 
+ [[inputs.file.json_v2.object.tag]] + path = "" # A string with valid GJSON path syntax, can include arrays and objects + rename = "new name" # A string with a new name for the tag key + [[inputs.file.json_v2.object.field]] + path = "" # A string with valid GJSON path syntax, can include arrays and objects + rename = "new name" # A string with a new name for the tag key + type = "int" # A string specifying the type (int,uint,float,string,bool) + + ### Configuration to modify the resulting line protocol ### + disable_prepend_keys = false (or true, just not both) [inputs.file.json_v2.object.renames] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a new name for the tag key key = "new name" [inputs.file.json_v2.object.fields] # A map of JSON keys (for a nested key, prepend the parent keys with underscores) with a type (int,uint,float,string,bool) key = "int" ``` + --- + ### root config options * **measurement_name (OPTIONAL)**: Will set the measurement name to the provided string. @@ -56,7 +74,7 @@ such as `America/New_York`, to `Local` to utilize the system timezone, or to `UT ### `field` and `tag` config options -`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/), which is used to define a `metric`. You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a metric from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resutling metric that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate metric (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)). +`field` and `tag` represent the elements of [line protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/). You can use the `field` and `tag` config tables to gather a single value or an array of values that all share the same type and name. With this you can add a field or tag to a line protocol from data stored anywhere in your JSON. If you define the GJSON path to return a single value then you will get a single resulting line protocol that contains the field/tag. If you define the GJSON path to return an array of values, then each field/tag will be put into a separate line protocol (you use the # character to retrieve JSON arrays, find examples [here](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md#arrays)).
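As a quick illustration of the array case, here is a minimal sketch (the input JSON, the `books.#.price` path, and the `book` measurement name are hypothetical, not taken from the plugin's testdata): a path that returns an array of values produces one line of line protocol per element.

```toml
# Hypothetical input JSON: {"books": [{"price": 10.0}, {"price": 12.5}]}
[[inputs.file.json_v2]]
  measurement_name = "book"
  [[inputs.file.json_v2.field]]
    path = "books.#.price" # GJSON query returning an array of float values
    type = "float"

# Expected result, one line of line protocol per array element (timestamps omitted):
#   book price=10
#   book price=12.5
```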
@@ -70,26 +88,34 @@ The notable difference between `field` and `tag`, is that `tag` values will alwa #### **field** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this field configuration you can gather non-array/non-object values. Note this acts as a global field when used with the `object` configuration: if you gather an array of values using `object`, the field gathered will be added to each resulting line protocol regardless of its location in the original JSON (see the example at the end of this section). This is defined in TOML as an array table using double brackets. + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the field name. If not defined it will use the trailing word from the provided query. * **type (OPTIONAL)**: You can define a string value to set the desired type (float, int, uint, string, bool). If not defined it won't enforce a type and will default to using the original type defined in the JSON (bool, float, or string). #### **tag** -* **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md). +Using this tag configuration you can gather non-array/non-object values. Note this acts as a global tag when used with the `object` configuration: if you gather an array of values using `object`, the tag gathered will be added to each resulting line protocol regardless of its location in the original JSON. This is defined in TOML as an array table using double brackets. + + +* **path (REQUIRED)**: A string with valid GJSON path syntax to a non-array/non-object value * **name (OPTIONAL)**: You can define a string value to set the tag name. If not defined it will use the trailing word from the provided query. For good examples of using `field` and `tag` you can reference the example configs under the folder `testdata`. -* [fields_and_tags](testdata/fields_and_tags/telegraf.conf) ---
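As a concrete illustration of the "global" field behavior described above, the `mix_field_and_object` test case added later in this diff mixes `field` tables with an `object` table. The config below is condensed from that testdata (see `plugins/parsers/json_v2/testdata/mix_field_and_object/` for the full input and expected output):

```toml
[[inputs.file.json_v2]]
    measurement_name = "openweather"
    [[inputs.file.json_v2.field]]
        path = "weather.#.main"
        rename = "summary"
    [[inputs.file.json_v2.object]]
        path = "@this"
        included_keys = ["main_temp", "wind_speed"]
        tags = ["name"]
```

The field gathered by `weather.#.main` is attached to the line produced by the `object` table (`summary="Clouds"` in the expected output), even though it lives in a different part of the input JSON.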
+ ### object -With the configuration section `object`, you can gather metrics from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). +With the configuration section `object`, you can gather values from [JSON objects](https://www.w3schools.com/js/js_json_objects.asp). This is defined in TOML as an array table using double brackets. -The following keys can be set for `object`: +#### The following keys can be set for `object` * **path (REQUIRED)**: You must define the path query that gathers the object with [GJSON Path Syntax](https://github.com/tidwall/gjson/blob/v1.7.5/SYNTAX.md) + +*Keys to define what JSON keys should be used as timestamps:* + * **timestamp_key (OPTIONAL)**: You can define a JSON key (for a nested key, prepend the parent keys with underscores) for the value to be set as the timestamp from the JSON input. * **timestamp_format (OPTIONAL, but REQUIRED when timestamp_key is defined)**: Must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or the Go "reference time" which is defined to be the specific time: @@ -97,22 +123,30 @@ the Go "reference time" which is defined to be the specific time: * **timestamp_timezone (OPTIONAL, but REQUIRES timestamp_key)**: This option should be set to a [Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`. Defaults to `UTC` -* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data to contain the parent key prepended to its key **NOTE**: duplicate names can overwrite each other when this is enabled -* **included_keys (OPTIONAL)**: You can define a list of key's that should be the only data included in the metric, by default it will include everything. -* **excluded_keys (OPTIONAL)**: You can define json keys to be excluded in the metric, for a nested key, prepend the parent keys with underscores + +*Configuration to define what JSON keys should be included and how (field/tag):* + +* **included_keys (OPTIONAL)**: You can define a list of keys that should be the only data included in the line protocol; by default it will include everything. +* **excluded_keys (OPTIONAL)**: You can define JSON keys to be excluded from the line protocol; for a nested key, prepend the parent keys with underscores * **tags (OPTIONAL)**: You can define JSON keys to be set as tags instead of fields; if you define a key that is an array or object then all nested values will become a tag -* **renames (OPTIONAL)**: A table matching the json key with the desired name (oppossed to defaulting to using the key), use names that include the prepended keys of its parent keys for nested results -* **fields (OPTIONAL)**: A table matching the json key with the desired type (int,string,bool,float), if you define a key that is an array or object then all nested values will become that type +* **field (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [field](#field) table you can define, but with two key differences: the path supports arrays and objects, and it is defined under the object table and therefore adheres to how the JSON is structured. Use this when you want the field to be added as if it were in the included_keys list, but selected with GJSON path syntax. +* **tag (OPTIONAL, defined in TOML as an array table using double brackets)**: Identical to the [tag](#tag) table you can define, but with two key differences: the path supports arrays and objects, and it is defined under the object table and therefore adheres to how the JSON is structured. Use this when you want the tag to be added as if it were in the included_keys list, but selected with GJSON path syntax (see the example after this list). + +*Configuration to modify the resulting line protocol:* + +* **disable_prepend_keys (OPTIONAL)**: Set to true to prevent resulting nested data from containing the parent key prepended to its key. **NOTE**: duplicate names can overwrite each other when this is enabled +* **renames (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the JSON key with the desired name (as opposed to defaulting to using the key); for nested results, use names that include the prepended parent keys +* **fields (OPTIONAL, defined in TOML as a table using single bracket)**: A table matching the JSON key with the desired type (int,string,bool,float); if you define a key that is an array or object then all nested values will become that type
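The `subfieldtag_in_object_2` test case added later in this diff is a compact end-to-end example of the `tag` sub-table: the array value becomes the tag, so every array element yields its own line while the scalar fields repeat. The input, configuration, and expected output below are taken from that testdata. Example JSON:

```json
{
    "cnt": 23,
    "data": [3, 7, 10, 23],
    "format": 0
}
```

Example configuration:

```toml
[[inputs.file.json_v2]]
    [[inputs.file.json_v2.object]]
        path = "@this"
        [[inputs.file.json_v2.object.tag]]
            path = "data"
        [[inputs.file.json_v2.object.field]]
            path = "cnt"
            type = "int"
        [[inputs.file.json_v2.object.field]]
            path = "format"
            type = "int"
```

Expected line protocol:

```
file,data=3 cnt=23i,format=0i
file,data=7 cnt=23i,format=0i
file,data=10 cnt=23i,format=0i
file,data=23 cnt=23i,format=0i
```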
## Arrays and Objects The following describes the high-level approach when parsing arrays and objects: -**Array**: Every element in an array is treated as a *separate* metric +**Array**: Every element in an array is treated as a *separate* line protocol -**Object**: Every key/value in a object is treated as a *single* metric +**Object**: Every key/value in an object is treated as a *single* line protocol -When handling nested arrays and objects, these above rules continue to apply as the parser creates metrics. When an object has multiple array's as values, the array's will become separate metrics containing only non-array values from the obejct. Below you can see an example of this behavior, with an input json containing an array of book objects that has a nested array of characters. +When handling nested arrays and objects, the rules above continue to apply as the parser creates line protocol. When an object has multiple arrays as values, the arrays will become separate line protocol containing only non-array values from the object. Below you can see an example of this behavior, with an input JSON containing an array of book objects that has a nested array of characters. Example JSON: @@ -157,7 +191,7 @@ Example configuration: disable_prepend_keys = true ``` -Expected metrics: +Expected line protocol: ``` file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="A Long-expected Party" @@ -173,7 +207,7 @@ You can find more complicated examples under the folder `testdata`. ## Types -For each field you have the option to define the types for each metric. The following rules are in place for this configuration: +For each field you have the option to define the types. The following rules are in place for this configuration: * If a type is explicitly defined, the parser will enforce this type and convert the data to the defined type if possible. If the type can't be converted then the parser will fail. * If a type isn't defined, the parser will use the default type defined in the JSON (int, float, string) diff --git a/plugins/parsers/json_v2/parser.go b/plugins/parsers/json_v2/parser.go index fa0946621cde4..46d089127ccdd 100644 --- a/plugins/parsers/json_v2/parser.go +++ b/plugins/parsers/json_v2/parser.go @@ -12,16 +12,33 @@ import ( "github.com/tidwall/gjson" ) +// Parser adheres to the parser interface, contains the parser configuration, and data required to parse JSON type Parser struct { + // These struct fields are common for a parser Configs []Config DefaultTags map[string]string Log telegraf.Logger - Timestamp time.Time + // **** The struct fields below this comment are used for processing individual configs **** + + // measurementName is the measurement name used in each line protocol for the current config measurementName string + // timestamp is the timestamp used in each line protocol, defaults to time.Now() + timestamp time.Time + + // **** Specific for object configuration **** + // subPathResults contains the results of sub-gjson path expressions provided in the field/tag tables within the object config + subPathResults []PathResult + // iterateObjects dictates if the expandArray function will handle objects + iterateObjects bool + // objectConfig contains the config for an object; some info is needed while iterating over the gjson results + objectConfig JSONObject +} - iterateObjects bool - currentSettings JSONObject +type PathResult struct { + result gjson.Result + tag bool + DataSet } type Config struct { @@ -53,13 +70,21 @@ type JSONObject struct { IncludedKeys []string `toml:"included_keys"` // OPTIONAL ExcludedKeys []string `toml:"excluded_keys"` // OPTIONAL DisablePrependKeys bool `toml:"disable_prepend_keys"` // OPTIONAL + FieldPaths []DataSet // OPTIONAL + TagPaths []DataSet // OPTIONAL } type MetricNode struct { + ParentIndex int OutputName string SetName string Tag bool DesiredType string // Can be "int", "uint", "float", "bool", "string" + /* + IncludeCollection is only used when processing objects and is responsible for containing the gjson results + found by the gjson paths provided in the FieldPaths and TagPaths configs.
+ */ + IncludeCollection *PathResult Metric telegraf.Metric gjson.Result @@ -68,13 +93,13 @@ type MetricNode struct { func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // Only valid JSON is supported if !gjson.Valid(string(input)) { - return nil, fmt.Errorf("Invalid JSON provided, unable to parse") + return nil, fmt.Errorf("invalid JSON provided, unable to parse") } var metrics []telegraf.Metric for _, c := range p.Configs { - // Measurement name configuration + // Measurement name can either be hardcoded, or parsed from the JSON using a GJSON path expression p.measurementName = c.MeasurementName if c.MeasurementNamePath != "" { result := gjson.GetBytes(input, c.MeasurementNamePath) @@ -83,8 +108,8 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } } - // Timestamp configuration - p.Timestamp = time.Now() + // timestamp defaults to current time, or can be parsed from the JSON using a GJSON path expression + p.timestamp = time.Now() if c.TimestampPath != "" { result := gjson.GetBytes(input, c.TimestampPath) if !result.IsArray() && !result.IsObject() { @@ -94,24 +119,24 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } var err error - p.Timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) + p.timestamp, err = internal.ParseTimestamp(c.TimestampFormat, result.Value(), c.TimestampTimezone) if err != nil { return nil, err } } } - fields, err := p.processMetric(c.Fields, input, false) + fields, err := p.processMetric(input, c.Fields, false) if err != nil { return nil, err } - tags, err := p.processMetric(c.Tags, input, true) + tags, err := p.processMetric(input, c.Tags, true) if err != nil { return nil, err } - objects, err := p.processObjects(c.JSONObjects, input) + objects, err := p.processObjects(input, c.JSONObjects) if err != nil { return nil, err } @@ -119,7 +144,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { metrics = append(metrics, cartesianProduct(tags, fields)...) if len(objects) != 0 && len(metrics) != 0 { - metrics = append(metrics, cartesianProduct(objects, metrics)...) + metrics = cartesianProduct(objects, metrics) } else { metrics = append(metrics, objects...) 
} @@ -137,7 +162,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { // processMetric will iterate over all 'field' or 'tag' configs and create metrics for each // A field/tag can either be a single value or an array of values, each resulting in its own metric // For multiple configs, a set of metrics is created from the cartesian product of each separate config -func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegraf.Metric, error) { +func (p *Parser) processMetric(input []byte, data []DataSet, tag bool) ([]telegraf.Metric, error) { if len(data) == 0 { return nil, nil } @@ -173,7 +198,7 @@ func (p *Parser) processMetric(data []DataSet, input []byte, tag bool) ([]telegr p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ), Result: result, } @@ -243,30 +268,27 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { if result.IsArray() { var err error + if result.IncludeCollection == nil && (len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0) { + result.IncludeCollection = p.existsInpathResults(result.Index) + } result.ForEach(func(_, val gjson.Result) bool { m := metric.New( p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ) - if val.IsObject() { - if p.iterateObjects { - n := MetricNode{ - SetName: result.SetName, - Metric: m, - Result: val, - } - r, err := p.combineObject(n) - if err != nil { - return false - } - - results = append(results, r...) - } else { - p.Log.Debugf("Found object in query ignoring it please use 'object' to gather metrics from objects") + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val + r, err := p.combineObject(n) + if err != nil { + return false } + + results = append(results, r...) 
if len(results) != 0 { for _, newResult := range results { mergeMetric(result.Metric, newResult) @@ -275,20 +297,11 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return true } - for _, f := range result.Metric.FieldList() { - m.AddField(f.Key, f.Value) - } - for _, f := range result.Metric.TagList() { - m.AddTag(f.Key, f.Value) - } - n := MetricNode{ - Tag: result.Tag, - DesiredType: result.DesiredType, - OutputName: result.OutputName, - SetName: result.SetName, - Metric: m, - Result: val, - } + mergeMetric(result.Metric, m) + n := result + n.ParentIndex += val.Index + n.Metric = m + n.Result = val r, err := p.expandArray(n) if err != nil { return false @@ -300,12 +313,12 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return nil, err } } else { - if result.SetName == p.currentSettings.TimestampKey { - if p.currentSettings.TimestampFormat == "" { + if result.SetName == p.objectConfig.TimestampKey { + if p.objectConfig.TimestampFormat == "" { err := fmt.Errorf("use of 'timestamp_query' requires 'timestamp_format'") return nil, err } - timestamp, err := internal.ParseTimestamp(p.currentSettings.TimestampFormat, result.Value(), p.currentSettings.TimestampTimezone) + timestamp, err := internal.ParseTimestamp(p.objectConfig.TimestampFormat, result.Value(), p.objectConfig.TimestampTimezone) if err != nil { return nil, err } @@ -314,17 +327,43 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { switch result.Value().(type) { case nil: // Ignore JSON values that are set as null default: + outputName := result.OutputName + desiredType := result.DesiredType + + if len(p.objectConfig.FieldPaths) > 0 || len(p.objectConfig.TagPaths) > 0 { + var pathResult *PathResult + // When IncludeCollection isn't nil, that means the current result is included in the collection. 
+ if result.IncludeCollection != nil { + pathResult = result.IncludeCollection + } else { + // Verify that the result should be included based on the results of fieldpaths and tag paths + pathResult = p.existsInpathResults(result.ParentIndex) + } + if pathResult == nil { + return results, nil + } + if pathResult.tag { + result.Tag = true + } + if !pathResult.tag { + desiredType = pathResult.Type + } + if pathResult.Rename != "" { + outputName = pathResult.Rename + } + } + if result.Tag { - result.DesiredType = "string" + desiredType = "string" } - v, err := p.convertType(result.Result, result.DesiredType, result.SetName) + v, err := p.convertType(result.Result, desiredType, result.SetName) if err != nil { return nil, err } if result.Tag { - result.Metric.AddTag(result.OutputName, v.(string)) + result.Metric.AddTag(outputName, v.(string)) } else { - result.Metric.AddField(result.OutputName, v) + result.Metric.AddField(outputName, v) } } } @@ -335,27 +374,57 @@ func (p *Parser) expandArray(result MetricNode) ([]telegraf.Metric, error) { return results, nil } +func (p *Parser) existsInpathResults(index int) *PathResult { + for _, f := range p.subPathResults { + if f.result.Index == index { + return &f + } + + // Indexes will be populated with all the elements that match on a `#(...)#` query + for _, i := range f.result.Indexes { + if i == index { + return &f + } + } + } + return nil +} + // processObjects will iterate over all 'object' configs and create metrics for each -func (p *Parser) processObjects(objects []JSONObject, input []byte) ([]telegraf.Metric, error) { +func (p *Parser) processObjects(input []byte, objects []JSONObject) ([]telegraf.Metric, error) { p.iterateObjects = true var t []telegraf.Metric for _, c := range objects { - p.currentSettings = c + p.objectConfig = c + if c.Path == "" { return nil, fmt.Errorf("GJSON path is required") } result := gjson.GetBytes(input, c.Path) - if result.Type == gjson.Null { - return nil, fmt.Errorf("GJSON Path returned null") + scopedJSON := []byte(result.Raw) + for _, f := range c.FieldPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + p.subPathResults = append(p.subPathResults, r) + } + + for _, f := range c.TagPaths { + var r PathResult + r.result = gjson.GetBytes(scopedJSON, f.Path) + r.DataSet = f + r.tag = true + p.subPathResults = append(p.subPathResults, r) } rootObject := MetricNode{ + ParentIndex: 0, Metric: metric.New( p.measurementName, map[string]string{}, map[string]interface{}{}, - p.Timestamp, + p.timestamp, ), Result: result, } @@ -389,28 +458,25 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } var outputName string - if p.currentSettings.DisablePrependKeys { + if p.objectConfig.DisablePrependKeys { outputName = strings.ReplaceAll(key.String(), " ", "_") } else { outputName = setName } - for k, n := range p.currentSettings.Renames { + for k, n := range p.objectConfig.Renames { if k == setName { outputName = n break } } - arrayNode := MetricNode{ - DesiredType: result.DesiredType, - Tag: result.Tag, - OutputName: outputName, - SetName: setName, - Metric: result.Metric, - Result: val, - } + arrayNode := result + arrayNode.ParentIndex += val.Index + arrayNode.OutputName = outputName + arrayNode.SetName = setName + arrayNode.Result = val - for k, t := range p.currentSettings.Fields { + for k, t := range p.objectConfig.Fields { if setName == k { arrayNode.DesiredType = t break @@ -418,7 +484,7 @@ func (p *Parser) combineObject(result MetricNode) 
([]telegraf.Metric, error) { } tag := false - for _, t := range p.currentSettings.Tags { + for _, t := range p.objectConfig.Tags { if setName == t { tag = true break @@ -451,12 +517,12 @@ func (p *Parser) combineObject(result MetricNode) ([]telegraf.Metric, error) { } func (p *Parser) isIncluded(key string, val gjson.Result) bool { - if len(p.currentSettings.IncludedKeys) == 0 { + if len(p.objectConfig.IncludedKeys) == 0 { return true } // automatically adds tags to included_keys so it does NOT have to be repeated in the config - p.currentSettings.IncludedKeys = append(p.currentSettings.IncludedKeys, p.currentSettings.Tags...) - for _, i := range p.currentSettings.IncludedKeys { + allKeys := append(p.objectConfig.IncludedKeys, p.objectConfig.Tags...) + for _, i := range allKeys { if i == key { return true } @@ -471,7 +537,7 @@ func (p *Parser) isIncluded(key string, val gjson.Result) bool { } func (p *Parser) isExcluded(key string) bool { - for _, i := range p.currentSettings.ExcludedKeys { + for _, i := range p.objectConfig.ExcludedKeys { if i == key { return true } @@ -491,33 +557,31 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { func (p *Parser) convertType(input gjson.Result, desiredType string, name string) (interface{}, error) { switch inputType := input.Value().(type) { case string: - if desiredType != "string" { - switch desiredType { - case "uint": - r, err := strconv.ParseUint(inputType, 10, 64) - if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) - } - return r, nil - case "int": - r, err := strconv.ParseInt(inputType, 10, 64) - if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) - } - return r, nil - case "float": - r, err := strconv.ParseFloat(inputType, 64) - if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) - } - return r, nil - case "bool": - r, err := strconv.ParseBool(inputType) - if err != nil { - return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) - } - return r, nil + switch desiredType { + case "uint": + r, err := strconv.ParseUint(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type uint: %v", name, err) } + return r, nil + case "int": + r, err := strconv.ParseInt(inputType, 10, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type int: %v", name, err) + } + return r, nil + case "float": + r, err := strconv.ParseFloat(inputType, 64) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type float: %v", name, err) + } + return r, nil + case "bool": + r, err := strconv.ParseBool(inputType) + if err != nil { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool: %v", name, err) + } + return r, nil } case bool: switch desiredType { @@ -537,22 +601,20 @@ func (p *Parser) convertType(input gjson.Result, desiredType string, name string return uint64(0), nil } case float64: - if desiredType != "float" { - switch desiredType { - case "string": - return fmt.Sprint(inputType), nil - case "int": - return input.Int(), nil - case "uint": - return input.Uint(), nil - case "bool": - if inputType == 0 { - return false, nil - } else if inputType == 1 { - return true, nil - } else { - return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) - } + switch desiredType { + case "string": + return fmt.Sprint(inputType), nil + case "int": + return input.Int(), nil + case 
"uint": + return input.Uint(), nil + case "bool": + if inputType == 0 { + return false, nil + } else if inputType == 1 { + return true, nil + } else { + return nil, fmt.Errorf("Unable to convert field '%s' to type bool", name) } } default: diff --git a/plugins/parsers/json_v2/parser_test.go b/plugins/parsers/json_v2/parser_test.go index f0f018034dc5b..3de93dc22b49f 100644 --- a/plugins/parsers/json_v2/parser_test.go +++ b/plugins/parsers/json_v2/parser_test.go @@ -16,81 +16,17 @@ import ( "github.com/stretchr/testify/require" ) -func TestData(t *testing.T) { - var tests = []struct { - name string - test string - }{ - { - name: "Test complex nesting", - test: "complex_nesting", - }, - { - name: "Test having an array of objects", - test: "array_of_objects", - }, - { - name: "Test using just fields and tags", - test: "fields_and_tags", - }, - { - name: "Test gathering from array of nested objects", - test: "nested_array_of_objects", - }, - { - name: "Test setting timestamp", - test: "timestamp", - }, - { - name: "Test setting measurement name from int", - test: "measurement_name_int", - }, - { - name: "Test multiple types", - test: "types", - }, - { - name: "Test settings tags in nested object", - test: "nested_tags", - }, - { - name: "Test settings tags in nested and non-nested objects", - test: "nested_and_nonnested_tags", - }, - { - name: "Test a more complex nested tag retrieval", - test: "nested_tags_complex", - }, - { - name: "Test multiple arrays in object", - test: "multiple_arrays_in_object", - }, - { - name: "Test fields and tags complex", - test: "fields_and_tags_complex", - }, - { - name: "Test object", - test: "object", - }, - { - name: "Test multiple timestamps", - test: "multiple_timestamps", - }, - { - name: "Test field with null", - test: "null", - }, - { - name: "Test large numbers (int64, uin64, float64)", - test: "large_numbers", - }, - } +func TestMultipleConfigs(t *testing.T) { + // Get all directories in testdata + folders, err := ioutil.ReadDir("testdata") + require.NoError(t, err) + // Make sure testdata contains data + require.Greater(t, len(folders), 0) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { + for _, f := range folders { + t.Run(f.Name(), func(t *testing.T) { // Process the telegraf config file for the test - buf, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", tc.test)) + buf, err := os.ReadFile(fmt.Sprintf("testdata/%s/telegraf.conf", f.Name())) require.NoError(t, err) inputs.Add("file", func() telegraf.Input { return &file.File{} @@ -107,10 +43,9 @@ func TestData(t *testing.T) { err = i.Gather(&acc) require.NoError(t, err) } - require.NoError(t, err) // Process expected metrics and compare with resulting metrics - expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", tc.test)) + expectedOutputs, err := readMetricFile(fmt.Sprintf("testdata/%s/expected.out", f.Name())) require.NoError(t, err) testutil.RequireMetricsEqual(t, expectedOutputs, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out new file mode 100644 index 0000000000000..e7f0e222418aa --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out @@ -0,0 +1 @@ +openweather,id=2.643743e+06,name=London coord_lat=51.5085,coord_lon=-0.1257,description="few clouds",main_temp=12.54,summary="Clouds",wind_speed=2.11 1628186541000000000 diff --git 
a/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json new file mode 100644 index 0000000000000..402113af8ca9e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/input.json @@ -0,0 +1,44 @@ +{ + "coord": { + "lon": -0.1257, + "lat": 51.5085 + }, + "weather": [ + { + "id": 801, + "main": "Clouds", + "description": "few clouds", + "icon": "02n" + } + ], + "base": "stations", + "main": { + "temp": 12.54, + "feels_like": 11.86, + "temp_min": 10.49, + "temp_max": 14.27, + "pressure": 1024, + "humidity": 77 + }, + "visibility": 10000, + "wind": { + "speed": 2.11, + "deg": 254, + "gust": 4.63 + }, + "clouds": { + "all": 21 + }, + "dt": 1633545358, + "sys": { + "type": 2, + "id": 2019646, + "country": "GB", + "sunrise": 1633500560, + "sunset": 1633541256 + }, + "timezone": 3600, + "id": 2643743, + "name": "London", + "cod": 200 +} diff --git a/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf new file mode 100644 index 0000000000000..cc181960cbf1e --- /dev/null +++ b/plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf @@ -0,0 +1,15 @@ +[[inputs.file]] + files = ["./testdata/mix_field_and_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + measurement_name = "openweather" + [[inputs.file.json_v2.field]] + path = "weather.#.main" + rename = "summary" + [[inputs.file.json_v2.field]] + path = "weather.#.description" + [[inputs.file.json_v2.object]] + path = "@this" + included_keys = ["coord_lat", "coord_lon", "main_temp", "wind_speed"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) that should be only included in result + tags = ["id", "name"] # List of JSON keys (for a nested key, prepend the parent keys with underscores) to be a tag instead of a field + diff --git a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out index 2948da1720f64..04cd0635a5497 100644 --- a/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out +++ b/plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out @@ -6,4 +6,3 @@ file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of th file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Bilbo",species="hobbit",random=2 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=1 file,title=The\ Lord\ Of\ The\ Rings author="Tolkien",chapters="The Shadow of the Past",name="Frodo",species="hobbit",random=2 - diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out new file mode 100644 index 0000000000000..f3fa9f0d8571c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/expected.out @@ -0,0 +1,2 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North minutes=2i +file,from_station=POWL,to_station=DALY,etd_estimate_direction=South minutes=6i diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json new file mode 100644 index 0000000000000..f60cd59f91247 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json @@ -0,0 +1,87 @@ +{ + "?xml": 
{ + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "07/02/2021", + "time": "06:05:47 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "2", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "16", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "31", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "22", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "52", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json new file mode 100644 index 0000000000000..e75e84a093b37 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json @@ -0,0 +1,134 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=POWL&dir=s&json=y" + }, + "date": "07/02/2021", + "time": "06:06:01 PM PDT", + "station": [ + { + "name": "Powell St.", + "abbr": "POWL", + "etd": [ + { + "destination": "Daly City", + "abbreviation": "DALY", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "1", + "direction": "South", + "length": "9", + "color": "BLUE", + "hexcolor": "#0099cc", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "1", + "direction": "South", + "length": "10", + "color": "GREEN", + "hexcolor": "#339933", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Millbrae", + "abbreviation": "MLBR", + "limited": "0", + "estimate": [ + { + "minutes": "19", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "49", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "79", + "platform": "1", + "direction": "South", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "SF Airport", + "abbreviation": "SFIA", + "limited": "0", + "estimate": [ + { + "minutes": "7", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "37", + "platform": "1", + "direction": "South", + "length": "10", + 
"color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "67", + "platform": "1", + "direction": "South", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf new file mode 100644 index 0000000000000..96c8ede181a54 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf @@ -0,0 +1,18 @@ +[[inputs.file]] + files = ["./testdata/multiple_json_input/input_1.json", "./testdata/multiple_json_input/input_2.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.tag]] + path="#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + rename = "minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out new file mode 100644 index 0000000000000..a7db83863a63c --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out @@ -0,0 +1 @@ +file,from_station=COLM,to_station=ANTC,etd_estimate_direction=North etd_estimate_minutes=6i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json new file mode 100644 index 0000000000000..45d0d5514ae76 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json @@ -0,0 +1,97 @@ +{ + "?xml": { + "@version": "1.0", + "@encoding": "utf-8" + }, + "root": { + "@id": "1", + "uri": { + "#cdata-section": "http://api.bart.gov/api/etd.aspx?cmd=etd&orig=COLM&dir=n&json=y" + }, + "date": "06/25/2021", + "time": "05:01:31 PM PDT", + "station": [ + { + "name": "Colma", + "abbr": "COLM", + "etd": [ + { + "destination": "Antioch", + "abbreviation": "ANTC", + "limited": "0", + "estimate": [ + { + "minutes": "6", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "36", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "51", + "platform": "2", + "direction": "North", + "length": "10", + "color": "YELLOW", + "hexcolor": "#ffff33", + "bikeflag": "1", + "delay": "0" + } + ] + }, + { + "destination": "Richmond", + "abbreviation": "RICH", + "limited": "0", + "estimate": [ + { + "minutes": "12", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "26", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + }, + { + "minutes": "41", + "platform": "2", + "direction": "North", + "length": "10", + "color": "RED", + "hexcolor": "#ff0000", + "bikeflag": "1", + "delay": "0" + } + ] + } + ] + } + ], + "message": "" + } +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf 
b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf new file mode 100644 index 0000000000000..7a8a283d77c3d --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf @@ -0,0 +1,17 @@ +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "root.station" + [[inputs.file.json_v2.object.field]] + path = "#.etd.0.estimate.0.minutes" + type = "int" + [[inputs.file.json_v2.object.tag]] + path = "#.abbr" + rename = "from_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.abbreviation" + rename = "to_station" + [[inputs.file.json_v2.object.tag]] + path = "#.etd.0.estimate.0.direction" diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out new file mode 100644 index 0000000000000..89748967a1ee9 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out @@ -0,0 +1,4 @@ +file,data=3 cnt=23i,format=0i +file,data=7 cnt=23i,format=0i +file,data=10 cnt=23i,format=0i +file,data=23 cnt=23i,format=0i diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json new file mode 100644 index 0000000000000..62b768eae05a7 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json @@ -0,0 +1,10 @@ +{ + "cnt": 23, + "data": [ + 3, + 7, + 10, + 23 + ], + "format": 0 +} diff --git a/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf new file mode 100644 index 0000000000000..60d7d18da43d0 --- /dev/null +++ b/plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf @@ -0,0 +1,16 @@ +# Example taken from: https://github.com/influxdata/telegraf/issues/5940 + +[[inputs.file]] + files = ["./testdata/subfieldtag_in_object_2/input.json"] + data_format = "json_v2" + [[inputs.file.json_v2]] + [[inputs.file.json_v2.object]] + path = "@this" + [[inputs.file.json_v2.object.tag]] + path = "data" + [[inputs.file.json_v2.object.field]] + path = "cnt" + type = "int" + [[inputs.file.json_v2.object.field]] + path = "format" + type = "int" diff --git a/plugins/parsers/prometheus/parser.go b/plugins/parsers/prometheus/parser.go index e55789f7957b4..bc7ea0c636e4d 100644 --- a/plugins/parsers/prometheus/parser.go +++ b/plugins/parsers/prometheus/parser.go @@ -21,8 +21,9 @@ import ( ) type Parser struct { - DefaultTags map[string]string - Header http.Header + DefaultTags map[string]string + Header http.Header + IgnoreTimestamp bool } func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { @@ -65,14 +66,15 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { for _, m := range mf.Metric { // reading tags tags := common.MakeLabels(m, p.DefaultTags) + t := p.GetTimestamp(m, now) if mf.GetType() == dto.MetricType_SUMMARY { // summary metric - telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeQuantiles(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric - telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), now) + telegrafMetrics := makeBuckets(m, tags, metricName, mf.GetType(), t) metrics = append(metrics, telegrafMetrics...) 
} else { // standard metric @@ -80,7 +82,6 @@ func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { fields := getNameAndValue(m, metricName) // converting to telegraf metric if len(fields) > 0 { - t := getTimestamp(m, now) m := metric.New("prometheus", tags, fields, t, common.ValueType(mf.GetType())) metrics = append(metrics, m) } @@ -113,10 +114,9 @@ func (p *Parser) SetDefaultTags(tags map[string]string) { } // Get Quantiles for summary metric & Buckets for histogram -func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) @@ -137,10 +137,9 @@ func makeQuantiles(m *dto.Metric, tags map[string]string, metricName string, met } // Get Buckets from histogram metric -func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { +func makeBuckets(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, t time.Time) []telegraf.Metric { var metrics []telegraf.Metric fields := make(map[string]interface{}) - t := getTimestamp(m, now) fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) @@ -179,9 +178,9 @@ func getNameAndValue(m *dto.Metric, metricName string) map[string]interface{} { return fields } -func getTimestamp(m *dto.Metric, now time.Time) time.Time { +func (p *Parser) GetTimestamp(m *dto.Metric, now time.Time) time.Time { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !p.IgnoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, m.GetTimestampMs()*1000000) } else { t = now diff --git a/plugins/parsers/prometheus/parser_test.go b/plugins/parsers/prometheus/parser_test.go index f53b926bda4a5..52ef2f5a3bed3 100644 --- a/plugins/parsers/prometheus/parser_test.go +++ b/plugins/parsers/prometheus/parser_test.go @@ -2,7 +2,7 @@ package prometheus import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" @@ -74,7 +74,7 @@ func TestParsingValidGauge(t *testing.T) { testutil.RequireMetricsEqual(t, expected, metrics, testutil.IgnoreTime(), testutil.SortMetrics()) } -func TestParsingValieCounter(t *testing.T) { +func TestParsingValidCounter(t *testing.T) { expected := []telegraf.Metric{ testutil.MustMetric( "prometheus", @@ -340,6 +340,32 @@ test_counter{label="test"} 1 %d testutil.RequireMetricsEqual(t, expected, metrics, testutil.SortMetrics()) } +func TestMetricsWithoutIgnoreTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + expected := testutil.MustMetric( + "prometheus", + map[string]string{ + "label": "test", + }, + map[string]interface{}{ + "test_counter": float64(1.0), + }, + testTime, + telegraf.Counter, + ) + + parser := Parser{IgnoreTimestamp: true} + metric, _ := parser.ParseLine(metricsWithTimestamps) + + testutil.RequireMetricEqual(t, expected, 
metric, testutil.IgnoreTime(), testutil.SortMetrics()) + assert.WithinDuration(t, time.Now(), metric.Time(), 5*time.Second) +} + func parse(buf []byte) ([]telegraf.Metric, error) { parser := Parser{} return parser.Parse(buf) @@ -435,7 +461,7 @@ func TestParserProtobufHeader(t *testing.T) { t.Fatalf("error making HTTP request to %s: %s", ts.URL, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { t.Fatalf("error reading body: %s", err) } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index cc2102c9532d2..fcdfc473ae37a 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -156,6 +156,9 @@ type Config struct { // FormData configuration FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` + // Prometheus configuration + PrometheusIgnoreTimestamp bool `toml:"prometheus_ignore_timestamp"` + // Value configuration ValueFieldName string `toml:"value_field_name"` @@ -259,7 +262,10 @@ func NewParser(config *Config) (Parser, error) { config.FormUrlencodedTagKeys, ) case "prometheus": - parser, err = NewPrometheusParser(config.DefaultTags) + parser, err = NewPrometheusParser( + config.DefaultTags, + config.PrometheusIgnoreTimestamp, + ) case "prometheusremotewrite": parser, err = NewPrometheusRemoteWriteParser(config.DefaultTags) case "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf": @@ -378,9 +384,10 @@ func NewFormUrlencodedParser( }, nil } -func NewPrometheusParser(defaultTags map[string]string) (Parser, error) { +func NewPrometheusParser(defaultTags map[string]string, ignoreTimestamp bool) (Parser, error) { return &prometheus.Parser{ - DefaultTags: defaultTags, + DefaultTags: defaultTags, + IgnoreTimestamp: ignoreTimestamp, }, nil } @@ -395,7 +402,7 @@ func NewXPathParserConfigs(metricName string, cfgs []XPathConfig) []xpath.Config configs := make([]xpath.Config, 0, len(cfgs)) for _, cfg := range cfgs { config := xpath.Config(cfg) - config.MetricName = metricName + config.MetricDefaultName = metricName configs = append(configs, config) } return configs diff --git a/plugins/parsers/xpath/parser.go b/plugins/parsers/xpath/parser.go index 52224530a9250..75ebfd92035c1 100644 --- a/plugins/parsers/xpath/parser.go +++ b/plugins/parsers/xpath/parser.go @@ -35,14 +35,14 @@ type Parser struct { } type Config struct { - MetricName string - MetricQuery string `toml:"metric_name"` - Selection string `toml:"metric_selection"` - Timestamp string `toml:"timestamp"` - TimestampFmt string `toml:"timestamp_format"` - Tags map[string]string `toml:"tags"` - Fields map[string]string `toml:"fields"` - FieldsInt map[string]string `toml:"fields_int"` + MetricDefaultName string `toml:"-"` + MetricQuery string `toml:"metric_name"` + Selection string `toml:"metric_selection"` + Timestamp string `toml:"timestamp"` + TimestampFmt string `toml:"timestamp_format"` + Tags map[string]string `toml:"tags"` + Fields map[string]string `toml:"fields"` + FieldsInt map[string]string `toml:"fields_int"` FieldSelection string `toml:"field_selection"` FieldNameQuery string `toml:"field_name"` @@ -160,13 +160,19 @@ func (p *Parser) parseQuery(starttime time.Time, doc, selected dataNode, config // Determine the metric name. If a query was specified, use the result of this query and the default metric name // otherwise. 
- metricname = config.MetricName + metricname = config.MetricDefaultName if len(config.MetricQuery) > 0 { v, err := p.executeQuery(doc, selected, config.MetricQuery) if err != nil { return nil, fmt.Errorf("failed to query metric name: %v", err) } - metricname = v.(string) + var ok bool + if metricname, ok = v.(string); !ok { + if v == nil { + p.Log.Infof("Hint: Empty metric-name-node. If you wanted to set a constant please use `metric_name = \"'name'\"`.") + } + return nil, fmt.Errorf("failed to query metric name: query result is of type %T not 'string'", v) + } } // By default take the time the parser was invoked and override the value diff --git a/plugins/parsers/xpath/parser_test.go b/plugins/parsers/xpath/parser_test.go index 46e4dba690102..ead02e0392769 100644 --- a/plugins/parsers/xpath/parser_test.go +++ b/plugins/parsers/xpath/parser_test.go @@ -1,7 +1,7 @@ package xpath import ( - "io/ioutil" + "os" "path/filepath" "strings" "testing" @@ -148,8 +148,8 @@ func TestInvalidTypeQueriesFail(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "a": "/Device_1/value_string", }, @@ -185,8 +185,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(/Device_1/value_string)", }, @@ -207,8 +207,8 @@ func TestInvalidTypeQueries(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "boolean(/Device_1/value_string)", }, @@ -252,8 +252,8 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", }, }, defaultTags: map[string]string{}, @@ -269,9 +269,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", - TimestampFmt: "unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", + TimestampFmt: "unix", }, }, defaultTags: map[string]string{}, @@ -287,9 +287,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ms", - TimestampFmt: "unix_ms", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ms", + TimestampFmt: "unix_ms", }, }, defaultTags: map[string]string{}, @@ -305,9 +305,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_us", - TimestampFmt: "unix_us", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_us", + TimestampFmt: "unix_us", }, }, defaultTags: map[string]string{}, @@ -323,9 +323,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix_ns", - TimestampFmt: "unix_ns", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix_ns", + TimestampFmt: "unix_ns", }, }, defaultTags: 
map[string]string{}, @@ -341,9 +341,9 @@ func TestParseTimestamps(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso", + TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -382,8 +382,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "/Device_1/value_int", "b": "/Device_1/value_float", @@ -410,8 +410,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "a": "number(Device_1/value_int)", "b": "number(/Device_1/value_float)", @@ -438,8 +438,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "b": "number(/Device_1/value_float)", "c": "boolean(/Device_1/value_bool)", @@ -468,8 +468,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -492,8 +492,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "x": "number(substring-before(/Device_1/value_position, ';'))", "y": "number(substring-after(/Device_1/value_position, ';'))", @@ -516,8 +516,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", FieldsInt: map[string]string{ "x": "substring-before(/Device_1/value_position, ';')", "y": "substring-after(/Device_1/value_position, ';')", @@ -540,8 +540,8 @@ func TestParseSingleValues(t *testing.T) { input: singleMetricValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix", Tags: map[string]string{ "state": "/Device_1/State", "name": "substring-after(/Device_1/Name, ' ')", @@ -587,8 +587,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", }, }, defaultTags: map[string]string{}, @@ -604,9 +604,9 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_iso/@value", - TimestampFmt: "2006-01-02T15:04:05Z", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_iso/@value", + 
TimestampFmt: "2006-01-02T15:04:05Z", }, }, defaultTags: map[string]string{}, @@ -622,8 +622,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_int/@_", "b": "/Device_1/attr_float/@_", @@ -650,8 +650,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "number(/Device_1/attr_int/@_)", "b": "number(/Device_1/attr_float/@_)", @@ -678,8 +678,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "b": "number(/Device_1/attr_float/@_)", "c": "boolean(/Device_1/attr_bool/@_)", @@ -708,8 +708,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "name": "substring-after(/Device_1/Name/@value, ' ')", }, @@ -730,8 +730,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Tags: map[string]string{ "state": "/Device_1/State/@_", "name": "substring-after(/Device_1/Name/@value, ' ')", @@ -754,8 +754,8 @@ func TestParseSingleAttributes(t *testing.T) { input: singleMetricAttributesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Device_1/Timestamp_unix/@value", + MetricDefaultName: "test", + Timestamp: "/Device_1/Timestamp_unix/@value", Fields: map[string]string{ "a": "/Device_1/attr_bool_numeric/@_ = 1", }, @@ -799,8 +799,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "a": "number(/Device/Value[1])", "b": "number(/Device/Value[2])", @@ -831,8 +831,8 @@ func TestParseMultiValues(t *testing.T) { input: singleMetricMultiValuesXML, configs: []Config{ { - MetricName: "test", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Timestamp: "/Timestamp/@value", FieldsInt: map[string]string{ "a": "/Device/Value[1]", "b": "/Device/Value[2]", @@ -886,9 +886,9 @@ func TestParseMultiNodes(t *testing.T) { input: multipleNodesXML, configs: []Config{ { - MetricName: "test", - Selection: "/Device", - Timestamp: "/Timestamp/@value", + MetricDefaultName: "test", + Selection: "/Device", + Timestamp: "/Timestamp/@value", Fields: map[string]string{ "value": "number(Value)", "active": "Active = 1", @@ -999,9 +999,9 @@ func TestParseMetricQuery(t *testing.T) { input: metricNameQueryXML, configs: []Config{ { - MetricName: "test", - MetricQuery: "name(/Device_1/Metric/@*[1])", - Timestamp: "/Device_1/Timestamp_unix", + MetricDefaultName: "test", + MetricQuery: 
"name(/Device_1/Metric/@*[1])", + Timestamp: "/Device_1/Timestamp_unix", Fields: map[string]string{ "value": "/Device_1/Metric/@*[1]", }, @@ -1017,6 +1017,29 @@ func TestParseMetricQuery(t *testing.T) { time.Unix(1577923199, 0), ), }, + { + name: "parse metric name constant", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "'the_metric'", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + defaultTags: map[string]string{}, + expected: testutil.MustMetric( + "the_metric", + map[string]string{}, + map[string]interface{}{ + "value": "ok", + }, + time.Unix(1577923199, 0), + ), + }, } for _, tt := range tests { @@ -1032,6 +1055,42 @@ func TestParseMetricQuery(t *testing.T) { } } +func TestParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + configs []Config + expected string + }{ + { + name: "string metric name query", + input: metricNameQueryXML, + configs: []Config{ + { + MetricDefaultName: "test", + MetricQuery: "arbitrary", + Timestamp: "/Device_1/Timestamp_unix", + Fields: map[string]string{ + "value": "/Device_1/Metric/@*[1]", + }, + }, + }, + expected: "failed to query metric name: query result is of type not 'string'", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{Configs: tt.configs, DefaultTags: map[string]string{}, Log: testutil.Logger{Name: "parsers.xml"}} + require.NoError(t, parser.Init()) + + _, err := parser.ParseLine(tt.input) + require.Error(t, err) + require.Equal(t, tt.expected, err.Error()) + }) + } +} + func TestEmptySelection(t *testing.T) { var tests = []struct { name string @@ -1146,7 +1205,7 @@ func TestTestCases(t *testing.T) { filename := filepath.FromSlash(tt.filename) cfg, header, err := loadTestConfiguration(filename) require.NoError(t, err) - cfg.MetricName = "xml" + cfg.MetricDefaultName = "xml" // Load the xml-content input, err := testutil.ParseRawLinesFrom(header, "File:") @@ -1174,7 +1233,7 @@ func TestTestCases(t *testing.T) { pbmsgtype = protofields[1] } - content, err := ioutil.ReadFile(datafile) + content, err := os.ReadFile(datafile) require.NoError(t, err) // Get the expectations @@ -1207,7 +1266,7 @@ func TestTestCases(t *testing.T) { } func loadTestConfiguration(filename string) (*Config, []string, error) { - buf, err := ioutil.ReadFile(filename) + buf, err := os.ReadFile(filename) if err != nil { return nil, nil, err } diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go index a305c4c5c2f29..c6a3262921407 100644 --- a/plugins/processors/filepath/filepath_test.go +++ b/plugins/processors/filepath/filepath_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package filepath diff --git a/plugins/processors/ifname/ifname.go b/plugins/processors/ifname/ifname.go index 10623c041dd2d..eb3fb2333e278 100644 --- a/plugins/processors/ifname/ifname.go +++ b/plugins/processors/ifname/ifname.go @@ -157,7 +157,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { for { m, age, err := d.getMap(agent) if err != nil { - return fmt.Errorf("couldn't retrieve the table of interface names: %w", err) + return fmt.Errorf("couldn't retrieve the table of interface names for %s: %w", agent, err) } name, found := m[num] @@ -171,7 +171,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { // the interface we're interested in. If the entry is old // enough, retrieve it from the agent once more. 
if age < minRetry { - return fmt.Errorf("interface number %d isn't in the table of interface names", num) + return fmt.Errorf("interface number %d isn't in the table of interface names on %s", num, agent) } if firstTime { @@ -181,7 +181,7 @@ func (d *IfName) addTag(metric telegraf.Metric) error { } // not found, cache hit, retrying - return fmt.Errorf("missing interface but couldn't retrieve table") + return fmt.Errorf("missing interface but couldn't retrieve table for %v", agent) } } @@ -212,7 +212,7 @@ func (d *IfName) Start(acc telegraf.Accumulator) error { fn := func(m telegraf.Metric) []telegraf.Metric { err := d.addTag(m) if err != nil { - d.Log.Debugf("Error adding tag %v", err) + d.Log.Debugf("Error adding tag: %v", err) } return []telegraf.Metric{m} } diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go index c8cf73d14157c..3b9a4ce579c9a 100644 --- a/plugins/processors/port_name/services_path.go +++ b/plugins/processors/port_name/services_path.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package portname diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go index 5097bfa9c6140..5fd30eb59671d 100644 --- a/plugins/processors/port_name/services_path_notwindows.go +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package portname diff --git a/plugins/processors/starlark/field_dict.go b/plugins/processors/starlark/field_dict.go index af32da185ba11..4a332b8268d9d 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/processors/starlark/field_dict.go @@ -175,6 +175,7 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e sv, err := asStarlarkValue(value) return sv, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") diff --git a/plugins/processors/starlark/starlark_test.go b/plugins/processors/starlark/starlark_test.go index 15152a2f349c3..6ad169bbf3f87 100644 --- a/plugins/processors/starlark/starlark_test.go +++ b/plugins/processors/starlark/starlark_test.go @@ -3,7 +3,6 @@ package starlark import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -706,6 +705,49 @@ def apply(metric): ), }, }, + { + name: "pop tag (default)", + source: ` +def apply(metric): + metric.tags['host2'] = metric.tags.pop('url', 'foo.org') + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "url": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "foo.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "host": "example.org", + "host2": "bar.org", + }, + map[string]interface{}{"time_idle": 0}, + time.Unix(0, 0), + ), + }, + }, { name: "popitem tags", source: ` @@ -1774,6 +1816,53 @@ def apply(metric): ), }, }, + { + name: "pop field (default)", + source: ` +def apply(metric): + metric.fields['idle_count'] = metric.fields.pop('count', 10) + return metric +`, + input: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + 
map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "count": 0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "idle_count": 10, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 0, + "time_guest": 0, + "idle_count": 0, + }, + time.Unix(0, 0), + ), + }, + }, { name: "popitem field", source: ` @@ -3194,7 +3283,7 @@ func TestAllScriptTestData(t *testing.T) { } fn := path t.Run(fn, func(t *testing.T) { - b, err := ioutil.ReadFile(fn) + b, err := os.ReadFile(fn) require.NoError(t, err) lines := strings.Split(string(b), "\n") inputMetrics := parseMetricsFrom(t, lines, "Example Input:") diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/processors/starlark/tag_dict.go index b17a6e2f0b6a3..7dbb8c12d0ed6 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/processors/starlark/tag_dict.go @@ -162,6 +162,7 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err v := starlark.String(value) return v, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") diff --git a/plugins/processors/starlark/testdata/sparkplug.star b/plugins/processors/starlark/testdata/sparkplug.star new file mode 100644 index 0000000000000..9d7987c189e10 --- /dev/null +++ b/plugins/processors/starlark/testdata/sparkplug.star @@ -0,0 +1,320 @@ + +# This Starlark processor is used when loading Sparkplug B protobuf +# messages into InfluxDB. The data source is an Opto22 Groov EPIC controller. +# +# This processor does the following: +# - Resolves the metric name using a numeric alias. +# When the EPIC MQTT client is started it sends a DBIRTH message +# that lists all metrics configured on the controller and includes +# a sequential numeric alias to reference it by. +# This processor stores that information in the dict state["aliases"].
# When subsequent DDATA messages are published, the numeric alias is +# used to find the stored metric name in the dict state["aliases"]. +# - Splits the MQTT topic into 5 fields which can be used as tags in InfluxDB. +# - Splits the metric name into 6 fields which are used as tags in InfluxDB. +# - Deletes the host and topic tags and the type, name and alias fields +# +# TODO: +# The requirement that a DBIRTH message has to be received before DDATA messages +# can be used creates a significant reliability issue and a debugging mess. +# I have to go into the Groov EPIC controller and restart the MQTT client every time +# I restart the telegraf loader. This has caused many hours of needless frustration. +# +# I see three possible solutions: +# - Opto 22 changes their software, making it optional to drop the alias +# and simply include the name in the DDATA messages. In my case it's never more +# than 15 characters. This is the simplest and most reliable solution. +# - Make a system call from telegraf, using SSH to remotely restart the MQTT client. +# - Have telegraf send a message through MQTT requesting a DBIRTH message from the EPIC Controller.
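To make the alias mechanism described in the header above concrete, here is a minimal Go sketch of the lookup table it maintains (types and function names are illustrative only; the actual logic lives in the Starlark apply() below):

```go
package main

import "fmt"

// aliases maps "<edgeid>/<alias>" to the metric name announced in a DBIRTH
// message, mirroring state["aliases"] in the Starlark script.
var aliases = map[string]string{}

func onDBirth(edgeID string, alias int64, name string) {
	aliases[fmt.Sprintf("%s/%d", edgeID, alias)] = name
}

func onDData(edgeID string, alias int64) (string, bool) {
	name, ok := aliases[fmt.Sprintf("%s/%d", edgeID, alias)]
	return name, ok // ok == false means the DBIRTH has not been seen yet
}

func main() {
	onDBirth("epiclc", 10, "Strategy/IO/I_Ch_TC_Top_C")
	if name, ok := onDData("epiclc", 10); ok {
		fmt.Println("resolved metric name:", name)
	}
}
```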
+# +# Example Input: +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 type=9i,value=22.247711,alias=10i 1626475876000000000 +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 alias=10i,type=9i,value=22.231323 1626475877000000000 +# edge,host=firefly,topic=spBv1.0/SF/DBIRTH/epiclc/Exp501 type=9i,name="Strategy/IO/I_Ch_TC_Right",alias=9i 1626475880000000000 +# edge,host=firefly,topic=spBv1.0/SF/DBIRTH/epiclc/Exp501 value=22.200958,name="Strategy/IO/I_Ch_TC_Top_C",type=9i,alias=10i 1626475881000000000 +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 alias=10i,type=9i,value=22.177643 1626475884000000000 +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 type=9i,value=22.231903,alias=10i 1626475885000000000 +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 value=22.165192,alias=10i,type=9i 1626475895000000000 +# edge,host=firefly,topic=spBv1.0/SF/DDATA/epiclc/Exp501 alias=10i,type=9i,value=22.127106 1626475896000000000 +# +# Example Output: +# C,Component=Ch,Datatype=IO,Device=TC,EdgeID=epiclc,Experiment=Exp501,Metric=I_Ch_TC_Top_C,MsgType=DBIRTH,Position=Top,Reactor=SF,Source=Strategy value=22.200958 1626475881000000000 +# C,Component=Ch,Datatype=IO,Device=TC,EdgeID=epiclc,Experiment=Exp501,Metric=I_Ch_TC_Top_C,MsgType=DDATA,Position=Top,Reactor=SF,Source=Strategy value=22.177643 1626475884000000000 +# C,Component=Ch,Datatype=IO,Device=TC,EdgeID=epiclc,Experiment=Exp501,Metric=I_Ch_TC_Top_C,MsgType=DDATA,Position=Top,Reactor=SF,Source=Strategy value=22.231903 1626475885000000000 +# C,Component=Ch,Datatype=IO,Device=TC,EdgeID=epiclc,Experiment=Exp501,Metric=I_Ch_TC_Top_C,MsgType=DDATA,Position=Top,Reactor=SF,Source=Strategy value=22.165192 1626475895000000000 +# C,Component=Ch,Datatype=IO,Device=TC,EdgeID=epiclc,Experiment=Exp501,Metric=I_Ch_TC_Top_C,MsgType=DDATA,Position=Top,Reactor=SF,Source=Strategy value=22.127106 1626475896000000000 + +############################################# +# The following is the telegraf.conf used when calling this processor + +# [[inputs.mqtt_consumer]] +# servers = ["tcp://your_server:1883"] +# qos = 0 +# connection_timeout = "30s" +# topics = ["spBv1.0/#"] +# persistent_session = false +# client_id = "" +# username = "your username" +# password = "your password" +# +# # Sparkplug protobuf configuration +# data_format = "xpath_protobuf" +# +# # URL of sparkplug protobuf prototype +# xpath_protobuf_type = "org.eclipse.tahu.protobuf.Payload" +# +# # Location of sparkplug_b.proto file +# xpath_protobuf_file = "/apps/telegraf/config/sparkplug_b.proto" +# +# [[inputs.mqtt_consumer.xpath_protobuf]] +# metric_selection = "metrics[not(template_value)]" +# metric_name = "concat('edge', substring-after(name, ' '))" +# timestamp = "timestamp" +# timestamp_format = "unix_ms" +# [inputs.mqtt_consumer.xpath_protobuf.tags] +# name = "substring-after(name, ' ')" +# [inputs.mqtt_consumer.xpath_protobuf.fields_int] +# type = "datatype" +# alias = "alias" +# [inputs.mqtt_consumer.xpath_protobuf.fields] +# # A metric value must be numeric +# value = "number((int_value | long_value | float_value | double_value | boolean_value))" +# name = "name" +# +# # Starlark processor +# [[processors.starlark]] +# script = "sparkplug.star" +# +# # Optionally Define constants used in sparkplug.star +# # Constants can be defined here or they can be defined in the +# # sparkplug_b.star file. +# +# [processors.starlark.constants] +# +# # NOTE: The remaining fields can be specified either here or in the starlark script. 
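As a quick illustration of the five-field topic layout these constants name, a minimal Go sketch (the sample topic is taken from the example input above; the script's own splitting is done by extractTopicTags below):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// spBv1.0/<group id>/<message type>/<edge id>/<device id>
	topic := "spBv1.0/SF/DDATA/epiclc/Exp501"
	fields := strings.Split(topic, "/")
	names := []string{"msg_format", "group_id", "msg_type", "edge_id", "device_id"}
	for i, f := range fields {
		if i < len(names) {
			fmt.Printf("%s = %s\n", names[i], f)
		}
	}
}
```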
+# +# # Tags used to identify message type - 3rd field of topic +# BIRTH_TAG = "BIRTH/" +# DEATH_TAG = "DEATH/" +# DATA_TAG = "DATA/" +# +# # Number of messages to hold if alias cannot be resolved +# MAX_UNRESOLVED = 3 +# +# # Provide alternate names for the 5 sparkplug topic fields. +# # The topic contains 5 fields separated by the '/' character. +# # Define the tag name for each of these fields. +# MSG_FORMAT = "false" #0 +# GROUP_ID = "reactor" #1 +# MSG_TYPE = "false" #2 +# EDGE_ID = "edgeid" #3 +# DEVICE_ID = "experiment" #4 +# + +BIRTH_TAG = "BIRTH/" +DEATH_TAG = "DEATH/" +DATA_TAG = "DATA/" + +# Number of messages to hold if alias cannot be resolved +MAX_UNRESOLVED = 3 + +# Provide alternate names for the 5 sparkplug topic fields. +# The topic contains 5 fields separated by the '/' character. +# Define the tag name for each of these fields. +MSG_FORMAT = "false" #0 +GROUP_ID = "Reactor" #1 +MSG_TYPE = "MsgType" #2 +EDGE_ID = "EdgeID" #3 +DEVICE_ID = "Experiment" #4 + +########### Begin sparkplug.star script + + +load("logging.star", "log") + +state = { + "aliases": dict(), + "devices": dict(), + "unresolved": list() +} + +def extractTopicTags(metric): + msg_format = '' + groupid = '' + msg_type = '' + edgeid = '' + deviceid = '' + + topic = metric.tags.get("topic", ""); + fields = topic.split("/"); + nfields = len(fields) + if nfields > 0: msg_format = fields[0] + if nfields > 1: groupid = fields[1] + if nfields > 2: msg_type = fields[2] + if nfields > 3: edgeid = fields[3] + if nfields > 4: deviceid = fields[4] + return [msg_format, groupid, msg_type, edgeid, deviceid] + + +def buildTopicTags(metric, topicFields): + # Remove topic and host tags - they are not useful for analysis + metric.tags.pop("topic") + metric.tags.pop("host") + + if MSG_FORMAT != "false": metric.tags[MSG_FORMAT] = topicFields[0] + if GROUP_ID != "false": metric.tags[GROUP_ID] = topicFields[1] + if MSG_TYPE != "false": metric.tags[MSG_TYPE] = topicFields[2] + if EDGE_ID != "false": metric.tags[EDGE_ID] = topicFields[3] + if DEVICE_ID != "false": metric.tags[DEVICE_ID] = topicFields[4] + + +def buildNameTags(metric,name): + # Remove type and alias from metric.fields - They are not useful for analysis + metric.fields.pop("type") + metric.fields.pop("alias") + if "name" in metric.fields: + metric.fields.pop("name") + + # The Groov EPIC metric names are comprised of 3 fields separated by a '/' + # source, datatype, and metric name + # Extract these fields and include them as tags. + fields = name.split('/') + nfields = len(fields) + if nfields > 0: + metric.tags["Source"] = fields[0] + if nfields > 1: + metric.tags["Datatype"] = fields[1] + if nfields > 2: + metric.tags["Metric"] = fields[2] + + # OPTIONAL + # + # By using underscore characters the metric name can be further + # divided into additional tags. + # How this is defined is site specific. + # Customize this as you wish + + # The following demonstrates dividing the metric name into 3, 4 or 5 new tags + # A metric name must have between 3-5 underscore separated fields + + # If there is only one or two fields then the only tag created is 'metric' + # which has the full name + # + # The last field is Units and is filled before fields 3, 4 and 5 + # Ex: C, V, Torr, W, psi, RPM, On.... + # The units are used in Influx as the 'measurement' name. 
+ # + # + # Fields 3, 4 and 5 (device, position, composition) are optional + # measurement_component_device_position_composition_units + # + # Ex: I_FuelTank1_C (3 fields) + # Measurement I + # Component FuelTank1 + # Units C + # + # I_FuelTank1_TC_Outlet_C (5 fields) + # Measurement I + # Component FuelTank1 + # Device TC + # Position Outlet + # Units C + # + # I_FuelTank1_TC_Outlet_Premium_C (6 fields) + # Measurement I + # Component FuelTank1 + # Device TC + # Position Outlet + # Composition Premium + # Units C + + # Split the metric name into fields using '_' + sfields = fields[2].split('_') + nf = len(sfields) + # Don't split the name if it's one or two fields + if nf <= 2: + metric.name = "Name" + if nf > 2: + metric.name = sfields[nf-1] # The Units are used for the metric name + metric.tags["Component"] = sfields[1] + if nf > 3: + metric.tags["Device"] = sfields[2] + if nf > 4: + metric.tags["Position"] = sfields[3] + if nf > 5: + metric.tags["Composition"] = sfields[4] + +def apply(metric): + output = metric + + log.debug("apply metric: {}".format(metric)) + + topic = metric.tags.get("topic", "") + topicFields = extractTopicTags(metric) + edgeid = topicFields[3] # Sparkplug spec specifies 4th field as edgeid + + # Split the topic into fields and assign to variables + # Determine if the message is of type birth and if so add it to the "aliases" LUT. + if DEATH_TAG in topic: + output = None + elif BIRTH_TAG in topic: + log.debug(" metric msg_type: {} edgeid: {} topic: {}".format(BIRTH_TAG, edgeid, topic)) + if "alias" in metric.fields and "name" in metric.fields: + # Create the lookup-table using "${edgeid}/${alias}" as the key and "${name}" as value + alias = metric.fields.get("alias") + name = metric.fields.get("name") + id = "{}/{}".format(edgeid,alias) + log.debug(" --> setting alias: {} name: {} id: {}".format(alias, name, id)) + state["aliases"][id] = name + if "value" in metric.fields: + buildTopicTags(metric, topicFields) + buildNameTags(metric, name) + else: + output = None + + # Try to resolve the unresolved if any + if len(state["unresolved"]) > 0: + # Filter out the matching metrics and keep the rest as unresolved + log.debug(" unresolved") + unresolved = [("{}/{}".format(edgeid, m.fields["alias"]), m) for m in state["unresolved"]] + matching = [(mid, m) for mid, m in unresolved if mid == id] + state["unresolved"] = [m for mid, m in unresolved if mid != id] + + log.debug(" found {} matching unresolved metrics".format(len(matching))) + # Process the matching metrics and output - TODO - needs debugging + # for mid, m in matching: + # buildTopicTags(m,topicFields) + # buildNameTags(m) + # output = [m for _, m in matching] + [metric] + + elif DATA_TAG in topic: + log.debug(" metric msg_type: {} edgeid: {} topic: {}".format(DATA_TAG, edgeid, topic)) + if "alias" in metric.fields: + alias = metric.fields.get("alias") + + # Look up the ID. If we know it, replace the name of the metric with the lookup value, + # otherwise we need to keep the metric for resolving later. + # This can happen if the messages are out-of-order for some reason... 
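+ # Note that the buffer used below is bounded: at most MAX_UNRESOLVED metrics + # are held, and when it is full the surplus entries are released downstream + # unresolved (raw) instead of being dropped silently.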
+ id = "{}/{}".format(edgeid,alias) + if id in state["aliases"]: + name = state["aliases"][id] + log.debug(" found alias: {} name: {}".format(alias, name)) + buildTopicTags(metric,topicFields) + buildNameTags(metric,name) + else: + # We want to hold the metric until we get the corresponding birth message + log.debug(" id not found: {}".format(id)) + output = None + if len(state["unresolved"]) >= MAX_UNRESOLVED: + log.warn(" metric overflow, trimming {}".format(len(state["unresolved"]) - MAX_UNRESOLVED+1)) + # Release the unresolved metrics as raw and trim buffer + output = state["unresolved"][MAX_UNRESOLVED-1:] + state["unresolved"] = state["unresolved"][:MAX_UNRESOLVED-1] + log.debug(" --> keeping metric") + state["unresolved"].append(metric) + else: + output = None + + return output + diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md index 08bb9d4f7c904..b33875578272a 100644 --- a/plugins/serializers/json/README.md +++ b/plugins/serializers/json/README.md @@ -19,6 +19,13 @@ The `json` output data format converts metrics into JSON documents. ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to ## the power of 10 less than the specified units. json_timestamp_units = "1s" + + ## The default timestamp format is Unix epoch time, subject to the + ## resolution configured in json_timestamp_units. + ## Other timestamp layouts can be configured using the Go language time + ## layout specification from https://golang.org/pkg/time/#Time.Format + ## e.g.: json_timestamp_format = "2006-01-02T15:04:05Z07:00" + # json_timestamp_format = "" ``` ### Examples: diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index e2d7af3305117..6db2a43ee231a 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -8,18 +8,20 @@ import ( "github.com/influxdata/telegraf" ) -type serializer struct { - TimestampUnits time.Duration +type Serializer struct { + TimestampUnits time.Duration + TimestampFormat string } -func NewSerializer(timestampUnits time.Duration) (*serializer, error) { - s := &serializer{ - TimestampUnits: truncateDuration(timestampUnits), +func NewSerializer(timestampUnits time.Duration, timestampformat string) (*Serializer, error) { + s := &Serializer{ + TimestampUnits: truncateDuration(timestampUnits), + TimestampFormat: timestampformat, } return s, nil } -func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { m := s.createObject(metric) serialized, err := json.Marshal(m) if err != nil { @@ -30,7 +32,7 @@ func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { objects := make([]interface{}, 0, len(metrics)) for _, metric := range metrics { m := s.createObject(metric) @@ -48,7 +50,7 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { return serialized, nil } -func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} { +func (s *Serializer) createObject(metric telegraf.Metric) map[string]interface{} { m := make(map[string]interface{}, 4) tags := make(map[string]string, len(metric.TagList())) @@ -71,7 +73,11 @@ func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} m["fields"] = fields m["name"] = metric.Name() - 
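The json_timestamp_format option documented in the README hunk above takes a Go reference-time layout. A standalone check of the layout from the example (the expected output matches the json_test.go case below):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Same instant as the serializer tests: time.Unix(1525478795, 123456789).
	t := time.Unix(1525478795, 123456789).UTC()
	// Go layouts are written by formatting the reference time
	// Mon Jan 2 15:04:05 MST 2006.
	fmt.Println(t.Format("2006-01-02T15:04:05Z07:00")) // 2018-05-05T00:06:35Z
}
```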
m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + if s.TimestampFormat == "" { + m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) + } else { + m["timestamp"] = metric.Time().UTC().Format(s.TimestampFormat) + } return m } diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 74d7f94166621..be939243904eb 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -30,7 +30,7 @@ func TestSerializeMetricFloat(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -40,9 +40,10 @@ func TestSerializeMetricFloat(t *testing.T) { func TestSerialize_TimestampUnits(t *testing.T) { tests := []struct { - name string - timestampUnits time.Duration - expected string + name string + timestampUnits time.Duration + timestampFormat string + expected string }{ { name: "default of 1s", @@ -74,6 +75,11 @@ func TestSerialize_TimestampUnits(t *testing.T) { timestampUnits: 65 * time.Millisecond, expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":152547879512}`, }, + { + name: "timestamp format", + timestampFormat: "2006-01-02T15:04:05Z07:00", + expected: `{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":"2018-05-05T00:06:35Z"}`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -85,7 +91,7 @@ func TestSerialize_TimestampUnits(t *testing.T) { }, time.Unix(1525478795, 123456789), ) - s, _ := NewSerializer(tt.timestampUnits) + s, _ := NewSerializer(tt.timestampUnits, tt.timestampFormat) actual, err := s.Serialize(m) require.NoError(t, err) require.Equal(t, tt.expected+"\n", string(actual)) @@ -103,7 +109,7 @@ func TestSerializeMetricInt(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -122,7 +128,7 @@ func TestSerializeMetricString(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -142,7 +148,7 @@ func TestSerializeMultiFields(t *testing.T) { } m := metric.New("cpu", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") var buf []byte buf, err := s.Serialize(m) assert.NoError(t, err) @@ -161,7 +167,7 @@ func TestSerializeMetricWithEscapes(t *testing.T) { } m := metric.New("My CPU", tags, fields, now) - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.Serialize(m) assert.NoError(t, err) @@ -180,7 +186,7 @@ func TestSerializeBatch(t *testing.T) { ) metrics := []telegraf.Metric{m, m} - s, _ := NewSerializer(0) + s, _ := NewSerializer(0, "") buf, err := s.SerializeBatch(metrics) require.NoError(t, err) require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) @@ -199,7 +205,7 @@ func TestSerializeBatchSkipInf(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) @@ -218,7 +224,7 @@ func TestSerializeBatchSkipInfAllFields(t *testing.T) { ), } - s, err := NewSerializer(0) + s, err := NewSerializer(0, "") require.NoError(t, err) buf, err := s.SerializeBatch(metrics) require.NoError(t, err) diff 
--git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md index 19c869ffbccb3..446def0b46d77 100644 --- a/plugins/serializers/prometheus/README.md +++ b/plugins/serializers/prometheus/README.md @@ -8,7 +8,11 @@ use the `metric_version = 2` option in order to properly round trip metrics. not be correct if the metric spans multiple batches. This issue can be somewhat, but not fully, mitigated by using outputs that support writing in "batch format". When using histogram and summary types, it is recommended to -use only the `prometheus_client` output. +use only the `prometheus_client` output. Histogram and Summary types +also update their expiration time based on the most recently received data. +If incoming metrics stop updating specific buckets or quantiles but continue +reporting others, every bucket/quantile will continue to exist. + ### Configuration diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go index ed442e23c85fd..caa8a7334d91d 100644 --- a/plugins/serializers/prometheus/collection.go +++ b/plugins/serializers/prometheus/collection.go @@ -241,6 +241,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Histogram: &Histogram{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_bucket"): @@ -289,6 +292,9 @@ func (c *Collection) Add(metric telegraf.Metric, now time.Time) { AddTime: now, Summary: &Summary{}, } + } else { + m.Time = metric.Time() + m.AddTime = now } switch { case strings.HasSuffix(field.Key, "_sum"): diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go index d2c5f5d098162..deb400ba2d899 100644 --- a/plugins/serializers/prometheus/collection_test.go +++ b/plugins/serializers/prometheus/collection_test.go @@ -302,6 +302,117 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "entire histogram expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "histogram does not expire because of addtime from bucket", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + 
"prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(15, 0), // More recent addtime causes entire metric to stay valid + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(10.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(1), + }, + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "summary quantile updates", now: time.Unix(0, 0), @@ -379,6 +490,106 @@ func TestCollectionExpire(t *testing.T) { }, }, }, + { + name: "Entire summary expires", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "summary does not expire because of quantile addtime", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 10.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(15, 0), // Recent addtime keeps entire metric around + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleSum: proto.Float64(1), + SampleCount: proto.Uint64(1), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.5), + Value: proto.Float64(10), + }, + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(1), + }, + }, + }, + }, + }, + }, + }, + }, { name: "expire based on add time", now: time.Unix(20, 0), @@ -425,3 +636,209 @@ func TestCollectionExpire(t *testing.T) { }) } } + +func TestExportTimestamps(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + input []Input + expected []*dto.MetricFamily + }{ + { + name: "histogram bucket updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + 
"http_request_duration_seconds_count": 2, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(15, 0), + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + "http_request_duration_seconds_count": 4, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Histogram, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(23, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(15, 0), + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(20, 0), // Updated timestamp + telegraf.Summary, + ), + addtime: time.Unix(23, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + TimestampMs: proto.Int64(time.Unix(20, 0).UnixNano() / int64(time.Millisecond)), + Summary: 
&dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCollection(FormatConfig{TimestampExport: ExportTimestamp}) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) + } + c.Expire(tt.now, tt.age) + + actual := c.GetProto() + + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index e67a9594dda73..b17364e66f0a6 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -88,6 +88,9 @@ type Config struct { // Timestamp units to use for JSON formatted output TimestampUnits time.Duration `toml:"timestamp_units"` + // Timestamp format to use for JSON formatted output + TimestampFormat string `toml:"timestamp_format"` + // Include HEC routing fields for splunkmetric output HecRouting bool `toml:"hec_routing"` @@ -123,7 +126,7 @@ func NewSerializer(config *Config) (Serializer, error) { case "graphite": serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteTagSanitizeMode, config.GraphiteSeparator, config.Templates) case "json": - serializer, err = NewJSONSerializer(config.TimestampUnits) + serializer, err = NewJSONSerializer(config.TimestampUnits, config.TimestampFormat) case "splunkmetric": serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) case "nowmetric": @@ -188,8 +191,8 @@ func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []stri return wavefront.NewSerializer(prefix, useStrict, sourceOverride) } -func NewJSONSerializer(timestampUnits time.Duration) (Serializer, error) { - return json.NewSerializer(timestampUnits) +func NewJSONSerializer(timestampUnits time.Duration, timestampFormat string) (Serializer, error) { + return json.NewSerializer(timestampUnits, timestampFormat) } func NewCarbon2Serializer(carbon2format string, carbon2SanitizeReplaceChar string) (Serializer, error) { diff --git a/scripts/alpine.docker b/scripts/alpine.docker index d5b8b85f6abb7..84cfcac2268a0 100644 --- a/scripts/alpine.docker +++ b/scripts/alpine.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6 as builder +FROM golang:1.17.2 as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . /go/src/github.com/influxdata/telegraf diff --git a/scripts/build.py b/scripts/build.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/scripts/buster.docker b/scripts/buster.docker index 685d30067e0ef..17b0cb581cc92 100644 --- a/scripts/buster.docker +++ b/scripts/buster.docker @@ -1,4 +1,4 @@ -FROM golang:1.16.6-buster as builder +FROM golang:1.17.2-buster as builder WORKDIR /go/src/github.com/influxdata/telegraf COPY . 
/go/src/github.com/influxdata/telegraf diff --git a/scripts/ci-1.16.docker b/scripts/ci-1.16.docker deleted file mode 100644 index f0b2badafd521..0000000000000 --- a/scripts/ci-1.16.docker +++ /dev/null @@ -1,23 +0,0 @@ -FROM golang:1.16.6 - -RUN chmod -R 755 "$GOPATH" - -RUN DEBIAN_FRONTEND=noninteractive \ - apt update && apt install -y --no-install-recommends \ - autoconf \ - git \ - libtool \ - locales \ - make \ - awscli \ - rpm \ - ruby \ - ruby-dev \ - zip && \ - rm -rf /var/lib/apt/lists/* - -RUN ln -sf /usr/share/zoneinfo/Etc/UTC /etc/localtime -RUN locale-gen C.UTF-8 || true -ENV LANG=C.UTF-8 - -RUN gem install fpm diff --git a/scripts/ci-1.15.docker b/scripts/ci-1.17.docker similarity index 95% rename from scripts/ci-1.15.docker rename to scripts/ci-1.17.docker index 2b87f29be4e3e..a69a0d7eddbe3 100644 --- a/scripts/ci-1.15.docker +++ b/scripts/ci-1.17.docker @@ -1,4 +1,4 @@ -FROM golang:1.15.8 +FROM golang:1.17.2 RUN chmod -R 755 "$GOPATH" diff --git a/scripts/generate_config.sh b/scripts/generate_config.sh new file mode 100755 index 0000000000000..c85dd05172631 --- /dev/null +++ b/scripts/generate_config.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# This script is responsible for generating the Telegraf config found under the `etc` directory. +# This script is meant to be run only within the CircleCI pipeline so that the Tiger Bot can update the configs automatically. +# It supports Windows and Linux because the configs are different depending on the OS. + + +os=$1 # windows or linux +exe_path="/build/extracted" # Path that will contain the telegraf binary +config_name="telegraf.conf" + +if [ "$os" = "windows" ]; then + zip=$(/bin/find ./build/dist -maxdepth 1 -name "*windows_amd64.zip" -print) + exe_path="$PWD/build/extracted" + unzip "$zip" -d "$exe_path" + config_name="telegraf_windows.conf" + exe_path=$(/bin/find "$exe_path" -name telegraf.exe -type f -print) +else + tar_path=$(find /build/dist -maxdepth 1 -name "*linux_amd64.tar.gz" -print | grep -v ".*static.*") + mkdir "$exe_path" + tar --extract --file="$tar_path" --directory "$exe_path" + exe_path=$(find "$exe_path" -name telegraf -type f -print | grep ".*usr/bin/.*") +fi + +$exe_path config > $config_name + +mkdir ./new-config +mv $config_name ./new-config diff --git a/scripts/install_gotestsum.sh b/scripts/install_gotestsum.sh new file mode 100755 index 0000000000000..0b813e20879fa --- /dev/null +++ b/scripts/install_gotestsum.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +OS=$1 +EXE=$2 +VERSION="1.7.0" + +WINDOWS_SHA="7ae12ddb171375f0c14d6a09dd27a5c1d1fc72edeea674e3d6e7489a533b40c1" +DARWIN_SHA="a8e2351604882af1a67601cbeeacdcfa9b17fc2f6fbac291cf5d434efdf2d85b" +LINUX_SHA="b5c98cc408c75e76a097354d9487dca114996e821b3af29a0442aa6c9159bd40" + +setup_gotestsum () { + echo "installing gotestsum" + curl -L "https://github.com/gotestyourself/gotestsum/releases/download/v${VERSION}/gotestsum_${VERSION}_${OS}_amd64.tar.gz" --output gotestsum.tar.gz + + if [ "$OS" = "windows" ]; then + SHA=$WINDOWS_SHA + SHATOOL="sha256sum" + elif [ "$OS" = "darwin" ]; then + SHA=$DARWIN_SHA + SHATOOL="shasum --algorithm 256" + elif [ "$OS" = "linux" ]; then + SHA=$LINUX_SHA + SHATOOL="sha256sum" + fi + + if ! 
echo "${SHA} gotestsum.tar.gz" | ${SHATOOL} --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + tar --extract --file=gotestsum.tar.gz "${EXE}" +} + +if test -f "${EXE}"; then + echo "gotestsum is already installed" + v=$(./"${EXE}" --version) + echo "$v is installed, required version is ${VERSION}" + if [ "$v" != "gotestsum version ${VERSION}" ]; then + setup_gotestsum + ${EXE} --version + fi +else + setup_gotestsum +fi diff --git a/scripts/installgo_mac.sh b/scripts/installgo_mac.sh new file mode 100644 index 0000000000000..f15aefa6a1641 --- /dev/null +++ b/scripts/installgo_mac.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +set -eux + +ARCH=$(uname -m) +GO_VERSION="1.17.2" +if [ "$ARCH" = 'arm64' ]; then + GO_ARCH="darwin-arm64" + GO_VERSION_SHA="ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904" # from https://golang.org/dl +elif [ "$ARCH" = 'x86_64' ]; then + GO_ARCH="darwin-amd64" + GO_VERSION_SHA="7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94" # from https://golang.org/dl +fi + +# This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) +path="/usr/local/Cellar" +sudo mkdir -p ${path} + +# Download Go and verify Go tarball. (Note: we aren't using brew because +# it is slow to update and we can't pull specific minor versions.) +setup_go () { + echo "installing go" + curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz + if ! echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum --algorithm 256 --check -; then + echo "Checksum failed" >&2 + exit 1 + fi + + sudo rm -rf ${path}/go + sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz + sudo mkdir -p /usr/local/bin + sudo ln -sf ${path}/go/bin/go /usr/local/bin/go + sudo ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi diff --git a/scripts/installgo_windows.sh b/scripts/installgo_windows.sh new file mode 100644 index 0000000000000..bd5dcca3dbc14 --- /dev/null +++ b/scripts/installgo_windows.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +set -eux + +GO_VERSION="1.17.2" + +setup_go () { + choco upgrade golang --version=${GO_VERSION} + choco install make + git config --system core.longpaths true + rm -rf /c/Go + cp -r /c/Program\ Files/Go /c/ +} + +if command -v go >/dev/null 2>&1; then + echo "Go is already installed" + v=$(go version | { read -r _ _ v _; echo "${v#go}"; }) + echo "$v is installed, required version is ${GO_VERSION}" + if [ "$v" != ${GO_VERSION} ]; then + setup_go + go version + fi +else + setup_go +fi diff --git a/scripts/mac_installgo.sh b/scripts/mac_installgo.sh deleted file mode 100644 index aab4731c22f30..0000000000000 --- a/scripts/mac_installgo.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -set -eux - -GO_ARCH="darwin-amd64" -GO_VERSION="1.16.6" -GO_VERSION_SHA="e4e83e7c6891baa00062ed37273ce95835f0be77ad8203a29ec56dbf3d87508a" # from https://golang.org/dl - -# This path is cachable. (Saving in /usr/local/ would cause issues restoring the cache.) -path="/usr/local/Cellar" - -# Download Go and verify Go tarball. (Note: we aren't using brew because -# it is slow to update and we can't pull specific minor versions.) 
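Each of these installer scripts pins a SHA-256 digest and refuses to unpack a download that does not match. A minimal Go equivalent of that verification step (the file name and digest here are the gotestsum Linux values from install_gotestsum.sh above):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
)

// verify compares the SHA-256 digest of the file at path with wantHex.
func verify(path, wantHex string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(data)
	if hex.EncodeToString(sum[:]) != wantHex {
		return fmt.Errorf("checksum failed for %s", path)
	}
	return nil
}

func main() {
	// Digest pinned per OS by the script; this is the Linux value.
	if err := verify("gotestsum.tar.gz", "b5c98cc408c75e76a097354d9487dca114996e821b3af29a0442aa6c9159bd40"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("checksum OK")
}
```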
-setup_go () { - echo "installing go" - curl -L https://golang.org/dl/go${GO_VERSION}.${GO_ARCH}.tar.gz --output go${GO_VERSION}.${GO_ARCH}.tar.gz - echo "${GO_VERSION_SHA} go${GO_VERSION}.${GO_ARCH}.tar.gz" | shasum -a 256 --check - sudo rm -rf ${path}/go - sudo tar -C $path -xzf go${GO_VERSION}.${GO_ARCH}.tar.gz - ln -sf ${path}/go/bin/go /usr/local/bin/go - ln -sf ${path}/go/bin/gofmt /usr/local/bin/gofmt -} - -if command -v go &> /dev/null; then - echo "Go is already installed" - v=`go version | { read _ _ v _; echo ${v#go}; }` - echo "$v is installed, required version is ${GO_VERSION}" - if [ "$v" != ${GO_VERSION} ]; then - setup_go - go version - fi -else - setup_go -fi diff --git a/scripts/release.sh b/scripts/release.sh index b445efc0494b3..22cac0a09cf53 100644 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -103,6 +103,16 @@ do done < manifest echo "" +package="$(grep *_darwin_amd64.dmg manifest | cut -f2 -d' ')" +cat -<