diff --git a/.circleci/config.yml b/.circleci/config.yml index 028198bbdb236..c7821fab6cc53 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ executors: go-1_17: working_directory: '/go/src/github.com/influxdata/telegraf' docker: - - image: 'quay.io/influxdb/telegraf-ci:1.17.2' + - image: 'quay.io/influxdb/telegraf-ci:1.17.3' environment: GOFLAGS: -p=8 mac: @@ -32,10 +32,6 @@ commands: - store_artifacts: path: './new-config' destination: 'new-config' - - persist_to_workspace: - root: './new-config' - paths: - - '*' check-changed-files-or-halt: steps: - run: ./scripts/check-file-changes.sh @@ -399,6 +395,8 @@ jobs: share-artifacts: executor: aws-cli/default steps: + - checkout + - check-changed-files-or-halt - run: command: | PR=${CIRCLE_PULL_REQUEST##*/} @@ -415,13 +413,6 @@ jobs: steps: - generate-config: os: windows - update-config: - executor: go-1_17 - steps: - - checkout - - attach_workspace: - at: '/new-config' - - run: ./scripts/update_config.sh ${UPDATE_CONFIG_TOKEN} commonjobs: - &test-awaiter @@ -509,14 +500,6 @@ workflows: branches: only: - master - - 'update-config': - requires: - - 'generate-config-win' - - 'generate-config' - filters: - branches: - only: - - master - 'share-artifacts': requires: - 'i386-package' @@ -535,6 +518,9 @@ workflows: branches: ignore: - master + - release.* + tags: + ignore: /.*/ - 'release': requires: - 'test-go-windows' diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml index eb6187bc2f382..a9b657f105056 100644 --- a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -1,6 +1,5 @@ name: Bug Report -description: File a bug report -title: "[Bug]: " +description: Create a bug report to help us improve labels: ["bug"] body: - type: markdown diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md deleted file mode 100644 index 28c6237ac75d1..0000000000000 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Bug report -labels: bug -about: Create a report to help us improve - ---- - - -### Relevant telegraf.conf: - -```toml - -``` - -### System info: - - - -### Docker - - - -### Steps to reproduce: - - - -1. ... -2. ... 
- -### Expected behavior: - - - -### Actual behavior: - - - -### Additional info: - - diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000000000..104d71db2230a --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,58 @@ +--- +################################# +################################# +## Super Linter GitHub Actions ## +################################# +################################# +name: Lint Code Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + push: + branches-ignore: [master, main] + # Remove the line above to run when pushing to master + pull_request: + branches: [master, main] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: github/super-linter@v4.8.1 + env: + VALIDATE_ALL_CODEBASE: false + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LINTER_RULES_PATH: '.' + MARKDOWN_CONFIG_FILE: .markdownlint.yml + VALIDATE_MARKDOWN: true diff --git a/.golangci.yml b/.golangci.yml index 470fc116bfb37..ddd7a0228fd8c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -21,10 +21,6 @@ linters: - varcheck linters-settings: - # custom: - # telegraflinter: - # path: telegraflinter.so - # description: "Find Telegraf specific review criteria, more info: https://github.com/influxdata/telegraf/wiki/Review" revive: rules: - name: argument-limit @@ -130,5 +126,10 @@ issues: - path: _test\.go text: "parameter.*seems to be a control flag, avoid control coupling" + - path: (^agent/|^cmd/|^config/|^filter/|^internal/|^logger/|^metric/|^models/|^selfstat/|^testutil/|^plugins/serializers/) + text: "imports-blacklist: should not use the following blacklisted import: \"log\"" + linters: + - revive + output: format: tab diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000000..893179487d310 --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,6 @@ +{ + "MD013": false, + "MD033": { + "allowed_elements": ["br"] + } +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 8760b914b7f95..e8054b074f413 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,196 +1,354 @@ + + +# Change Log + +## v1.21.0-rc1 [2021-12-08] + +### Bugfixes + + - [#10196](https://github.com/influxdata/telegraf/pull/10196) `outputs.elasticsearch` Implement NaN and inf handling for elasticsearch output + - [#10205](https://github.com/influxdata/telegraf/pull/10205) Print loaded plugins and deprecations for once and test flags + - [#10214](https://github.com/influxdata/telegraf/pull/10214) `processors.ifname` Eliminate MIB dependency for ifname processor + - [#10206](https://github.com/influxdata/telegraf/pull/10206) `inputs.snmp` Optimize locking for SNMP MIBs loading + - [#9975](https://github.com/influxdata/telegraf/pull/9975) `inputs.kube_inventory` Set TLS server name config properly
+ - [#10230](https://github.com/influxdata/telegraf/pull/10230) Sudden close of Telegraf caused by OPC UA input plugin + - [#9913](https://github.com/influxdata/telegraf/pull/9913) Update github.com/eclipse/paho.mqtt.golang module from 1.3.0 to 1.3.5 + - [#10221](https://github.com/influxdata/telegraf/pull/10221) `parsers.json_v2` Parser timestamp setting order + - [#10209](https://github.com/influxdata/telegraf/pull/10209) `outputs.graylog` Ensure graylog spec fields not prefixed with _ + - [#10099](https://github.com/influxdata/telegraf/pull/10099) `inputs.zfs` Pool detection and metrics gathering for ZFS >= 2.1.x + - [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor + - [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` Mqtt topic extracting no longer requires all three fields + - [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf + - [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintented corruption of the Makefile + +## v1.21.0-rc0 [2021-12-01] + +### Release Notes + +Thank you to @zak-pawel for lots of linter fixes! + +### Bugfixes + + - [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` fix cloudwatch metrics collection + - [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` fix register bigquery to output plugins + - [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` fix sysstat to use unique temp file vs hard-coded + - [#10046](https://github.com/influxdata/telegraf/pull/10046) fix update nats-sever to support openbsd + - [#10091](https://github.com/influxdata/telegraf/pull/10091) `inputs.prometheus` fix check error before defer in prometheus k8s + - [#10101](https://github.com/influxdata/telegraf/pull/10101) `inputs.win_perf_counters` fix add setting to win_perf_counters input to ignore localization + - [#10136](https://github.com/influxdata/telegraf/pull/10136) `inputs.snmp_trap` fix remove snmptranslate from readme and fix default path + - [#10116](https://github.com/influxdata/telegraf/pull/10116) `inputs.statsd` fix input plugin statsd parse error + - [#10131](https://github.com/influxdata/telegraf/pull/10131) fix skip knxlistener when writing the sample config + - [#10119](https://github.com/influxdata/telegraf/pull/10119) `inputs.cpu` update shirou/gopsutil to v3 + - [#10074](https://github.com/influxdata/telegraf/pull/10074) `outputs.graylog` fix failing test due to port already in use + - [#9865](https://github.com/influxdata/telegraf/pull/9865) `inputs.directory_monitor` fix directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 + - [#9862](https://github.com/influxdata/telegraf/pull/9862) `outputs.graylog` fix graylog plugin TLS support and message format + - [#9908](https://github.com/influxdata/telegraf/pull/9908) `parsers.json_v2` fix remove dead code + - [#9881](https://github.com/influxdata/telegraf/pull/9881) `outputs.graylog` fix mute graylog UDP/TCP tests by marking them as integration + - [#9751](https://github.com/influxdata/telegraf/pull/9751) bump google.golang.org/grpc from 1.39.1 to 1.40.0 + +### Features + + - [#10200](https://github.com/influxdata/telegraf/pull/10200) `aggregators.deprecations.go` Implement deprecation infrastructure + -
[#9518](https://github.com/influxdata/telegraf/pull/9518) `inputs.snmp` snmp to use gosmi + - [#10130](https://github.com/influxdata/telegraf/pull/10130) `outputs.influxdb_v2` add retry to 413 errors with InfluxDB output + - [#10144](https://github.com/influxdata/telegraf/pull/10144) `inputs.win_services` add exclude filter + - [#9995](https://github.com/influxdata/telegraf/pull/9995) `inputs.mqtt_consumer` enable extracting tag values from MQTT topics + - [#9419](https://github.com/influxdata/telegraf/pull/9419) `aggregators.all` add support of aggregator as Starlark script + - [#9561](https://github.com/influxdata/telegraf/pull/9561) `processors.regex` extend regexp processor do allow renaming of measurements, tags and fields + - [#8184](https://github.com/influxdata/telegraf/pull/8184) `outputs.http` add use_batch_format for HTTP output plugin + - [#9988](https://github.com/influxdata/telegraf/pull/9988) `inputs.kafka_consumer` add max_processing_time config to Kafka Consumer input + - [#9841](https://github.com/influxdata/telegraf/pull/9841) `inputs.sqlserver` add additional metrics to support elastic pool (sqlserver plugin) + - [#9910](https://github.com/influxdata/telegraf/pull/9910) `common.tls` filter client certificates by DNS names + - [#9942](https://github.com/influxdata/telegraf/pull/9942) `outputs.azure_data_explorer` add option to skip table creation in azure data explorer output + - [#9984](https://github.com/influxdata/telegraf/pull/9984) `processors.ifname` add more details to logmessages + - [#9833](https://github.com/influxdata/telegraf/pull/9833) `common.kafka` add metadata full to config + - [#9876](https://github.com/influxdata/telegraf/pull/9876) update etc/telegraf.conf and etc/telegraf_windows.conf + - [#9256](https://github.com/influxdata/telegraf/pull/9256) `inputs.modbus` modbus connection settings (serial) + - [#9860](https://github.com/influxdata/telegraf/pull/9860) `inputs.directory_monitor` adds the ability to create and name a tag containing the filename using the directory monitor input plugin + - [#9740](https://github.com/influxdata/telegraf/pull/9740) `inputs.prometheus` add ignore_timestamp option + - [#9513](https://github.com/influxdata/telegraf/pull/9513) `processors.starlark` starlark processor example for processing sparkplug_b messages + - [#9449](https://github.com/influxdata/telegraf/pull/9449) `parsers.json_v2` support defining field/tag tables within an object table + - [#9827](https://github.com/influxdata/telegraf/pull/9827) `inputs.elasticsearch_query` add debug query output to elasticsearch_query + - [#9241](https://github.com/influxdata/telegraf/pull/9241) `inputs.snmp` telegraf to merge tables with different indexes + - [#9013](https://github.com/influxdata/telegraf/pull/9013) `inputs.opcua` allow user to select the source for the metric timestamp. 
+ - [#9706](https://github.com/influxdata/telegraf/pull/9706) `inputs.puppetagent` add measurements from puppet 5 + - [#9644](https://github.com/influxdata/telegraf/pull/9644) `outputs.graylog` add graylog plugin TCP support + - [#8229](https://github.com/influxdata/telegraf/pull/8229) `outputs.azure_data_explorer` add json_timestamp_layout option + +### New Input Plugins + + - [#9724](https://github.com/influxdata/telegraf/pull/9724) `inputs.all` feat: add intel_pmu plugin + - [#9771](https://github.com/influxdata/telegraf/pull/9771) `inputs.all` feat: add Linux Volume Manager input plugin + - [#9236](https://github.com/influxdata/telegraf/pull/9236) `inputs.all` feat: Openstack input plugin + +### New Output Plugins + + - [#9891](https://github.com/influxdata/telegraf/pull/9891) `outputs.all` feat: add new groundwork output plugin + - [#9923](https://github.com/influxdata/telegraf/pull/9923) `common.tls` feat: add mongodb output plugin + - [#9346](https://github.com/influxdata/telegraf/pull/9346) `outputs.all` feat: Azure Event Hubs output plugin + +## v1.20.4 [2021-11-17] + +### Release Notes + +- [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 +- [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! + +- [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* +- [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* +- [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* +- [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +### Bugfixes + +- [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 +- [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI +- [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 +- [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up +- [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int +- [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd +- [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library +- [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" +- [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly +- [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 +- [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver +- [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs +- [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling + +## v1.20.3 [2021-10-27] + +### Release Notes + +- [#9873](https://github.com/influxdata/telegraf/pull/9873) Update go to 1.17.2 + +### Bugfixes + +- 
[#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 +- [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs +- [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps +- [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 +- [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation +- [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests +- [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library +- [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys +- [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD +- [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory +- [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin +- [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field +- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kuberentes labels +- [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size +- [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook +- [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x relicaset +- [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 +- [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 +- [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 +- [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql`Fix inconsistent metric types in mysql +- [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible +- [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place +- [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 +- [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 +- [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage +- [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +### New External Plugins + +- [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka +- [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka + ## v1.20.2 [2021-10-07] 
-#### Bugfixes +### Bugfixes - - [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API - - [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields - - [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser - - [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 - - [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built +- [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API +- [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Duplicate line_protocol when using object and fields +- [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser +- [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 +- [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix makefile typo that prevented i386 tar and rpm packages from being built ## v1.20.1 [2021-10-06] -#### Bugfixes - - - [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 - - [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 - - [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 - - [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference - - [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging - - [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config - - [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags - - [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version - - [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing - - [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation - - [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client - - [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 - - [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module - - [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 - - [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 - - [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 - -#### Features - - - [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field +### Bugfixes + +- [#9776](https://github.com/influxdata/telegraf/pull/9776) Update 
k8s.io/apimachinery module from 0.21.1 to 0.22.2 +- [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 +- [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 +- [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference +- [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging +- [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config +- [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags +- [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version +- [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing +- [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation +- [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client +- [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module +- [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 +- [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 +- [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +### Features + +- [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field ## v1.20.0 [2021-09-17] -#### Release Notes - - - [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 - -#### Bugfixes - - - [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 - - [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests - - [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 - - [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 - - [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives - - [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds - - [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 - - [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version - - [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value - - [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. 
- - [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 - - [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query - - [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 - - [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats - - [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 - - [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 - - [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names - - [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 - - [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 - - [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error - - [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging - - [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 - - [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module - - [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak - - [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting - -#### Features - - - [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support - - [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype - - [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces - - [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider - - [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP - - [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children - - [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type - - [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog - - [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page - - [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag - - [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins - - [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support - - [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name - - [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser - - [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup 
performance - - [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url - - [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status - - [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) - - [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 - -#### New Input Plugins - - - [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs - - [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection - - [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input - - [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin - -#### New Output Plugins - - - [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output - - [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output +### Release Notes + +- [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +### Bugfixes + +- [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 +- [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests +- [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 +- [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 +- [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives +- [#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds +- [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 +- [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version +- [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value +- [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values. 
+- [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 +- [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query +- [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 +- [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats +- [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 +- [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 +- [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names +- [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 +- [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 +- [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error +- [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging +- [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 +- [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module +- [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak +- [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting +### Features + +- [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` feat(plugins/inputs/systemd_units): add pattern support +- [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype +- [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces +- [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider +- [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP +- [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children +- [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type +- [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog +- [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page +- [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allows multiple paths and add path_tag +- [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and output plugins +- [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support +- [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name +- [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser +- [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance +- 
[#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url +- [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status +- [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) +- [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 + +### New Input Plugins + +- [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs +- [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather from /proc/mdstat collection +- [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input +- [#9623](https://github.com/influxdata/telegraf/pull/9623) Add internet Speed Monitor Input Plugin + +### New Output Plugins + +- [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output +- [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer(ADX) output ## v1.19.3 [2021-08-18] -#### Bugfixes - - - [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1 - - [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 - - [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 - - [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 - - [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery - - [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation - - [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores - - [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set - - [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection - - [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api - - [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki - - [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 - - [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error - - [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 - - [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path - - [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 - - [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 - - [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered +### Bugfixes + +- [#9639](https://github.com/influxdata/telegraf/pull/9639) Update 
sirupsen/logrus module from 1.7.0 to 1.8.1 +- [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 +- [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 +- [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery +- [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation +- [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores +- [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic, improve handling empty result set +- [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection +- [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api +- [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki +- [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 +- [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error +- [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 +- [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path +- [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 +- [#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 +- [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered ## v1.19.2 [2021-07-28] -#### Release Notes - - - [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 - -#### Bugfixes - - - [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions - - [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written - - [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims - - [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting - - [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column - - [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* - - [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics - - [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name - - [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics - - [#9323](https://github.com/influxdata/telegraf/pull/9323) 
`inputs.x509_cert` Prevent x509_cert from hanging on UDP connection - - [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled - - [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure - - [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map - - [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication - - [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers - - [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles - - [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log - - [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 - - [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups - - [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly - -#### Features - - - [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified +### Release Notes + +- [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 + +### Bugfixes + +- [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions +- [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written +- [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims +- [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting +- [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column +- [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* +- [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics +- [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name +- [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics +- [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection +- [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled +- [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure +- [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map +- [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication +- [#9520](https://github.com/influxdata/telegraf/pull/9520) 
`parsers.json_v2` Add support for large uint64 and int64 numbers +- [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles +- [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log +- [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 +- [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups +- [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly + +### Features + +- [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified ## v1.19.1 [2021-07-07] -#### Bugfixes - - - [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified - - [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory - - [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic - - [#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic - - [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic - - [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression - - [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 - - [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error - - [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error - - [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 - - [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 - - [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 - - [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics - - [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify - - [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https - - [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 - - [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys - - [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support - - [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 - - [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* +### Bugfixes + +- [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified +- [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory +- [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic +- 
[#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic +- [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic +- [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression +- [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nat-server module to v2.2.6 +- [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error +- [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error +- [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinary module to v0.21.1 +- [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 +- [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase Module to v0.1.0 +- [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics +- [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify +- [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https +- [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 +- [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys +- [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support +- [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 +- [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* ## v1.19.0 [2021-06-17] -#### Release Notes +### Release Notes - Many linter fixes - thanks @zak-pawel and all! 
- [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5 -#### Bugfixes +### Bugfixes - [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4 - [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http: @@ -207,7 +365,7 @@ - [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format - [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries -#### Features +### Features - [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL - [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output @@ -241,26 +399,26 @@ - [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option - [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric -#### New Input Plugins +### New Input Plugins -- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov - [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble - [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak - [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda - [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan -#### New Output Plugins +### New Output Plugins - [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia - [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio - [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov -#### New Parser Plugins +### New Parser Plugins - [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa - [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink -#### New External Plugins +### New External Plugins - [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon - [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard @@ -269,486 +427,486 @@ ## v1.18.3 [2021-05-20] -#### Release Notes - - - Added FreeBSD armv7 build - -#### Bugfixes +### Release Notes - - [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics - - [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 - - [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error - - [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query - - 
[#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 - - [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs - - [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 - - [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 - - [#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 - - [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 - - [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 - - [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 - - [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 - - [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go +- Added FreeBSD armv7 build + +### Bugfixes + +- [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics +- [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 +- [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error +- [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query +- [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 +- [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs +- [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 +- [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 +- [#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 +- [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 +- [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 +- [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 +- [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 +- [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go -#### Features +### Features - - [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression +- [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression ## v1.18.2 [2021-04-28] -#### Bugfixes +### Bugfixes - - [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings - - [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo - - [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls - - [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient 
ops map to allow collection of metrics other than read and write - - [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures - - [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner - - [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later - - [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name - - [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling +- [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings +- [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo +- [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls +- [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write +- [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures +- [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner +- [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later +- [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name +- [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling ## v1.18.1 [2021-04-07] -#### Bugfixes - - - [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8 - - [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override - - [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat - - [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed - - [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode - - [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more effeciently - - [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id - - [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object - - [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation - - [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic - - [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats - - [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode - - [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloadinlg +### Bugfixes + +- [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for 
MySQL 8 +- [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override +- [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat +- [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when no mibs are installed +- [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode +- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently +- [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id +- [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object +- [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation +- [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic +- [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats +- [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode +- [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloading ## v1.18.0 [2021-03-17] -#### Release Notes - - Support Go version 1.16.2 - - Added support for code signing in Windows - -#### Bugfixes - - [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice - [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling - [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list - [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin - [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues - [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count - [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions - [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin - [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug - [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types - [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache - [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser - [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies - [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. 
- - [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true - - [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL - - [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set - - [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file - -#### Features - - - [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin - - [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality - - [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy - - [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. namepass) - - [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest - - [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols - - [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging - - [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin - - [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric - - [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url - - [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows - - [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script - - [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin - - [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input - - [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON - - [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) - - [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients - - [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only - -#### New Inputs - - [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch - - [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog - - [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData - - [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - 
Contributed by @ml054 and @bartoncasey - - [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga - -#### New Outputs - - [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac - - [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @gkatzioura - - [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey - - [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb - -#### New Aggregators - - [Derivative Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative)- Contributed by @KarstenSchnitter - - [Quantile Aggregator Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan - -#### New Processors - - [AWS EC2 Metadata Processor Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo - -#### New Parsers - - [XML Parser Plugin ](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan - -#### New Serializers - - [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox +### Release Notes -#### New External Plugins - - [GeoIP Processor Plugin ](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali - - [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat - - [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope - -## v1.17.3 [2021-02-17] +- Support Go version 1.16.2 +- Added support for code signing in Windows + +### Bugfixes + +- [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice +- [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling +- [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list +- [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin +- [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues +- [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count +- [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions +- [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin +- [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug +- [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types +- [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache +- [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' 
from line-protocol parser +- [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies +- [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready. +- [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true +- [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL +- [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set +- [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file -#### Bugfixes +### Features - - [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files - - [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 - - [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 - - [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value - - [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue - - [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt - - [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors - - [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper - - [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config - - [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline - - [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux - - [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version +- [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin +- [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality +- [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy +- [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (eg. 
namepass) +- [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest +- [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols +- [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag(as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging +- [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin +- [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric +- [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url +- [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows +- [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script +- [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin +- [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input +- [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON +- [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) +- [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients +- [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only +### New Inputs -## v1.17.2 [2021-01-28] +- [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch +- [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog +- [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData +- [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey +- [NFS Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga + +### New Outputs + +- [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac +- [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/bigquery) - Contributed by @gkatzioura +- [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey +- [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb + +### New Aggregators + +- [Derivative Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) - Contributed by @KarstenSchnitter +- [Quantile Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan + +### New 
Processors + +- [AWS EC2 Metadata Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo -#### Bugfixes +### New Parsers + +- [XML Parser Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan + +### New Serializers - - [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native - - [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function +- [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox + +### New External Plugins + +- [GeoIP Processor Plugin](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali +- [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat +- [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope + +## v1.17.3 [2021-02-17] +### Bugfixes + +- [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files +- [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 +- [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 +- [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value +- [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue +- [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt +- [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors +- [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper +- [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config +- [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline +- [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux +- [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version + +## v1.17.2 [2021-01-28] + +### Bugfixes + +- [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native +- [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function ## v1.17.1 [2021-01-27] -#### Release Notes - - Included a few more changes that add configuration options to plugins as it's been while since the last release - - - [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool - - [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows - - [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible - - [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser - - 
[#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout - - [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames - - [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C - - [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input - - [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin - - [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection - -#### Bugfixes - - - [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses - - [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors - - [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. - - [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value - - [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. - - [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin - - [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue - - [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 - - [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 - - [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 - - [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. - - [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurment name, or if it's blank, use the 'name' field of the event's json. - - [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. 
- - [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge - - [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses - - [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 - - [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start -#### New External Plugins - - - [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin +### Release Notes +Included a few more changes that add configuration options to plugins as it's been a while since the last release + +- [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool +- [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows +- [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible +- [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser +- [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout +- [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames +- [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C +- [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input +- [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin +- [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection + +### Bugfixes + +- [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses +- [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors +- [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. +- [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value +- [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. 
+- [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin +- [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue +- [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 +- [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 +- [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 +- [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. +- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurement name, or if it's blank, use the 'name' field of the event's json. +- [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle. +- [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge +- [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses +- [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 +- [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start + +### New External Plugins + +- [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin ## v1.17.0 [2020-12-18] -#### Release Notes - - - Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. - - New input plugins: Riemann-Protobuff Listener, Intel PowerStat - - New output plugins: Yandex.Cloud monitoring, Logz.io - - New parser plugin: Prometheus - - New serializer: Prometheus remote write - -#### Bugfixes - - - [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter - - [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. 
- - [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests - - [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test - - [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 - - [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition - - [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write - - [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there - - [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data - - [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser - - [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers - - [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits - - [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory - -#### Features - - - [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement - - [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin - - [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI - - [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries - - [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. 
- - [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers - - [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries - - [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call - - [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB - - [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] - - [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty - - [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin - - [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le - - [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) - - [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data - - [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input - - [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin - - [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input - - [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values - - [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes - - [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin - - [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 - - [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin - - [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin - - [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input - - [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics - - [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor - - [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added - - [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support - - -#### New Parser Plugins - - - [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus - -#### New Serializer Plugins - - - [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer - -#### New Input Plugins - - - [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff 
Listener - - [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin - -#### New Output Plugins - - - [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring - - [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin +### Release Notes +- Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. +- New input plugins: Riemann-Protobuff Listener, Intel PowerStat +- New output plugins: Yandex.Cloud monitoring, Logz.io +- New parser plugin: Prometheus +- New serializer: Prometheus remote write + +### Bugfixes + +- [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter +- [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. +- [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests +- [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test +- [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 +- [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition +- [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write +- [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there +- [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data +- [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser +- [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers +- [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits +- [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory -## v1.16.3 [2020-12-01] +### Features + +- [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement +- [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin +- [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI +- [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries +- [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin. 
+- [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers +- [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries +- [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call +- [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB +- [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] +- [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty +- [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin +- [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le +- [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) +- [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data +- [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input +- [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin +- [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input +- [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values +- [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes +- [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin +- [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 +- [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin +- [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin +- [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input +- [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics +- [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor +- [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added +- [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support + +### New Parser Plugins + +- [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus + +### New Serializer Plugins + +- [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer + +### New Input Plugins + +- [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuff Listener +- 
[#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel PowerStat input plugin + +### New Output Plugins + +- [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring +- [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin -#### Bugfixes - - - [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 - - [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 - - [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype - - [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name - - [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" - - [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency - - [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor - - [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column - - [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output - - [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor - - [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark - - [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list - - [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging - - [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors - - [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function +## v1.16.3 [2020-12-01] +### Bugfixes + +- [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. 
#8482 +- [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 +- [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype +- [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name +- [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" +- [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency +- [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor +- [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column +- [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output +- [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor +- [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark +- [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list +- [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging +- [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors +- [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function ## v1.16.2 [2020-11-13] -#### Bugfixes - - - [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). 
- - [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs - - [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) - - [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme - - [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes - - [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config - - [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests - - [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 - - [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 - - [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag - - [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding - - [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 - - [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test - - [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin +### Bugfixes + +- [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). +- [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs +- [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) +- [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme +- [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes +- [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config +- [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests +- [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 +- [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 +- [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag +- [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding +- [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 +- [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test +- [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin ## v1.16.1 [2020-10-28] -#### Release Notes - - - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +### Release Notes -#### Bugfixes +- 
[#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI - - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix - - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters - - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc - - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 - - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir - - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error - - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers +### Bugfixes +- [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix +- [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters +- [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc +- [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 +- [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir +- [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error +- [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers ## v1.16.0 [2020-10-21] -#### Release Notes - - - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd - - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release - - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor - -#### Features - - - [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" - - [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag - - [#7894](https://github.com/influxdata/telegraf/pull/7894) 
`inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter - - [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin - - [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type - - [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin - - [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents - - [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin - - [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option - - [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin - - [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin - - [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output - - [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric - - [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route - - [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark - - [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) - - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting - -#### Bugfixes - - - [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config - - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags - - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed - - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors - - [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored - - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF - - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds - - 
[#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions - - [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c - - [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF - - [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target - - [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types - - [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth - - [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure - - [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol - - [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters - - [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using logging - - [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic - - [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan - - [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping - - [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset - - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression - - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 - - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform - - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - -#### New Input Plugins - - - [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair - - [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak - - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - - [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient - - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - - [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv - -#### New Output Plugins - - - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue - - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo - - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - 
Contributed by @piotrwest - -#### New External Plugins - - See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins - - - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos - - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +### Release Notes -## v1.15.4 [2020-10-20] +- New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) +- [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck +- [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 +- [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd +- [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release +- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor -#### Bugfixes +### Features - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - - [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging +- [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" +- [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag +- [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter +- [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin +- [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type +- [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin +- [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents +- [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin +- [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option +- [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin +- [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance 
counters to SqlServer input plugin +- [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output +- [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric +- [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route +- [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark +- [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) +- [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag +- [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) +- [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands +- [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 +- [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code +- [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting + +### Bugfixes + +- [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config +- [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags +- [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed +- [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors +- [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements +- [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored +- [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF +- [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds +- [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions +- [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c +- [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF +- [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target +- [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types +- [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth +- [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure +- [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol +- [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters +- [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using 
logging +- [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic +- [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan +- [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping +- [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset +- [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression +- [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 +- [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform +- [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd + +### New Input Plugins + +- [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair +- [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak +- [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode +- [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData +- [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient +- [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak +- [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv + +### New Output Plugins + +- [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue +- [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo +- [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest + +### New External Plugins + +See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins + +- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. +- [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos +- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. 
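The v1.16.0 notes above include [#8198], which moves the default port of the `influxdb_v2_listener` input from 9999 to 8086 to line up with InfluxDB OSS 2.0. A minimal sketch of the affected setting, using the plugin's `service_address` option with the new default written out explicitly:

```toml
[[inputs.influxdb_v2_listener]]
  ## Binds to the InfluxDB OSS 2.0 port by default as of 1.16; older releases used :9999
  service_address = ":8086"
```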
-## v1.15.3 [2020-09-11] +## v1.15.4 [2020-10-20] -#### Release Notes +### Bugfixes - - Many documentation updates - - New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd +- [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processers using logging -#### Bugfixes +## v1.15.3 [2020-09-11] - - [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition - - [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. update dockerfiles to Go 1.14 - - [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError - - [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration - - [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer - - [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 - - [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec - - [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 - - [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests - - [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor - - [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url - - [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf +### Release Notes + +- Many documentation updates +- New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) + +### Bugfixes + +- [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition +- [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. 
update dockerfiles to Go 1.14 +- [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError +- [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration +- [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer +- [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 +- [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec +- [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 +- [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests +- [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor +- [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url +- [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf ## v1.15.2 [2020-07-31] -#### Bug Fixes +### Bug Fixes - [#7905](https://github.com/influxdata/telegraf/issues/7905): Fix RPM /var/log/telegraf permissions - [#7880](https://github.com/influxdata/telegraf/issues/7880): Fix tail following on EOF ## v1.15.1 [2020-07-22] -#### Bug Fixes +### Bug Fixes - [#7877](https://github.com/influxdata/telegraf/pull/7877): Fix architecture in non-amd64 deb and rpm packages. ## v1.15.0 [2020-07-22] -#### Release Notes +### Release Notes - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. @@ -770,12 +928,12 @@ `/etc/telegraf/telegraf.conf.sample`. The tar and zip packages now include the version in the top level directory. -#### New Inputs +### New Inputs - [nginx_sts](/plugins/inputs/nginx_sts/README.md) - Contributed by @zdmytriv - [redfish](/plugins/inputs/redfish/README.md) - Contributed by @sarvanikonda -#### New Processors +### New Processors - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [execd](/plugins/processors/execd/README.md) - Contributed by @influxdata @@ -785,12 +943,12 @@ - [reverse_dns](/plugins/processors/reverse_dns/README.md) - Contributed by @influxdata - [starlark](/plugins/processors/starlark/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi - [execd](/plugins/outputs/execd/README.md) - Contributed by @influxdata -#### Features +### Features - [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors. - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. @@ -838,7 +996,7 @@ - [#7154](https://github.com/influxdata/telegraf/pull/7154): Add v3 metadata support to ecs input. - [#7792](https://github.com/influxdata/telegraf/pull/7792): Support utf-16 in file and tail inputs. 
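The v1.15.0 release notes above deprecate the `logparser` input in favour of `tail` with `data_format = "grok"`. A minimal migration sketch, assuming an Apache-style access log and Telegraf's built-in `COMBINED_LOG_FORMAT` grok pattern; the path and pattern are placeholders for whatever the deprecated section was parsing:

```toml
[[inputs.tail]]
  ## Replacement for a deprecated [[inputs.logparser]] section
  files = ["/var/log/apache/access.log"]
  from_beginning = false
  data_format = "grok"
  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
```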
-#### Bug Fixes +### Bug Fixes - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. @@ -857,7 +1015,7 @@ ## v1.14.5 [2020-06-30] -#### Bug Fixes +### Bug Fixes - [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve the performance of the procstat input. - [#7658](https://github.com/influxdata/telegraf/pull/7658): Fix ping exit code handling on non-Linux. @@ -869,7 +1027,7 @@ ## v1.14.4 [2020-06-09] -#### Bug Fixes +### Bug Fixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. - [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor. @@ -878,7 +1036,7 @@ ## v1.14.3 [2020-05-19] -#### Bug Fixes +### Bug Fixes - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. - [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor. @@ -887,7 +1045,7 @@ ## v1.14.2 [2020-04-28] -#### Bug Fixes +### Bug Fixes - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. @@ -901,7 +1059,7 @@ ## v1.14.1 [2020-04-14] -#### Bug Fixes +### Bug Fixes - [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. - [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. @@ -911,7 +1069,7 @@ ## v1.14 [2020-03-26] -#### Release Notes +### Release Notes - In the `sqlserver` input, the `sqlserver_azurestats` measurement has been renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric @@ -920,7 +1078,7 @@ - The `date` processor now uses the UTC timezone when creating its tag. In previous versions the local time was used. -#### New Inputs +### New Inputs - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen @@ -932,17 +1090,17 @@ - [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI -#### New Processors +### New Processors - [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern - [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour -#### New Outputs +### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert -#### Features +### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. @@ -981,7 +1139,7 @@ - [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. - [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. 
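One v1.14 release note above is a behaviour change rather than a new option: the `date` processor now builds its tag from the metric timestamp in UTC instead of local time. A short sketch of the kind of configuration this affects (the tag name and format here are illustrative):

```toml
[[processors.date]]
  ## As of 1.14 the tag value is computed in UTC, not the agent's local timezone
  tag_key = "month"
  date_format = "Jan"
```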
-#### Bug Fixes +### Bug Fixes - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. @@ -997,11 +1155,11 @@ ## v1.13.4 [2020-02-25] -#### Release Notes +### Release Notes - Official packages now built with Go 1.13.8. -#### Bug Fixes +### Bug Fixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. @@ -1013,7 +1171,7 @@ ## v1.13.3 [2020-02-04] -#### Bug Fixes +### Bug Fixes - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. @@ -1022,7 +1180,7 @@ ## v1.13.2 [2020-01-21] -#### Bug Fixes +### Bug Fixes - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. @@ -1034,7 +1192,7 @@ ## v1.13.1 [2020-01-08] -#### Bug Fixes +### Bug Fixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. @@ -1049,7 +1207,7 @@ ## v1.13 [2019-12-12] -#### Release Notes +### Release Notes - Official packages built with Go 1.13.5. This affects the minimum supported version on several platforms, most notably requiring Windows 7 (2008 R2) or @@ -1061,7 +1219,7 @@ passthrough metrics will be unchanged. Refer to the `prometheus` input for details about the mapping. -#### New Inputs +### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston @@ -1070,15 +1228,15 @@ - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream - [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer -#### New Processors +### New Processors - [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt -#### New Aggregators +### New Aggregators - [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata -#### Features +### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. @@ -1120,7 +1278,7 @@ - [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. - [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. -#### Bug Fixes +### Bug Fixes - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. 
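Among the v1.13 features above, [#6740] adds a `base64decode` operation to the strings processor. A sketch of how such an operation is declared, assuming a field named `message` that carries base64-encoded text:

```toml
[[processors.strings]]
  ## Decode a base64-encoded field in place (the field name is illustrative)
  [[processors.strings.base64decode]]
    field = "message"
```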
@@ -1137,7 +1295,7 @@ ## v1.12.6 [2019-11-19] -#### Bug Fixes +### Bug Fixes - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. @@ -1146,7 +1304,7 @@ ## v1.12.5 [2019-11-12] -#### Bug Fixes +### Bug Fixes - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. @@ -1160,11 +1318,11 @@ ## v1.12.4 [2019-10-23] -#### Release Notes +### Release Notes - Official packages built with Go 1.12.12. -#### Bug Fixes +### Bug Fixes - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. - [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. @@ -1172,7 +1330,7 @@ ## v1.12.3 [2019-10-07] -#### Bug Fixes +### Bug Fixes - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. @@ -1184,7 +1342,7 @@ ## v1.12.2 [2019-09-24] -#### Bug Fixes +### Bug Fixes - [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. @@ -1194,7 +1352,7 @@ ## v1.12.1 [2019-09-10] -#### Bug Fixes +### Bug Fixes - [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. - [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. @@ -1207,14 +1365,14 @@ ## v1.12 [2019-09-03] -#### Release Notes +### Release Notes - The cluster health related fields in the elasticsearch input have been split out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. -#### New Inputs +### New Inputs - [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu @@ -1224,22 +1382,22 @@ - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao -#### New Parsers +### New Parsers - [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev -#### New Processors +### New Processors - [date](/plugins/processors/date/README.md) - Contributed by @influxdata - [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata - [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo -#### Features +### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. @@ -1291,7 +1449,7 @@ - [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. 
- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. @@ -1308,7 +1466,7 @@ ## v1.11.5 [2019-08-27] -#### Bug Fixes +### Bug Fixes - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. @@ -1321,7 +1479,7 @@ ## v1.11.4 [2019-08-06] -#### Bug Fixes +### Bug Fixes - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. - [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. @@ -1329,7 +1487,7 @@ ## v1.11.3 [2019-07-23] -#### Bug Fixes +### Bug Fixes - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. - [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. @@ -1342,7 +1500,7 @@ ## v1.11.2 [2019-07-09] -#### Bug Fixes +### Bug Fixes - [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. - [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. @@ -1353,7 +1511,7 @@ ## v1.11.1 [2019-06-25] -#### Bug Fixes +### Bug Fixes - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. @@ -1367,7 +1525,7 @@ ## v1.11 [2019-06-11] -#### Release Notes +### Release Notes - The `uptime_format` field in the system input has been deprecated, use the `uptime` field instead. @@ -1375,7 +1533,7 @@ requires `GetMetricData` permissions instead of `GetMetricStatistics`. The `units` tag is not available from this API and is no longer collected. -#### New Inputs +### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx @@ -1385,20 +1543,20 @@ - [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje -#### New Aggregators +### New Aggregators - [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto -#### New Outputs +### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo - [health](/plugins/outputs/health/README.md) - Contributed by @influxdata -#### New Serializers +### New Serializers - [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck -#### Features +### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. 
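The last feature above, [#5569], adds hexadecimal string-to-integer conversion to the converter processor. A minimal sketch, assuming a hypothetical field `register_value` that arrives as a hex string such as `0x1f`:

```toml
[[processors.converter]]
  [processors.converter.fields]
    ## Hex strings such as "0x1f" are now parsed to integers
    integer = ["register_value"]
```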
@@ -1430,7 +1588,7 @@ - [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. - [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. @@ -1456,7 +1614,7 @@ ## v1.10.4 [2019-05-14] -#### Bug Fixes +### Bug Fixes - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. @@ -1470,20 +1628,20 @@ ## v1.10.3 [2019-04-16] -#### Bug Fixes +### Bug Fixes - [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. - [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. ## v1.10.2 [2019-04-02] -#### Release Notes +### Release Notes - String fields no longer have leading and trailing quotation marks removed in the grok parser. If you are capturing quoted strings you may need to update the patterns. -#### Bug Fixes +### Bug Fixes - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. @@ -1503,7 +1661,7 @@ ## v1.10.1 [2019-03-19] -#### Bug Fixes +### Bug Fixes - [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. @@ -1515,7 +1673,7 @@ ## v1.10 [2019-03-05] -#### New Inputs +### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata @@ -1526,16 +1684,16 @@ - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 - [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 -#### New Outputs +### New Outputs - [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye -#### New Serializers +### New Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller - [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno -#### Features +### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. @@ -1573,7 +1731,7 @@ - [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. - [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. -#### Bug Fixes +### Bug Fixes - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. 
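The v1.10.2 release note above warns that the grok parser no longer strips surrounding quotation marks from string fields, so patterns that capture quoted strings may need updating. One hedged way to adapt is to match the quotes as literals and capture only the text between them; the log path and field names here are illustrative:

```toml
[[inputs.tail]]
  files = ["/var/log/app.log"]
  data_format = "grok"
  ## The quotes are matched literally, so the captured field no longer includes them
  grok_patterns = ['"%{DATA:message}" %{NUMBER:status:int}']
```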
@@ -1589,7 +1747,7 @@ ## v1.9.5 [2019-02-26] -#### Bug Fixes +### Bug Fixes - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. - [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. @@ -1603,7 +1761,7 @@ ## v1.9.4 [2019-02-05] -#### Bug Fixes +### Bug Fixes - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. @@ -1612,7 +1770,7 @@ ## v1.9.3 [2019-01-22] -#### Bug Fixes +### Bug Fixes - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. @@ -1623,7 +1781,7 @@ ## v1.9.2 [2019-01-08] -#### Bug Fixes +### Bug Fixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. @@ -1642,7 +1800,7 @@ ## v1.9.1 [2018-12-11] -#### Bug Fixes +### Bug Fixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. @@ -1657,7 +1815,7 @@ ## v1.9 [2018-11-20] -#### Release Notes +### Release Notes - The `http_listener` input plugin has been renamed to `influxdb_listener` and use of the original name is deprecated. The new name better describes the @@ -1675,7 +1833,7 @@ the new option `max_undelivered_messages` to limit the number of outstanding unwritten metrics. -#### New Inputs +### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe @@ -1684,11 +1842,11 @@ - [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment -#### New Outputs +### New Outputs - [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment -#### Features +### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. @@ -1711,7 +1869,7 @@ - [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. - [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. -#### Bug Fixes +### Bug Fixes - [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. - [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. @@ -2095,7 +2253,6 @@ - The new `http` input configured with `data_format = "json"` can perform the same task as the, now deprecated, `httpjson` input. 
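The note above points out that the `http` input with `data_format = "json"` covers the job of the now-deprecated `httpjson` input. A minimal replacement sketch, assuming a hypothetical JSON stats endpoint:

```toml
[[inputs.http]]
  urls = ["http://localhost:8080/stats"]
  data_format = "json"
  ## Optional: keep the measurement name the old plugin produced
  # name_override = "httpjson"
```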
- ### New Inputs - [http](./plugins/inputs/http/README.md) - Thanks to @grange74 @@ -2214,6 +2371,7 @@ ## v1.5 [2017-12-14] ### New Plugins + - [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno - [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv - [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge @@ -2544,7 +2702,7 @@ machines. Telegraf < 1.3: -``` +```text # field_name value active+clean 123 active+clean+scrubbing 3 @@ -2552,7 +2710,7 @@ active+clean+scrubbing 3 Telegraf >= 1.3: -``` +```text # field_name value tag count 123 state=active+clean count 3 state=active+clean+scrubbing @@ -2862,7 +3020,7 @@ that pertain to node vs. namespace statistics. This means that the default github_webhooks config: -``` +```toml # A Github Webhook Event collector [[inputs.github_webhooks]] ## Address and port to host Webhook listener on @@ -2871,7 +3029,7 @@ This means that the default github_webhooks config: should now look like: -``` +```toml # A Webhooks Event collector [[inputs.webhooks]] ## Address and port to host Webhook listener on @@ -2926,7 +3084,7 @@ consistent with the behavior of `collection_jitter`. - [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! - [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. - [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. -- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified +- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL `http://localhost:15672` if not specified - [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. - [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified - [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. @@ -3024,8 +3182,8 @@ to "stdout". ### Release Notes -- **Breaking change** in jolokia plugin. See -https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md +- **Breaking change** in jolokia plugin. See the +[jolokia README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md) for updated configuration. The plugin will now support proxy mode and will make POST requests. @@ -3130,14 +3288,16 @@ It is not included on the report path. This is necessary for reporting host disk ## v0.12.1 [2016-04-14] ### Release Notes + - Breaking change in the dovecot input plugin. See Features section below. -- Graphite output templates are now supported. See -https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +- Graphite output templates are now supported. See the +[Output Formats README](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite) - Possible breaking change for the librato and graphite outputs. Telegraf will no longer insert field names when the field is simply named `value`. 
This is because the `value` field is redundant in the graphite/librato context. ### Features + - [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra! - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. @@ -3150,6 +3310,7 @@ because the `value` field is redundant in the graphite/librato context. - [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin. ### Bug Fixes + - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) - [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! - [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! @@ -3158,6 +3319,7 @@ because the `value` field is redundant in the graphite/librato context. ## v0.12.0 [2016-04-05] ### Features + - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension @@ -3177,6 +3339,7 @@ because the `value` field is redundant in the graphite/librato context. - [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bug Fixes + - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. @@ -3191,21 +3354,23 @@ because the `value` field is redundant in the graphite/librato context. ## v0.11.1 [2016-03-17] ### Release Notes + - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features + - [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bug Fixes + - [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix - [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic ## v0.11.0 [2016-03-15] -### Release Notes - ### Features + - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugns. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! @@ -3223,6 +3388,7 @@ because the `value` field is redundant in the graphite/librato context. - [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics. 
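Among the v0.12.0 features above, [#951] adds parsing of environment variables in the config file. A small sketch of the idea, assuming an `INFLUX_URL` variable exported in the agent's environment:

```toml
[[outputs.influxdb]]
  ## $INFLUX_URL is substituted from the environment when the config file is loaded
  urls = ["$INFLUX_URL"]
  database = "telegraf"
```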
### Bug Fixes + - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! @@ -3238,15 +3404,18 @@ because the `value` field is redundant in the graphite/librato context. ## v0.10.4.1 ### Release Notes + - Bug in the build script broke deb and rpm packages. ### Bug Fixes + - [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken - [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken ## v0.10.4 [2016-02-24] ### Release Notes + - The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose. - There are also now namedrop/namepass parameters for passing/dropping based @@ -3254,6 +3423,7 @@ on the metric _name_. - Experimental windows builds now available. ### Features + - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! @@ -3261,12 +3431,14 @@ on the metric _name_. - [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath! ### Bug Fixes + - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldnt print in quiet mode. - [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters. ## v0.10.3 [2016-02-18] ### Release Notes + - Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and @@ -3283,6 +3455,7 @@ points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section. ### Features + - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! @@ -3297,6 +3470,7 @@ and is in the `[agent]` config section. - [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes! ### Bug Fixes + - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. @@ -3306,6 +3480,7 @@ and is in the `[agent]` config section. ## v0.10.2 [2016-02-04] ### Release Notes + - Statsd timing measurements are now aggregated into a single measurement with fields. - Graphite output now inserts tags into the bucket in alphabetical order. @@ -3315,6 +3490,7 @@ doing the opposite of what it claimed to do (yikes). 
It's been replaced by `insecure_skip_verify` ### Features + - [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse! - [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type. - [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch! @@ -3324,6 +3500,7 @@ doing the opposite of what it claimed to do (yikes). It's been replaced by - [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support ### Bug Fixes + - [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements. - [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working. - [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong. @@ -3345,6 +3522,7 @@ for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from. ### Features + - [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! @@ -3369,6 +3547,7 @@ specifying a docker endpoint to get metrics from. - [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso! ### Bug Fixes + - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! @@ -3381,6 +3560,7 @@ specifying a docker endpoint to get metrics from. ## v0.10.0 [2016-01-12] ### Release Notes + - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf` - **breaking change** `plugins` have been renamed to `inputs`. This was done because @@ -3401,13 +3581,14 @@ instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). -### Packaging change note: +### Packaging change note RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup. ### Features + - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags - Added ability to specify per-plugin measurement suffix and prefix. @@ -3419,17 +3600,20 @@ configurations overwritten by the upgrade. There is a backup stored at ## v0.2.5 [unreleased] ### Features + - [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! - [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! 
- [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bug Fixes + - [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! - [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features + - [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! - [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! - [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters @@ -3440,12 +3624,14 @@ configurations overwritten by the upgrade. There is a backup stored at - [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bug Fixes + - [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue - [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. ## v0.2.3 [2015-11-30] ### Release Notes + - **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`. and most of the config option names have changed. This only affects the kafka consumer _plugin_ (not the @@ -3455,7 +3641,7 @@ functional. - Plugins can now be specified as a list, and multiple plugin instances of the same type can be specified, like this: -``` +```toml [[inputs.cpu]] percpu = false totalcpu = true @@ -3470,6 +3656,7 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features + - [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! - [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. - [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! @@ -3477,21 +3664,25 @@ same type can be specified, like this: - [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bug Fixes + - [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. - [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] ### Release Notes + - 0.2.1 has a bug where all lists within plugins get duplicated, this includes lists of servers/URLs. 0.2.2 is being released solely to fix that bug ### Bug Fixes + - [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] ### Release Notes + - Telegraf will no longer use docker-compose for "long" unit test, it has been changed to just run docker commands in the Makefile. See `make docker-run` and `make docker-kill`. `make test` will still run all unit tests with docker. @@ -3504,6 +3695,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and same type. ### Features + - [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive! - [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! - [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! @@ -3516,6 +3708,7 @@ same type. 
- [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! ### Bug Fixes + - [#331](https://github.com/influxdata/telegraf/pull/331): Dont overwrite host tag in redis plugin. - [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements. - [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes @@ -3524,6 +3717,7 @@ same type. ## v0.2.0 [2015-10-27] ### Release Notes + - The -test flag will now only output 2 collections for plugins that need it - There is a new agent configuration option: `flush_interval`. This option tells Telegraf how often to flush data to InfluxDB and other output sinks. For example, @@ -3540,6 +3734,7 @@ be controlled via the `round_interval` and `flush_jitter` config options. - Telegraf will now retry metric flushes twice ### Features + - [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info - [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini - [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin @@ -3564,6 +3759,7 @@ of metrics collected and from how many inputs. - [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham! ### Bug Fixes + - [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! - [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! - [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! @@ -3576,6 +3772,7 @@ of metrics collected and from how many inputs. ## v0.1.9 [2015-09-22] ### Release Notes + - InfluxDB output config change: `url` is now `urls`, and is a list. Config files will still be backwards compatible if only `url` is specified. - The -test flag will now output two metric collections @@ -3597,6 +3794,7 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features + - [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support - [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! - [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! @@ -3607,6 +3805,7 @@ re-added in a "verbose" mode if there is demand for it. and filtering when specifying a config file. ### Bug Fixes + - [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug @@ -3622,10 +3821,12 @@ and filtering when specifying a config file. ## v0.1.8 [2015-09-04] ### Release Notes + - Telegraf will now write data in UTC at second precision by default - Now using Go 1.5 to build telegraf ### Features + - [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. 
Thanks @KPACHbIuLLIAnO4 - [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes @@ -3639,6 +3840,7 @@ and filtering when specifying a config file. ## v0.1.7 [2015-08-28] ### Features + - [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. - [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. @@ -3648,6 +3850,7 @@ and filtering when specifying a config file. - Indent the toml config file for readability ### Bug Fixes + - [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. - [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. - [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! @@ -3656,11 +3859,13 @@ and filtering when specifying a config file. ## v0.1.6 [2015-08-20] ### Features + - [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! - [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies - [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bug Fixes + - [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility - [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! - [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! @@ -3669,6 +3874,7 @@ and filtering when specifying a config file. ## v0.1.5 [2015-08-13] ### Features + - [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! - [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! - [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! @@ -3687,6 +3893,7 @@ and filtering when specifying a config file. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! ### Bug Fixes + - [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users - [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes - [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama @@ -3696,31 +3903,37 @@ and filtering when specifying a config file. ## v0.1.4 [2015-07-09] ### Features + - [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! ### Bug Fixes + - [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! - [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! ## v0.1.3 [2015-07-05] ### Features + - [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! - [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham! ### Bug Fixes + - [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! 
- [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! ## v0.1.2 [2015-07-01] ### Features + - [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! - [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. - [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! - [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bug Fixes + - [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. - [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! - [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d5732dcbfa1d1..06171f26276c4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,4 @@ -### Contributing +# Contributing 1. [Sign the CLA][cla]. 2. Open a [new issue][] to discuss the changes you would like to make. This is @@ -16,30 +16,32 @@ **Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail. This is because github will use the pull request title if there are multiple commits, but if there is only one commit it will use it instead. -#### When will your contribution get released? +## When will your contribution get released? + We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your fix is a bug fix, it will be released in the next patch release after it is merged to master. If your release is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December. -#### Contributing an External Plugin +## Contributing an External Plugin Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. -#### Security Vulnerability Reporting +## Security Vulnerability Reporting + InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our -open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about -security vulnerability reporting, +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). 
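As noted above, a pull request with a single commit must use the conventional commit format or the `Semantic Pull Request` check will fail. A hypothetical subject line in that format could look like the following (the type, scope, and description are purely illustrative and not taken from this patch):

```text
fix(inputs.example): handle empty hostnames without returning an error
```

The first token is the change type (`fix`, `feat`, `chore`, ...), the optional scope in parentheses names the affected plugin, and the text after the colon is a short imperative description.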
-### GoDoc +## GoDoc Public interfaces for inputs, outputs, processors, aggregators, metrics, and the accumulator can be found in the GoDoc: [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) -### Common development tasks +## Common development tasks **Adding a dependency:** @@ -52,7 +54,7 @@ Telegraf uses Go modules. Assuming you can already build the project, run this i Before opening a pull request you should run the linter checks and the short tests. -``` +```shell make check make test ``` @@ -63,34 +65,35 @@ make test Running the integration tests requires several docker containers to be running. You can start the containers with: -``` + +```shell docker-compose up ``` To run only the integration tests use: -``` +```shell make test-integration ``` To run the full test suite use: -``` + +```shell make test-all ``` Use `make docker-kill` to stop the containers. ### For more developer resources + - [Code Style][codestyle] - [Deprecation][deprecation] - [Logging][logging] - [Metric Format Changes][metricformat] - [Packaging][packaging] -- [Logging][logging] -- [Packaging][packaging] - [Profiling][profiling] - [Reviews][reviews] -- [Sample Config][sample config] +- [Sample Config][sample config] [cla]: https://www.influxdata.com/legal/cla/ [new issue]: https://github.com/influxdata/telegraf/issues/new/choose diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 0de5ae47949d9..baa3ff1daf114 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -1,18 +1,19 @@ # External Plugins -This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). -Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. +This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). +Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. Pull requests welcome. ## Inputs + - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Fundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. - [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensors data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) - [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). - [rand](https://github.com/ssoroka/rand) - Generate random numbers -- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). 
+- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. - [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts - [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels @@ -27,8 +28,9 @@ Pull requests welcome. - [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather the statistic data from DB2 RDBMS ## Outputs + - [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send Amazon Kinesis. ## Processors - - [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. - + +- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. diff --git a/Makefile b/Makefile index 09a6babaee73f..2d19dd19fc4c1 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,8 @@ HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) -X main.goos=$(GOOS) -X main.goarch=$(GOARCH) ifneq ($(tag),) LDFLAGS += -X main.version=$(version) +else + LDFLAGS += -X main.version=$(version)-$(commit) endif # Go built-in race detector works only for 64 bits architectures. @@ -140,24 +142,32 @@ vet: .PHONY: lint-install lint-install: - + @echo "Installing golangci-lint" go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 + @echo "Installing markdownlint" + npm install -g markdownlint-cli + .PHONY: lint lint: -ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 -endif - + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } golangci-lint run + @which markdownlint >/dev/null 2>&1 || { \ + echo "markdownlint not found, please run: make lint-install"; \ + exit 1; \ + } + markdownlint . + .PHONY: lint-branch lint-branch: -ifeq (, $(shell which golangci-lint)) - $(info golangci-lint can't be found, please run: make lint-install) - exit 1 -endif + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } golangci-lint run --new-from-rev master @@ -201,8 +211,8 @@ plugin-%: .PHONY: ci-1.17 ci-1.17: - docker build -t quay.io/influxdb/telegraf-ci:1.17.2 - < scripts/ci-1.17.docker - docker push quay.io/influxdb/telegraf-ci:1.17.2 + docker build -t quay.io/influxdb/telegraf-ci:1.17.3 - < scripts/ci-1.17.docker + docker push quay.io/influxdb/telegraf-ci:1.17.3 .PHONY: install install: $(buildbin) diff --git a/README.md b/README.md index b76ad45c0d1a3..122b20839db6b 100644 --- a/README.md +++ b/README.md @@ -43,24 +43,29 @@ page or from each [GitHub Releases](https://github.com/influxdata/telegraf/relea InfluxData also provides a package repo that contains both DEB and RPM downloads. -For deb-based platforms run the following to add the repo key and setup a new -sources.list entry: +For deb-based platforms (e.g. 
Ubuntu and Debian) run the following to add the +repo key and setup a new sources.list entry: ```shell -curl -s https://repos.influxdata.com/influxdb.key | gpg --dearmor > /etc/apt/trusted.gpg.d/influxdb.gpg -export DISTRIB_ID=$(lsb_release -si); export DISTRIB_CODENAME=$(lsb_release -sc) -echo "deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/${DISTRIB_ID,,} ${DISTRIB_CODENAME} stable" > /etc/apt/sources.list.d/influxdb.list +wget -qO- https://repos.influxdata.com/influxdb.key | sudo tee /etc/apt/trusted.gpg.d/influxdb.asc >/dev/null +source /etc/os-release +echo "deb https://repos.influxdata.com/${ID} ${VERSION_CODENAME} stable" | sudo tee /etc/apt/sources.list.d/influxdb.list +sudo apt-get update && sudo apt-get install telegraf ``` -For RPM-based platforms use the following repo file in `/etc/yum.repos.d/`: +For RPM-based platforms (e.g. RHEL, CentOS) use the following to create a repo +file and install telegraf: -```text +```shell +cat <=1.17 (1.17.2 recommended) 2. Clone the Telegraf repository: - ``` + + ```shell git clone https://github.com/influxdata/telegraf.git ``` + 3. Run `make` from the source directory - ``` + + ```shell cd telegraf make ``` @@ -101,31 +109,31 @@ See usage with: telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file ```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb output plugins defined ```shell telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputting metrics to stdout: +### Run a single telegraf collection, outputting metrics to stdout ```shell telegraf --config telegraf.conf --test ``` -#### Run telegraf with all plugins defined in config file: +### Run telegraf with all plugins defined in config file ```shell telegraf --config telegraf.conf ``` -#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: +### Run telegraf, enabling the cpu & memory input, and influxdb output plugins ```shell telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb @@ -133,7 +141,7 @@ telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ## Documentation -[Latest Release Documentation](https://docs.influxdata.com/telegraf) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) For documentation on the latest development code see the [documentation index](/docs). diff --git a/SECURITY.md b/SECURITY.md index 1d74711aa9079..5b72cf8634467 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,5 +2,5 @@ ## Reporting a Vulnerability -InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). 
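For the RPM-based platforms covered in the README changes above, the heredoc that creates the repo file generally takes the shape sketched below; treat the file name, `baseurl`, and `gpgkey` values as assumptions based on the public InfluxData package repositories rather than the exact contents of this patch:

```shell
cat <<EOF | sudo tee /etc/yum.repos.d/influxdata.repo
[influxdata]
name = InfluxData Repository - Stable
baseurl = https://repos.influxdata.com/rhel/\$releasever/\$basearch/stable
enabled = 1
gpgcheck = 1
gpgkey = https://repos.influxdata.com/influxdb.key
EOF
sudo yum install telegraf
```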
diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 688c1e5bdd6c5..390c55ad1d818 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -15,6 +15,8 @@ import ( "syscall" "time" + "github.com/fatih/color" + "github.com/influxdata/tail/watch" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" @@ -60,6 +62,8 @@ var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") +var fDeprecationList = flag.Bool("deprecation-list", false, + "print all deprecated plugins or plugin options.") var fSectionFilters = flag.String("section-filter", "", "filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'") var fInputFilters = flag.String("input-filter", "", @@ -254,6 +258,29 @@ func runAgent(ctx context.Context, logger.SetupLogging(logConfig) + log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) + log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) + if !*fRunOnce && (*fTest || *fTestWait != 0) { + log.Print(color.RedString("W! Outputs are not used in testing mode!")) + } else { + log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + } + log.Printf("I! Tags enabled: %s", c.ListTags()) + + if count, found := c.Deprecations["inputs"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated inputs: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["aggregators"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated aggregators: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["processors"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated processors: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["outputs"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated outputs: %d and %d options", count[0], count[1]) + } + if *fRunOnce { wait := time.Duration(*fTestWait) * time.Second return ag.Once(ctx, wait) @@ -264,12 +291,6 @@ func runAgent(ctx context.Context, return ag.Test(ctx, wait) } - log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) - log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) - log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("I! Tags enabled: %s", c.ListTags()) - if *fPidfile != "" { f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { @@ -348,6 +369,11 @@ func main() { logger.SetupLogging(logger.LogConfig{}) + // Configure version + if err := internal.SetVersion(version); err != nil { + log.Println("Telegraf version already configured to: " + internal.Version()) + } + // Load external plugins, if requested. if *fPlugins != "" { log.Printf("I! 
Loading external plugins from: %s", *fPlugins) @@ -392,6 +418,27 @@ func main() { // switch for flags which just do something and exit immediately switch { + case *fDeprecationList: + c := config.NewConfig() + infos := c.CollectDeprecationInfos( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) + //nolint:revive // We will notice if Println fails + fmt.Println("Deprecated Input Plugins: ") + c.PrintDeprecationList(infos["inputs"]) + //nolint:revive // We will notice if Println fails + fmt.Println("Deprecated Output Plugins: ") + c.PrintDeprecationList(infos["outputs"]) + //nolint:revive // We will notice if Println fails + fmt.Println("Deprecated Processor Plugins: ") + c.PrintDeprecationList(infos["processors"]) + //nolint:revive // We will notice if Println fails + fmt.Println("Deprecated Aggregator Plugins: ") + c.PrintDeprecationList(infos["aggregators"]) + return case *fOutputList: fmt.Println("Available Output Plugins: ") names := make([]string, 0, len(outputs.Outputs)) @@ -435,16 +482,6 @@ func main() { return } - shortVersion := version - if shortVersion == "" { - shortVersion = "unknown" - } - - // Configure version - if err := internal.SetVersion(shortVersion); err != nil { - log.Println("Telegraf version already configured to: " + internal.Version()) - } - run( inputFilters, outputFilters, diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 38222f2d0871d..8e2a6be1925d5 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -45,9 +45,12 @@ func (p *program) run() { p.inputFilters, p.outputFilters, ) + close(stop) } func (p *program) Stop(s service.Service) error { - close(stop) + var empty struct{} + stop <- empty // signal reloadLoop to finish (context cancel) + <-stop // wait for reloadLoop to finish and close channel return nil } diff --git a/config/config.go b/config/config.go index d6081aedcfaf3..4121c71687e66 100644 --- a/config/config.go +++ b/config/config.go @@ -17,6 +17,8 @@ import ( "strings" "time" + "github.com/coreos/go-semver/semver" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/choice" @@ -77,6 +79,9 @@ type Config struct { // Processors have a slice wrapper type because they need to be sorted Processors models.RunningProcessors AggProcessors models.RunningProcessors + + Deprecations map[string][]int64 + version *semver.Version } // NewConfig creates a new struct to hold the Telegraf config. @@ -102,7 +107,15 @@ func NewConfig() *Config { AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), + Deprecations: make(map[string][]int64), + } + + // Handle unknown version + version := internal.Version() + if version == "" || version == "unknown" { + version = "0.0.0-unknown" } + c.version = semver.New(version) tomlCfg := &toml.Config{ NormFieldName: toml.DefaultConfig.NormFieldName, @@ -165,7 +178,8 @@ type AgentConfig struct { // TODO(cam): Remove UTC and parameter, they are no longer // valid for the agent config. 
Leaving them here for now for backwards- // compatibility - UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect + // Deprecated: 1.0.0 after, has no effect + UTC bool `toml:"utc"` // Debug is the option for running in debug mode Debug bool `toml:"debug"` @@ -560,9 +574,13 @@ func printFilteredInputs(inputFilters []string, commented bool) { // Print Inputs for _, pname := range pnames { + // Skip inputs that are registered twice for backward compatibility if pname == "cisco_telemetry_gnmi" { continue } + if pname == "KNXListener" { + continue + } creator := inputs.Inputs[pname] input := creator() @@ -1004,6 +1022,11 @@ func parseConfig(contents []byte) (*ast.Table, error) { func (c *Config) addAggregator(name string, table *ast.Table) error { creator, ok := aggregators.Aggregators[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := aggregators.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("aggregators", name, di) + return fmt.Errorf("plugin deprecated") + } return fmt.Errorf("Undefined but requested aggregator: %s", name) } aggregator := creator() @@ -1017,6 +1040,10 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { return err } + if err := c.printUserDeprecation("aggregators", name, aggregator); err != nil { + return err + } + c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf)) return nil } @@ -1024,6 +1051,11 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { func (c *Config) addProcessor(name string, table *ast.Table) error { creator, ok := processors.Processors[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := processors.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("processors", name, di) + return fmt.Errorf("plugin deprecated") + } return fmt.Errorf("Undefined but requested processor: %s", name) } @@ -1065,6 +1097,10 @@ func (c *Config) newRunningProcessor( } } + if err := c.printUserDeprecation("processors", processorConfig.Name, processor); err != nil { + return nil, err + } + rf := models.NewRunningProcessor(processor, processorConfig) return rf, nil } @@ -1075,6 +1111,11 @@ func (c *Config) addOutput(name string, table *ast.Table) error { } creator, ok := outputs.Outputs[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := outputs.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("outputs", name, di) + return fmt.Errorf("plugin deprecated") + } return fmt.Errorf("Undefined but requested output: %s", name) } output := creator() @@ -1099,6 +1140,10 @@ func (c *Config) addOutput(name string, table *ast.Table) error { return err } + if err := c.printUserDeprecation("outputs", name, output); err != nil { + return err + } + ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil @@ -1108,13 +1153,23 @@ func (c *Config) addInput(name string, table *ast.Table) error { if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) { return nil } + // Legacy support renaming io input to diskio if name == "io" { + if err := c.printUserDeprecation("inputs", name, nil); err != nil { + return err + } name = "diskio" } creator, ok := inputs.Inputs[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := inputs.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("inputs", name, di) + return fmt.Errorf("plugin deprecated") + } 
+ return fmt.Errorf("Undefined but requested input: %s", name) } input := creator() @@ -1148,6 +1203,10 @@ func (c *Config) addInput(name string, table *ast.Table) error { return err } + if err := c.printUserDeprecation("inputs", name, input); err != nil { + return err + } + rp := models.NewRunningInput(input, pluginConfig) rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) diff --git a/config/config_test.go b/config/config_test.go index 940b84ada7773..546b752f3a383 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -10,13 +10,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { @@ -140,12 +141,17 @@ func TestConfig_LoadDirectory(t *testing.T) { expectedConfigs[0].Tags = make(map[string]string) expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin) - p, err := parsers.NewParser(&parsers.Config{ + parserConfig := &parsers.Config{ MetricName: "exec", DataFormat: "json", JSONStrict: true, - }) + } + p, err := parsers.NewParser(parserConfig) require.NoError(t, err) + + // Inject logger to have proper struct for comparison + models.SetLoggerOnPlugin(p, models.NewLogger("parsers", parserConfig.DataFormat, parserConfig.MetricName)) + expectedPlugins[1].SetParser(p) expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar" expectedConfigs[1] = &models.InputConfig{ diff --git a/config/deprecation.go b/config/deprecation.go new file mode 100644 index 0000000000000..ab5d2a0caba46 --- /dev/null +++ b/config/deprecation.go @@ -0,0 +1,378 @@ +package config + +import ( + "fmt" + "log" //nolint:revive // log is ok here as the logging facility is not set-up yet + "reflect" + "sort" + "strings" + + "github.com/coreos/go-semver/semver" + "github.com/fatih/color" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/processors" +) + +// Escalation level for the plugin or option +type Escalation int + +func (e Escalation) String() string { + switch e { + case Warn: + return "WARN" + case Error: + return "ERROR" + } + return "NONE" +} + +const ( + // None means no deprecation + None Escalation = iota + // Warn means deprecated but still within the grace period + Warn + // Error means deprecated and beyond grace period + Error +) + +// deprecationInfo contains all important information to describe a deprecated entity +type deprecationInfo struct { + // Name of the plugin or plugin option + Name string + // LogLevel is the level of deprecation which currently corresponds to a log-level + LogLevel Escalation + info telegraf.DeprecationInfo +} + +func (di *deprecationInfo) determineEscalation(telegrafVersion *semver.Version) error { + di.LogLevel = None + if di.info.Since == "" { + return nil + } + + since, err := semver.NewVersion(di.info.Since) + if err != nil { + return fmt.Errorf("cannot parse 'since' version %q: %v", di.info.Since, err) + } + + var removal *semver.Version + if di.info.RemovalIn != "" { + removal, err = semver.NewVersion(di.info.RemovalIn) + if 
err != nil { + return fmt.Errorf("cannot parse 'removal' version %q: %v", di.info.RemovalIn, err) + } + } else { + removal = &semver.Version{Major: since.Major} + removal.BumpMajor() + di.info.RemovalIn = removal.String() + } + + // Drop potential pre-release tags + version := semver.Version{ + Major: telegrafVersion.Major, + Minor: telegrafVersion.Minor, + Patch: telegrafVersion.Patch, + } + if !version.LessThan(*removal) { + di.LogLevel = Error + } else if !version.LessThan(*since) { + di.LogLevel = Warn + } + return nil +} + +// pluginDeprecationInfo holds all information about a deprecated plugin or it's options +type pluginDeprecationInfo struct { + deprecationInfo + + // Options deprecated for this plugin + Options []deprecationInfo +} + +func (c *Config) incrementPluginDeprecations(category string) { + newcounts := []int64{1, 0} + if counts, found := c.Deprecations[category]; found { + newcounts = []int64{counts[0] + 1, counts[1]} + } + c.Deprecations[category] = newcounts +} + +func (c *Config) incrementPluginOptionDeprecations(category string) { + newcounts := []int64{0, 1} + if counts, found := c.Deprecations[category]; found { + newcounts = []int64{counts[0], counts[1] + 1} + } + c.Deprecations[category] = newcounts +} + +func (c *Config) collectDeprecationInfo(category, name string, plugin interface{}, all bool) pluginDeprecationInfo { + info := pluginDeprecationInfo{ + deprecationInfo: deprecationInfo{ + Name: category + "." + name, + LogLevel: None, + }, + } + + // First check if the whole plugin is deprecated + switch category { + case "aggregators": + if pi, deprecated := aggregators.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "inputs": + if pi, deprecated := inputs.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "outputs": + if pi, deprecated := outputs.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "processors": + if pi, deprecated := processors.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + } + if err := info.determineEscalation(c.version); err != nil { + panic(fmt.Errorf("plugin %q: %v", info.Name, err)) + } + if info.LogLevel != None { + c.incrementPluginDeprecations(category) + } + + // Allow checking for names only. + if plugin == nil { + return info + } + + // Check for deprecated options + walkPluginStruct(reflect.ValueOf(plugin), func(field reflect.StructField, value reflect.Value) { + // Try to report only those fields that are set + if !all && value.IsZero() { + return + } + + tags := strings.SplitN(field.Tag.Get("deprecated"), ";", 3) + if len(tags) < 1 || tags[0] == "" { + return + } + optionInfo := deprecationInfo{Name: field.Name} + optionInfo.info.Since = tags[0] + + if len(tags) > 1 { + optionInfo.info.Notice = tags[len(tags)-1] + } + if len(tags) > 2 { + optionInfo.info.RemovalIn = tags[1] + } + if err := optionInfo.determineEscalation(c.version); err != nil { + panic(fmt.Errorf("plugin %q option %q: %v", info.Name, field.Name, err)) + } + + if optionInfo.LogLevel != None { + c.incrementPluginOptionDeprecations(category) + } + + // Get the toml field name + option := field.Tag.Get("toml") + if option != "" { + optionInfo.Name = option + } + info.Options = append(info.Options, optionInfo) + }) + + return info +} + +func (c *Config) printUserDeprecation(category, name string, plugin interface{}) error { + info := c.collectDeprecationInfo(category, name, plugin, false) + + switch info.LogLevel { + case Warn: + prefix := "W! 
" + color.YellowString("DeprecationWarning") + printPluginDeprecationNotice(prefix, info.Name, info.info) + // We will not check for any deprecated options as the whole plugin is deprecated anyway. + return nil + case Error: + prefix := "E! " + color.RedString("DeprecationError") + printPluginDeprecationNotice(prefix, info.Name, info.info) + // We are past the grace period + return fmt.Errorf("plugin deprecated") + } + + // Print deprecated options + deprecatedOptions := make([]string, 0) + for _, option := range info.Options { + switch option.LogLevel { + case Warn: + prefix := "W! " + color.YellowString("DeprecationWarning") + printOptionDeprecationNotice(prefix, info.Name, option.Name, option.info) + case Error: + prefix := "E! " + color.RedString("DeprecationError") + printOptionDeprecationNotice(prefix, info.Name, option.Name, option.info) + deprecatedOptions = append(deprecatedOptions, option.Name) + } + } + + if len(deprecatedOptions) > 0 { + return fmt.Errorf("plugin options %q deprecated", strings.Join(deprecatedOptions, ",")) + } + + return nil +} + +func (c *Config) CollectDeprecationInfos(inFilter, outFilter, aggFilter, procFilter []string) map[string][]pluginDeprecationInfo { + infos := make(map[string][]pluginDeprecationInfo) + + infos["inputs"] = make([]pluginDeprecationInfo, 0) + for name, creator := range inputs.Inputs { + if len(inFilter) > 0 && !sliceContains(name, inFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("inputs", name, plugin, true) + + if info.LogLevel != None || len(info.Options) > 0 { + infos["inputs"] = append(infos["inputs"], info) + } + } + + infos["outputs"] = make([]pluginDeprecationInfo, 0) + for name, creator := range outputs.Outputs { + if len(outFilter) > 0 && !sliceContains(name, outFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("outputs", name, plugin, true) + + if info.LogLevel != None || len(info.Options) > 0 { + infos["outputs"] = append(infos["outputs"], info) + } + } + + infos["processors"] = make([]pluginDeprecationInfo, 0) + for name, creator := range processors.Processors { + if len(procFilter) > 0 && !sliceContains(name, procFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("processors", name, plugin, true) + + if info.LogLevel != None || len(info.Options) > 0 { + infos["processors"] = append(infos["processors"], info) + } + } + + infos["aggregators"] = make([]pluginDeprecationInfo, 0) + for name, creator := range aggregators.Aggregators { + if len(aggFilter) > 0 && !sliceContains(name, aggFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("aggregators", name, plugin, true) + + if info.LogLevel != None || len(info.Options) > 0 { + infos["aggregators"] = append(infos["aggregators"], info) + } + } + + return infos +} + +func (c *Config) PrintDeprecationList(plugins []pluginDeprecationInfo) { + sort.Slice(plugins, func(i, j int) bool { return plugins[i].Name < plugins[j].Name }) + + for _, plugin := range plugins { + switch plugin.LogLevel { + case Warn, Error: + _, _ = fmt.Printf( + " %-40s %-5s since %-5s removal in %-5s %s\n", + plugin.Name, plugin.LogLevel, plugin.info.Since, plugin.info.RemovalIn, plugin.info.Notice, + ) + } + + if len(plugin.Options) < 1 { + continue + } + sort.Slice(plugin.Options, func(i, j int) bool { return plugin.Options[i].Name < plugin.Options[j].Name }) + for _, option := range plugin.Options { + _, _ = fmt.Printf( + " %-40s %-5s since %-5s removal in %-5s %s\n", + 
plugin.Name+"/"+option.Name, option.LogLevel, option.info.Since, option.info.RemovalIn, option.info.Notice, + ) + } + } +} + +func printHistoricPluginDeprecationNotice(category, name string, info telegraf.DeprecationInfo) { + prefix := "E! " + color.RedString("DeprecationError") + log.Printf( + "%s: Plugin %q deprecated since version %s and removed: %s", + prefix, category+"."+name, info.Since, info.Notice, + ) +} + +func printPluginDeprecationNotice(prefix, name string, info telegraf.DeprecationInfo) { + log.Printf( + "%s: Plugin %q deprecated since version %s and will be removed in %s: %s", + prefix, name, info.Since, info.RemovalIn, info.Notice, + ) +} + +func printOptionDeprecationNotice(prefix, plugin, option string, info telegraf.DeprecationInfo) { + log.Printf( + "%s: Option %q of plugin %q deprecated since version %s and will be removed in %s: %s", + prefix, option, plugin, info.Since, info.RemovalIn, info.Notice, + ) +} + +// walkPluginStruct iterates over the fields of a structure in depth-first search (to cover nested structures) +// and calls the given function for every visited field. +func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv reflect.Value)) { + v := reflect.Indirect(value) + t := v.Type() + + // Only works on structs + if t.Kind() != reflect.Struct { + return + } + + // Walk over the struct fields and call the given function. If we encounter more complex embedded + // elements (stucts, slices/arrays, maps) we need to descend into those elements as they might + // contain structures nested in the current structure. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + fieldValue := v.Field(i) + + if field.PkgPath != "" { + continue + } + switch field.Type.Kind() { + case reflect.Struct: + walkPluginStruct(fieldValue, fn) + case reflect.Array, reflect.Slice: + for j := 0; j < fieldValue.Len(); j++ { + element := fieldValue.Index(j) + // The array might contain structs + walkPluginStruct(element, fn) + fn(field, element) + } + case reflect.Map: + iter := fieldValue.MapRange() + for iter.Next() { + element := iter.Value() + // The map might contain structs + walkPluginStruct(element, fn) + fn(field, element) + } + } + fn(field, fieldValue) + } +} diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index 0edf467837457..265b9fa6893a9 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -1,15 +1,15 @@ -### Aggregator Plugins +# Aggregator Plugins This section is for developers who want to create a new aggregator plugin. -### Aggregator Plugin Guidelines +## Aggregator Plugin Guidelines * A aggregator must conform to the [telegraf.Aggregator][] interface. * Aggregators should call `aggregators.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. -- The `SampleConfig` function should return valid toml that describes how the +* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this aggregator does. @@ -17,7 +17,7 @@ This section is for developers who want to create a new aggregator plugin. through it. This should be done using the builtin `HashID()` function of each metric. 
* When the `Reset()` function is called, all caches should be cleared. -- Follow the recommended [Code Style][]. +* Follow the recommended [Code Style][]. ### Aggregator Plugin Example @@ -27,21 +27,21 @@ package min // min.go import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" ) type Min struct { - // caches for metric fields, names, and tags - fieldCache map[uint64]map[string]float64 - nameCache map[uint64]string - tagCache map[uint64]map[string]string + // caches for metric fields, names, and tags + fieldCache map[uint64]map[string]float64 + nameCache map[uint64]string + tagCache map[uint64]map[string]string } func NewMin() telegraf.Aggregator { - m := &Min{} - m.Reset() - return m + m := &Min{} + m.Reset() + return m } var sampleConfig = ` @@ -53,77 +53,77 @@ var sampleConfig = ` ` func (m *Min) Init() error { - return nil + return nil } func (m *Min) SampleConfig() string { - return sampleConfig + return sampleConfig } func (m *Min) Description() string { - return "Keep the aggregate min of each metric passing through." + return "Keep the aggregate min of each metric passing through." } func (m *Min) Add(in telegraf.Metric) { - id := in.HashID() - if _, ok := m.nameCache[id]; !ok { - // hit an uncached metric, create caches for first time: - m.nameCache[id] = in.Name() - m.tagCache[id] = in.Tags() - m.fieldCache[id] = make(map[string]float64) - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - m.fieldCache[id][k] = fv - } - } - } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.fieldCache[id][k]; !ok { - // hit an uncached field of a cached metric - m.fieldCache[id][k] = fv - continue - } - if fv < m.fieldCache[id][k] { + id := in.HashID() + if _, ok := m.nameCache[id]; !ok { + // hit an uncached metric, create caches for first time: + m.nameCache[id] = in.Name() + m.tagCache[id] = in.Tags() + m.fieldCache[id] = make(map[string]float64) + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + m.fieldCache[id][k] = fv + } + } + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.fieldCache[id][k]; !ok { + // hit an uncached field of a cached metric + m.fieldCache[id][k] = fv + continue + } + if fv < m.fieldCache[id][k] { // set new minimum - m.fieldCache[id][k] = fv - } - } - } - } + m.fieldCache[id][k] = fv + } + } + } + } } func (m *Min) Push(acc telegraf.Accumulator) { - for id, _ := range m.nameCache { - fields := map[string]interface{}{} - for k, v := range m.fieldCache[id] { - fields[k+"_min"] = v - } - acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) - } + for id, _ := range m.nameCache { + fields := map[string]interface{}{} + for k, v := range m.fieldCache[id] { + fields[k+"_min"] = v + } + acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) + } } func (m *Min) Reset() { - m.fieldCache = make(map[uint64]map[string]float64) - m.nameCache = make(map[uint64]string) - m.tagCache = make(map[uint64]map[string]string) + m.fieldCache = make(map[uint64]map[string]float64) + m.nameCache = make(map[uint64]string) + m.tagCache = make(map[uint64]map[string]string) } func convert(in interface{}) (float64, bool) { - switch v := in.(type) { - case float64: - return v, true - case int64: - return float64(v), true - default: - return 0, false - } + switch v := in.(type) { + case float64: + return v, true + case int64: + return 
float64(v), true + default: + return 0, false + } } func init() { - aggregators.Add("min", func() telegraf.Aggregator { - return NewMin() - }) + aggregators.Add("min", func() telegraf.Aggregator { + return NewMin() + }) } ``` diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 934a4b0cf7706..389138cec7a94 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -5,7 +5,7 @@ As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugin These plugins sit in-between Input & Output plugins, aggregating and processing metrics as they pass through Telegraf: -``` +```text ┌───────────┐ │ │ │ CPU │───┐ @@ -44,12 +44,14 @@ to control which metrics are passed through a processor or aggregator. If a metric is filtered out the metric bypasses the plugin and is passed downstream to the next plugin. -### Processor +## Processor + Processor plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. -### Aggregator +## Aggregator + Aggregator plugins, on the other hand, are a bit more complicated. Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, minimum, maximum, or standard deviation. For this reason, all _aggregator_ diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md index cb0c31268c9a4..4579e84488058 100644 --- a/docs/COMMANDS_AND_FLAGS.md +++ b/docs/COMMANDS_AND_FLAGS.md @@ -1,29 +1,30 @@ # Telegraf Commands & Flags -### Usage +## Usage -``` +```shell telegraf [commands] telegraf [flags] ``` -### Commands +## Commands |command|description| |--------|-----------------------------------------------| |`config` |print out full sample configuration to stdout| |`version`|print the version to stdout| -### Flags +## Flags |flag|description| |-------------------|------------| |`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| |`--config ` |configuration file to load| |`--config-directory ` |directory containing additional *.conf files| -|`--watch-config` |Telegraf will restart on local config changes.
Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`.<br>Monitoring is off by default.| +|`--watch-config` |Telegraf will restart on local config changes. Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`. Monitoring is off by default.| |`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced.| |`--debug` |turn on debug logging| +|`--deprecation-list` |print all deprecated plugins or plugin options| |`--input-filter <filter>` |filter the inputs to enable, separator is `:`| |`--input-list` |print available input plugins.| |`--output-filter <filter>` |filter the outputs to enable, separator is `:`| @@ -32,15 +33,15 @@ telegraf [flags] |`--pprof-addr <address>` |pprof address to listen on, don't activate pprof if empty| |`--processor-filter <filter>` |filter the processors to enable, separator is `:`| |`--quiet` |run in quiet mode| -|`--section-filter` |filter config sections to output, separator is `:`<br>
Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--section-filter` |filter config sections to output, separator is `:`. Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| |`--sample-config` |print out full sample configuration| |`--once` |enable once mode: gather metrics once, write them, and exit| -|`--test` |enable test mode: gather metrics once and print them| -|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode| +|`--test` |enable test mode: gather metrics once and print them. **No outputs are executed!**| +|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode. **Implies `--test` if not used with `--once`**| |`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| |`--version` |display the version and exit| -### Examples +## Examples **Generate a telegraf config file:** @@ -55,7 +56,7 @@ telegraf [flags] `telegraf --config telegraf.conf --test` **Run telegraf with all plugins defined in config file:** - + `telegraf --config telegraf.conf` **Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9af88b669ea9f..25d10a90b1340 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,3 +1,5 @@ + + # Configuration Telegraf's configuration file is written using [TOML][] and is composed of @@ -5,9 +7,10 @@ three sections: [global tags][], [agent][] settings, and [plugins][]. View the default [telegraf.conf][] config file with all available plugins. -### Generating a Configuration File +## Generating a Configuration File A default config file can be generated by telegraf: + ```sh telegraf config > telegraf.conf ``` @@ -21,7 +24,7 @@ telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config [View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. -### Configuration Loading +## Configuration Loading The location of the configuration file can be set via the `--config` command line flag. @@ -34,7 +37,7 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for the main configuration file and `/etc/telegraf/telegraf.d` for the directory of configuration files. -### Environment Variables +## Environment Variables Environment variables can be used anywhere in the config file, simply surround them with `${}`. Replacement occurs before file parsing. For strings @@ -49,14 +52,17 @@ in the `/etc/default/telegraf` file. 
`/etc/default/telegraf`: For InfluxDB 1.x: -``` + +```shell USER="alice" INFLUX_URL="http://localhost:8086" INFLUX_SKIP_DATABASE_CREATION="true" INFLUX_PASSWORD="monkey123" ``` + For InfluxDB OSS 2: -``` + +```shell INFLUX_HOST="http://localhost:8086" # used to be 9999 INFLUX_TOKEN="replace_with_your_token" INFLUX_ORG="your_username" @@ -64,7 +70,8 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` For InfluxDB Cloud 2: -``` + +```shell # For AWS West (Oregon) INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls @@ -74,6 +81,7 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` `/etc/telegraf.conf`: + ```toml [global_tags] user = "${USER}" @@ -103,6 +111,7 @@ INFLUX_BUCKET="replace_with_your_bucket_name" The above files will produce the following effective configuration file to be parsed: + ```toml [global_tags] user = "alice" @@ -132,17 +141,18 @@ parsed: bucket = "replace_with_your_bucket_name" ``` -### Intervals +## Intervals Intervals are durations of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. + ```toml [agent] interval = "10s" ``` -### Global Tags +## Global Tags Global tags can be specified in the `[global_tags]` table in key="value" format. All metrics that are gathered will be tagged with the tags specified. @@ -153,7 +163,7 @@ Global tags are overriden by tags set by plugins. dc = "us-east-1" ``` -### Agent +## Agent The agent table configures Telegraf and the defaults used across all plugins. @@ -209,7 +219,6 @@ The agent table configures Telegraf and the defaults used across all plugins. Name of the file to be logged to when using the "file" logtarget. If set to the empty string then logs are written to stderr. - - **logfile_rotation_interval**: The logfile will be rotated after the time interval specified. When set to 0 no time based rotation is performed. @@ -231,7 +240,7 @@ The agent table configures Telegraf and the defaults used across all plugins. - **omit_hostname**: If set to true, do no set the "host" tag in the telegraf agent. -### Plugins +## Plugins Telegraf plugins are divided into 4 types: [inputs][], [outputs][], [processors][], and [aggregators][]. @@ -287,6 +296,7 @@ emitted from the input plugin. #### Examples Use the name_suffix parameter to emit measurements with the name `cpu_total`: + ```toml [[inputs.cpu]] name_suffix = "_total" @@ -295,6 +305,7 @@ Use the name_suffix parameter to emit measurements with the name `cpu_total`: ``` Use the name_override parameter to emit measurements with the name `foobar`: + ```toml [[inputs.cpu]] name_override = "foobar" @@ -307,6 +318,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` > **NOTE**: With TOML, order matters. Parameters belong to the last defined > table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin > definition. + ```toml [[inputs.cpu]] percpu = false @@ -318,6 +330,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` Utilize `name_override`, `name_prefix`, or `name_suffix` config options to avoid measurement collisions when defining multiple plugins: + ```toml [[inputs.cpu]] percpu = false @@ -357,6 +370,7 @@ emitted from the output plugin. #### Examples Override flush parameters for a single output: + ```toml [agent] flush_interval = "10s" @@ -394,6 +408,7 @@ processor. 
If the order processors are applied matters you must set order on all involved processors: + ```toml [[processors.rename]] order = 1 @@ -445,6 +460,7 @@ aggregator. Collect and emit the min/max of the system load1 metric every 30s, dropping the originals. + ```toml [[inputs.system]] fieldpass = ["load1"] # collects system load1 metric. @@ -460,6 +476,7 @@ the originals. Collect and emit the min/max of the swap metrics every 30s, dropping the originals. The aggregator will not be applied to the system load metrics due to the `namepass` parameter. + ```toml [[inputs.swap]] @@ -475,14 +492,13 @@ to the `namepass` parameter. files = ["stdout"] ``` - -### Metric Filtering +## Metric Filtering Metric filtering can be configured per plugin on any input, output, processor, and aggregator plugin. Filters fall under two categories: Selectors and Modifiers. -#### Selectors +### Selectors Selector filters include or exclude entire metrics. When a metric is excluded from a Input or an Output plugin, the metric is dropped. If a metric is @@ -510,7 +526,7 @@ is tested on metrics after they have passed the `tagpass` test. defined at the *_end_* of the plugin definition, otherwise subsequent plugin config options will be interpreted as part of the tagpass/tagdrop tables. -#### Modifiers +### Modifiers Modifier filters remove tags and fields from a metric. If all fields are removed the metric is removed. @@ -536,9 +552,10 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns will be discarded from the metric. Any tag can be filtered including global tags and the agent `host` tag. -#### Filtering Examples +### Filtering Examples + +#### Using tagpass and tagdrop -##### Using tagpass and tagdrop: ```toml [[inputs.cpu]] percpu = true @@ -571,7 +588,8 @@ tags and the agent `host` tag. instance = ["isatap*", "Local*"] ``` -##### Using fieldpass and fielddrop: +#### Using fieldpass and fielddrop + ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] @@ -584,7 +602,8 @@ tags and the agent `host` tag. fieldpass = ["inodes*"] ``` -##### Using namepass and namedrop: +#### Using namepass and namedrop + ```toml # Drop all metrics about containers for kubelet [[inputs.prometheus]] @@ -597,7 +616,8 @@ tags and the agent `host` tag. namepass = ["rest_client_*"] ``` -##### Using taginclude and tagexclude: +#### Using taginclude and tagexclude + ```toml # Only include the "cpu" tag in the measurements for the cpu plugin. [[inputs.cpu]] @@ -610,7 +630,8 @@ tags and the agent `host` tag. tagexclude = ["fstype"] ``` -##### Metrics can be routed to different outputs using the metric name and tags: +#### Metrics can be routed to different outputs using the metric name and tags + ```toml [[outputs.influxdb]] urls = [ "http://localhost:8086" ] @@ -632,7 +653,7 @@ tags and the agent `host` tag. cpu = ["cpu0"] ``` -##### Routing metrics to different outputs based on the input. +#### Routing metrics to different outputs based on the input Metrics are tagged with `influxdb_database` in the input, which is then used to select the output. The tag is removed in the outputs before writing. @@ -656,7 +677,7 @@ select the output. The tag is removed in the outputs before writing. influxdb_database = "other" ``` -### Transport Layer Security (TLS) +## Transport Layer Security (TLS) Reference the detailed [TLS][] documentation. 
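As a worked illustration of the filtering options documented above (a minimal sketch only; the plugin choice, field patterns, and tag values are taken from the surrounding examples and are not part of this change), selectors and modifiers can be combined on a single input, with the `tagpass` table kept at the end of the plugin definition as the TOML-ordering note requires:

```toml
# Gather disk metrics, keep only the inodes fields, drop the "fstype" tag,
# and pass only metrics whose "path" tag matches one of the listed globs.
[[inputs.disk]]
  fieldpass = ["inodes*"]
  tagexclude = ["fstype"]
  # Keep tagpass/tagdrop tables at the end of the plugin definition so that
  # later options are not parsed as part of this table.
  [inputs.disk.tagpass]
    path = ["/var", "/home*"]
```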
diff --git a/docs/DOCKER.md b/docs/DOCKER.md new file mode 100644 index 0000000000000..5d0484e10be5a --- /dev/null +++ b/docs/DOCKER.md @@ -0,0 +1,3 @@ +# Telegraf Docker Images + +Docker images for Telegraf are kept in the [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) repo. diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md index 83759ed72bb63..f3dc0699ca2df 100644 --- a/docs/EXTERNAL_PLUGINS.md +++ b/docs/EXTERNAL_PLUGINS.md @@ -1,8 +1,8 @@ -### External Plugins +# External Plugins -[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside -of Telegraf that can run through an `execd` plugin. These external plugins allow for -more flexibility compared to internal Telegraf plugins. +[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside +of Telegraf that can run through an `execd` plugin. These external plugins allow for +more flexibility compared to internal Telegraf plugins. - External plugins can be written in any language (internal Telegraf plugins can only written in Go) - External plugins can access to libraries not written in Go @@ -11,7 +11,8 @@ more flexibility compared to internal Telegraf plugins. - You don't need to wait on the Telegraf team to publish your plugin and start working with it. - using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use -### External Plugin Guidelines +## External Plugin Guidelines + The guidelines of writing external plugins would follow those for our general [input](/docs/INPUTS.md), [output](/docs/OUTPUTS.md), [processor](/docs/PROCESSORS.md), and [aggregator](/docs/AGGREGATORS.md) plugins. Please reference the documentation on how to create these plugins written in Go. @@ -19,51 +20,55 @@ Please reference the documentation on how to create these plugins written in Go. _For listed [external plugins](/EXTERNAL_PLUGINS.md), the author of the external plugin is also responsible for the maintenance and feature development of external plugins. Expect to have users open plugin issues on its respective GitHub repository._ -#### Execd Go Shim +### Execd Go Shim + For Go plugins, there is a [Execd Go Shim](/plugins/common/shim/) that will make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim allows anyone to build and run it as a separate app using one of the `execd`plugins: + - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) Follow the [Steps to externalize a plugin](/plugins/common/shim#steps-to-externalize-a-plugin) and [Steps to build and run your plugin](/plugins/common/shim#steps-to-build-and-run-your-plugin) to properly with the Execd Go Shim -#### Step-by-Step guidelines -This is a guide to help you set up your plugin to use it with `execd` -1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices: +### Step-by-Step guidelines + +This is a guide to help you set up your plugin to use it with `execd`: + +1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices: - [Input Plugins](/docs/INPUTS.md) - [Processor Plugins](/docs/PROCESSORS.md) - [Aggregator Plugins](/docs/AGGREGATORS.md) - [Output Plugins](/docs/OUTPUTS.md) 2. 
If your plugin is written in Go, include the steps for the [Execd Go Shim](/plugins/common/shim#steps-to-build-and-run-your-plugin) - 1. Move the project to an external repo, it's recommended to preserve the path - structure, (but not strictly necessary). eg if your plugin was at - `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu` - in the new repo. For a further example of what this might look like, take a - look at [ssoroka/rand](https://github.com/ssoroka/rand) or - [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn) - 1. Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder. - This will be the entrypoint to the plugin when run as a stand-alone program, and - it will call the shim code for you to make that happen. It's recommended to - have only one plugin per repo, as the shim is not designed to run multiple - plugins at the same time (it would vastly complicate things). - 1. Edit the main.go file to import your plugin. Within Telegraf this would have - been done in an all.go file, but here we don't split the two apart, and the change - just goes in the top of main.go. If you skip this step, your plugin will do nothing. - eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"` - 1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration - specific to your plugin. Note that this config file **must be separate from the - rest of the config for Telegraf, and must not be in a shared directory where - Telegraf is expecting to load all configs**. If Telegraf reads this config file - it will not know which plugin it relates to. Telegraf instead uses an execd config - block to look for this plugin. - 1. Add usage and development instructions in the homepage of your repository for running - your plugin with its respective `execd` plugin. Please refer to - [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) - for examples. Include the following steps: + - Move the project to an external repo, it's recommended to preserve the path + structure, (but not strictly necessary). eg if your plugin was at + `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu` + in the new repo. For a further example of what this might look like, take a + look at [ssoroka/rand](https://github.com/ssoroka/rand) or + [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn) + - Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder. + This will be the entrypoint to the plugin when run as a stand-alone program, and + it will call the shim code for you to make that happen. It's recommended to + have only one plugin per repo, as the shim is not designed to run multiple + plugins at the same time (it would vastly complicate things). + - Edit the main.go file to import your plugin. Within Telegraf this would have + been done in an all.go file, but here we don't split the two apart, and the change + just goes in the top of main.go. If you skip this step, your plugin will do nothing. + eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"` + - Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration + specific to your plugin. 
Note that this config file **must be separate from the + rest of the config for Telegraf, and must not be in a shared directory where + Telegraf is expecting to load all configs**. If Telegraf reads this config file + it will not know which plugin it relates to. Telegraf instead uses an execd config + block to look for this plugin. + - Add usage and development instructions in the homepage of your repository for running + your plugin with its respective `execd` plugin. Please refer to + [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation) + for examples. Include the following steps: 1. How to download the release package for your platform or how to clone the binary for your external plugin 1. The commands to build your binary 1. Location to edit your `telegraf.conf` - 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), + 1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd), [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd) - 1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) - list. Please include the plugin name, link to the plugin repository and a short description of the plugin. + - Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) + list. Please include the plugin name, link to the plugin repository and a short description of the plugin. diff --git a/docs/FAQ.md b/docs/FAQ.md index 40a101fdf6fe1..c702a91564994 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -1,24 +1,23 @@ # Frequently Asked Questions -### Q: How can I monitor the Docker Engine Host from within a container? +## Q: How can I monitor the Docker Engine Host from within a container? You will need to setup several volume mounts as well as some environment variables: -``` + +```shell docker run --name telegraf \ - -v /:/hostfs:ro \ - -e HOST_ETC=/hostfs/etc \ - -e HOST_PROC=/hostfs/proc \ - -e HOST_SYS=/hostfs/sys \ - -e HOST_VAR=/hostfs/var \ - -e HOST_RUN=/hostfs/run \ - -e HOST_MOUNT_PREFIX=/hostfs \ - telegraf + -v /:/hostfs:ro \ + -e HOST_ETC=/hostfs/etc \ + -e HOST_PROC=/hostfs/proc \ + -e HOST_SYS=/hostfs/sys \ + -e HOST_VAR=/hostfs/var \ + -e HOST_RUN=/hostfs/run \ + -e HOST_MOUNT_PREFIX=/hostfs \ + telegraf ``` - -### Q: Why do I get a "no such host" error resolving hostnames that other -programs can resolve? +## Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve? Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution). This resolver behaves differently than the C library functions but is more @@ -29,16 +28,18 @@ that are unsupported by the pure Go resolver, you can switch to the cgo resolver. If running manually set: -``` + +```shell export GODEBUG=netdns=cgo ``` If running as a service add the environment variable to `/etc/default/telegraf`: -``` + +```shell GODEBUG=netdns=cgo ``` -### Q: How can I manage series cardinality? +## Q: How can I manage series cardinality? High [series cardinality][], when not properly managed, can cause high load on your database. 
Telegraf attempts to avoid creating series with high diff --git a/docs/INPUTS.md b/docs/INPUTS.md index 679c24e287604..6f553b060aadb 100644 --- a/docs/INPUTS.md +++ b/docs/INPUTS.md @@ -1,4 +1,4 @@ -### Input Plugins +# Input Plugins This section is for developers who want to create new collection inputs. Telegraf is entirely plugin driven. This interface allows for operators to @@ -8,7 +8,7 @@ to create new ways of generating metrics. Plugin authorship is kept as simple as possible to promote people to develop and submit new inputs. -### Input Plugin Guidelines +## Input Plugin Guidelines - A plugin must conform to the [telegraf.Input][] interface. - Input Plugins should call `inputs.Add` in their `init` function to register @@ -25,7 +25,7 @@ and submit new inputs. Let's say you've written a plugin that emits metrics about processes on the current host. -### Input Plugin Example +## Input Plugin Example ```go package simple @@ -55,7 +55,7 @@ func (s *Simple) SampleConfig() string { // Init is for setup, and validating config. func (s *Simple) Init() error { - return nil + return nil } func (s *Simple) Gather(acc telegraf.Accumulator) error { @@ -75,9 +75,9 @@ func init() { ### Development -* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker +- Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment using docker-compose. -* ***[Optional]*** When developing a plugin, add a `dev` directory with a +- ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf` as well as any other supporting files, where sensible. diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md index cfa80a1493757..667cfc617ae4c 100644 --- a/docs/INTEGRATION_TESTS.md +++ b/docs/INTEGRATION_TESTS.md @@ -1,62 +1,63 @@ # Integration Tests -To run our current integration test suite: +To run our current integration test suite: Running the integration tests requires several docker containers to be running. You can start the containers with: -``` + +```shell docker-compose up ``` To run only the integration tests use: -``` +```shell make test-integration ``` Use `make docker-kill` to stop the containers. -Contributing integration tests: +Contributing integration tests: - Add Integration to the end of the test name so it will be run with the above command. 
- Writes tests where no library is being used in the plugin - There is poor code coverage - It has dynamic code that only gets run at runtime eg: SQL -Current areas we have integration tests: +Current areas we have integration tests: | Area | What it does | |------------------------------------|-------------------------------------------| | Inputs: Aerospike | | | Inputs: Disque | | -| Inputs: Dovecot | | -| Inputs: Mcrouter | | -| Inputs: Memcached | | -| Inputs: Mysql | | -| Inputs: Opcua | | -| Inputs: Openldap | | -| Inputs: Pgbouncer | | -| Inputs: Postgresql | | -| Inputs: Postgresql extensible | | -| Inputs: Procstat / Native windows | | -| Inputs: Prometheus | | -| Inputs: Redis | | -| Inputs: Sqlserver | | -| Inputs: Win perf counters | | -| Inputs: Win services | | -| Inputs: Zookeeper | | -| Outputs: Cratedb / Postgres | | -| Outputs: Elasticsearch | | -| Outputs: Kafka | | -| Outputs: MQTT | | -| Outputs: Nats | | -| Outputs: NSQ | | +| Inputs: Dovecot | | +| Inputs: Mcrouter | | +| Inputs: Memcached | | +| Inputs: Mysql | | +| Inputs: Opcua | | +| Inputs: Openldap | | +| Inputs: Pgbouncer | | +| Inputs: Postgresql | | +| Inputs: Postgresql extensible | | +| Inputs: Procstat / Native windows | | +| Inputs: Prometheus | | +| Inputs: Redis | | +| Inputs: Sqlserver | | +| Inputs: Win perf counters | | +| Inputs: Win services | | +| Inputs: Zookeeper | | +| Outputs: Cratedb / Postgres | | +| Outputs: Elasticsearch | | +| Outputs: Kafka | | +| Outputs: MQTT | | +| Outputs: Nats | | +| Outputs: NSQ | | | Outputs: Postgresql | | Areas we would benefit most from new integration tests: | Area | |------------------------------------| -| SNMP | -| MYSQL | -| SQLSERVER | +| SNMP | +| MYSQL | +| SQLSERVER | diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 294ba17e7fcdd..617e7fee719be 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -35,7 +35,6 @@ following works: - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) - github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) -- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) - github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) - github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) @@ -69,6 +68,7 @@ following works: - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) - github.com/coocood/freecache [MIT License](https://github.com/coocood/freecache/blob/master/LICENSE) +- github.com/coreos/go-semver [Apache License 2.0](https://github.com/coreos/go-semver/blob/main/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT 
License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) - github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) @@ -114,11 +114,13 @@ following works: - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/googleapis/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) +- github.com/gophercloud/gophercloud [Apache License 2.0](https://github.com/gophercloud/gophercloud/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) - github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) - github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) - github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) - github.com/grid-x/serial [MIT License](https://github.com/grid-x/serial/blob/master/LICENSE) +- github.com/gwos/tcg/sdk [MIT License](https://github.com/gwos/tcg/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) - github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) @@ -138,6 +140,7 @@ following works: - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/intel/iaevents [Apache License 2.0](https://github.com/intel/iaevents/blob/main/LICENSE) - github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE) - github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE) - github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE) @@ -293,5 +296,7 @@ following works: - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) - sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) -## telegraf used and modified code from these projects + +## Telegraf used and modified code from these projects + - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index db8383126ad68..b9baa69a9d3f4 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -1,10 +1,10 @@ -### Output Plugins +# Output Plugins This section is for developers who want to create a new output sink. Outputs are created in a similar manner as collection plugins, and their interface has similar constructs. 
-### Output Plugin Guidelines +## Output Plugin Guidelines - An output must conform to the [telegraf.Output][] interface. - Outputs should call `outputs.Add` in their `init` function to register @@ -17,7 +17,7 @@ similar constructs. - The `Description` function should say in one line what this output does. - Follow the recommended [Code Style][]. -### Output Plugin Example +## Output Plugin Example ```go package simpleoutput @@ -46,7 +46,7 @@ func (s *Simple) SampleConfig() string { // Init is for setup, and validating config. func (s *Simple) Init() error { - return nil + return nil } func (s *Simple) Connect() error { @@ -103,6 +103,7 @@ You should also add the following to your `SampleConfig()`: ## Flushing Metrics to Outputs Metrics are flushed to outputs when any of the following events happen: + - `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval - At least `metric_batch_size` count of metrics are waiting in the buffer - The telegraf process has received a SIGUSR1 signal diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 30b2c643de8f6..44def8c9273bf 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -1,8 +1,8 @@ -### Processor Plugins +# Processor Plugins This section is for developers who want to create a new processor plugin. -### Processor Plugin Guidelines +## Processor Plugin Guidelines * A processor must conform to the [telegraf.Processor][] interface. * Processors should call `processors.Add` in their `init` function to register @@ -12,13 +12,13 @@ This section is for developers who want to create a new processor plugin. * The `SampleConfig` function should return valid toml that describes how the processor can be configured. This is include in the output of `telegraf config`. -- The `SampleConfig` function should return valid toml that describes how the +* The `SampleConfig` function should return valid toml that describes how the plugin can be configured. This is included in `telegraf config`. Please consult the [Sample Config][] page for the latest style guidelines. * The `Description` function should say in one line what this processor does. -- Follow the recommended [Code Style][]. +* Follow the recommended [Code Style][]. -### Processor Plugin Example +## Processor Plugin Example ```go package printer @@ -26,47 +26,47 @@ package printer // printer.go import ( - "fmt" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) type Printer struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` ` func (p *Printer) SampleConfig() string { - return sampleConfig + return sampleConfig } func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." + return "Print all metrics that pass through this filter." } // Init is for setup, and validating config. 
func (p *Printer) Init() error { - return nil + return nil } func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - fmt.Println(metric.String()) - } - return in + for _, metric := range in { + fmt.Println(metric.String()) + } + return in } func init() { - processors.Add("printer", func() telegraf.Processor { - return &Printer{} - }) + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) } ``` -### Streaming Processors +## Streaming Processors Streaming processors are a new processor type available to you. They are particularly useful to implement processor types that use background processes @@ -84,7 +84,7 @@ Some differences from classic Processors: * Processors should call `processors.AddStreaming` in their `init` function to register themselves. See below for a quick example. -### Streaming Processor Example +## Streaming Processor Example ```go package printer @@ -92,30 +92,30 @@ package printer // printer.go import ( - "fmt" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) type Printer struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` ` func (p *Printer) SampleConfig() string { - return sampleConfig + return sampleConfig } func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." + return "Print all metrics that pass through this filter." } // Init is for setup, and validating config. func (p *Printer) Init() error { - return nil + return nil } // Start is called once when the plugin starts; it is only called once per @@ -135,13 +135,13 @@ func (p *Printer) Start(acc telegraf.Accumulator) error { // Metrics you don't want to pass downstream should have metric.Drop() called, // rather than simply omitting the acc.AddMetric() call func (p *Printer) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { - // print! - fmt.Println(metric.String()) - // pass the metric downstream, or metric.Drop() it. - // Metric will be dropped if this function returns an error. - acc.AddMetric(metric) + // print! + fmt.Println(metric.String()) + // pass the metric downstream, or metric.Drop() it. + // Metric will be dropped if this function returns an error. + acc.AddMetric(metric) - return nil + return nil } // Stop gives you an opportunity to gracefully shut down the processor. @@ -154,9 +154,9 @@ func (p *Printer) Stop() error { } func init() { - processors.AddStreaming("printer", func() telegraf.StreamingProcessor { - return &Printer{} - }) + processors.AddStreaming("printer", func() telegraf.StreamingProcessor { + return &Printer{} + }) } ``` diff --git a/docs/PROFILING.md b/docs/PROFILING.md index a0851c8f18b12..428158e690576 100644 --- a/docs/PROFILING.md +++ b/docs/PROFILING.md @@ -6,7 +6,7 @@ By default, the profiling is turned off. To enable profiling you need to specify address to config parameter `pprof-addr`, for example: -``` +```shell telegraf --config telegraf.conf --pprof-addr localhost:6060 ``` @@ -21,4 +21,3 @@ or to look at a 30-second CPU profile: `go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30` To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser. 
- diff --git a/docs/README.md b/docs/README.md index 99320dee95588..431118259ebce 100644 --- a/docs/README.md +++ b/docs/README.md @@ -21,4 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md -[nightlies]: /docs/NIGHTLIES.md \ No newline at end of file +[nightlies]: /docs/NIGHTLIES.md diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md index 81049fcee9f99..6a187d0fa0c08 100644 --- a/docs/SQL_DRIVERS_INPUT.md +++ b/docs/SQL_DRIVERS_INPUT.md @@ -5,7 +5,7 @@ might change between versions. Please check the driver documentation for availab database | driver | aliases | example DSN | comment ---------------------| ------------------------------------------------------| --------------- | -------------------------------------------------------------------------------------- | ------- -CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres
pgx | see _postgres_ driver | uses PostgresQL driver +CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgresQL driver MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value¶m2=value` | uses newer _sqlserver_ driver MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information @@ -16,28 +16,35 @@ TiDB | [tidb](https://github.com/go-sql-driver/mysql) | m ## Comments ### Driver aliases + Some database drivers are supported though another driver (e.g. CockroachDB). For other databases we provide a more obvious name (e.g. postgres) compared to the driver name. For all of those drivers you might use an _alias_ name during configuration. ### Example data-source-name DSN + The given examples are just that, so please check the driver documentation for the exact format and available options and parameters. Please note that the format of a DSN might also change between driver version. ### Type conversions + Telegraf relies on type conversion of the database driver and/or the golang sql framework. In case you find any problem, please open an issue! ## Help + If nothing seems to work, you might find help in the telegraf forum or in the chat. ### The documentation is wrong + Please open an issue or even better send a pull-request! ### I found a bug + Please open an issue or even better send a pull-request! ### My database is not supported + We currently cannot support CGO drivers in telegraf! Please check if a **pure Go** driver for the [golang sql framework](https://golang.org/pkg/database/sql/) exists. If you found such a driver, please let us know by opening an issue or even better by sending a pull-request! diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md index 42a5abea56f30..74443a24bbd2a 100644 --- a/docs/TEMPLATE_PATTERN.md +++ b/docs/TEMPLATE_PATTERN.md @@ -4,7 +4,8 @@ Template patterns are a mini language that describes how a dot delimited string should be mapped to and from [metrics][]. A template has the form: -``` + +```text "host.mytag.mytag.measurement.measurement.field*" ``` @@ -25,9 +26,9 @@ can also be specified multiple times. **NOTE:** `measurement` must be specified in your template. **NOTE:** `field*` cannot be used in conjunction with `measurement*`. -### Examples +## Examples -#### Measurement & Tag Templates +### Measurement & Tag Templates The most basic template is to specify a single transformation to apply to all incoming metrics. So the following template: @@ -40,7 +41,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text us.west.cpu.load 100 => cpu.load,region=us.west value=100 ``` @@ -55,7 +56,7 @@ templates = [ ] ``` -#### Field Templates +### Field Templates The field keyword tells Telegraf to give the metric that field name. So the following template: @@ -69,7 +70,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.percent.eu-east 100 => cpu_usage,region=eu-east idle_percent=100 ``` @@ -86,12 +87,12 @@ templates = [ which would result in the following Graphite -> Telegraf transformation. 
-``` +```text cpu.usage.eu-east.idle.percentage 100 => cpu_usage,region=eu-east idle_percentage=100 ``` -#### Filter Templates +### Filter Templates Users can also filter the template(s) to use based on the name of the bucket, using glob matching, like so: @@ -105,7 +106,7 @@ templates = [ which would result in the following transformation: -``` +```text cpu.load.eu-east 100 => cpu_load,region=eu-east value=100 @@ -113,7 +114,7 @@ mem.cached.localhost 256 => mem_cached,host=localhost value=256 ``` -#### Adding Tags +### Adding Tags Additional tags can be added to a metric that don't exist on the received metric. You can add additional tags by specifying them after the pattern. @@ -128,7 +129,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.eu-east 100 => cpu_usage,region=eu-east,datacenter=1a idle=100 ``` diff --git a/docs/TLS.md b/docs/TLS.md index 74b2512f1e59d..133776b7faf73 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -5,9 +5,10 @@ possible, plugins will provide the standard settings described below. With the exception of the advanced configuration available TLS settings will be documented in the sample configuration. -### Client Configuration +## Client Configuration For client TLS support we have the following options: + ```toml ## Root certificates for verifying server certificates encoded in PEM format. # tls_ca = "/etc/telegraf/ca.pem" @@ -52,23 +53,23 @@ for the interest of brevity. ## Define list of allowed ciphers suites. If not defined the default ciphers ## supported by Go will be used. ## ex: tls_cipher_suites = [ -## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", -## "TLS_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_RSA_WITH_AES_128_CBC_SHA", -## "TLS_RSA_WITH_AES_256_CBC_SHA" +## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +## "TLS_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_RSA_WITH_AES_128_CBC_SHA", +## "TLS_RSA_WITH_AES_256_CBC_SHA" ## ] # tls_cipher_suites = [] @@ -80,6 +81,7 @@ for the interest of brevity. 
``` Cipher suites for use with `tls_cipher_suites`: + - `TLS_RSA_WITH_RC4_128_SHA` - `TLS_RSA_WITH_3DES_EDE_CBC_SHA` - `TLS_RSA_WITH_AES_128_CBC_SHA` @@ -107,6 +109,7 @@ Cipher suites for use with `tls_cipher_suites`: - `TLS_CHACHA20_POLY1305_SHA256` TLS versions for use with `tls_min_version` or `tls_max_version`: + - `TLS10` - `TLS11` - `TLS12` diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index b0b6ee5adf358..fe77a16bf7475 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -9,29 +9,31 @@ the general steps to set it up. 3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf` 4. To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (If necessary, you can wrap any spaces in the file paths in double quotes ""): - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install ``` 5. Edit the configuration file to meet your needs 6. To check that it works, run: - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test ``` 7. To start collecting data, run: - ``` + ```shell > net start telegraf ``` ## Config Directory You can also specify a `--config-directory` for the service to use: + 1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d` 2. Include the `--config-directory` option when registering the service: - ``` + + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d ``` @@ -54,7 +56,7 @@ filtering options. However, if you do need to run multiple telegraf instances on a single system, you can install the service with the `--service-name` and `--service-display-name` flags to give the services unique names: -``` +```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1" > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" ``` @@ -64,7 +66,7 @@ on a single system, you can install the service with the `--service-name` and When Telegraf runs as a Windows service, Telegraf logs messages to Windows events log before configuration file with logging settings is loaded. Check event log for an error reported by `telegraf` service in case of Telegraf service reports failure on its start: Event Viewer->Windows Logs->Application -**Troubleshooting common error #1067** +### common error #1067 When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start diff --git a/docs/developers/CODE_STYLE.md b/docs/developers/CODE_STYLE.md index 1bbb2b14d84c4..61485aa8c8f98 100644 --- a/docs/developers/CODE_STYLE.md +++ b/docs/developers/CODE_STYLE.md @@ -1,7 +1,8 @@ # Code Style + Code is required to be formatted using `gofmt`, this covers most code style requirements. It is also highly recommended to use `goimports` to automatically order imports. -Please try to keep lines length under 80 characters, the exact number of +Please try to keep lines length under 80 characters, the exact number of characters is not strict but it generally helps with readability. 
diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md index a3da79a5ac8e8..fe262eeed4bd2 100644 --- a/docs/developers/DEPRECATION.md +++ b/docs/developers/DEPRECATION.md @@ -1,4 +1,5 @@ # Deprecation + Deprecation is the primary tool for making changes in Telegraf. A deprecation indicates that the community should move away from using a feature, and documents that the feature will be removed in the next major update (2.0). @@ -36,14 +37,17 @@ Add the deprecation warning to the plugin's README: Log a warning message if the plugin is used. If the plugin is a ServiceInput, place this in the `Start()` function, for regular Input's log it only the first time the `Gather` function is called. + ```go log.Println("W! [inputs.logparser] The logparser plugin is deprecated in 1.10. " + - "Please use the tail plugin with the grok data_format as a replacement.") + "Please use the tail plugin with the grok data_format as a replacement.") ``` + ## Deprecate options Mark the option as deprecated in the sample config, include the deprecation version and any replacement. + ```toml ## Broker URL ## deprecated in 1.7; use the brokers option @@ -54,17 +58,19 @@ In the plugins configuration struct, mention that the option is deprecated: ```go type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers + URL string `toml:"url"` // deprecated in 1.7; use brokers } ``` Finally, use the plugin's `Init() error` method to display a log message at warn level. The message should include the offending configuration option and any suggested replacement: + ```go func (a *AMQPConsumer) Init() error { - if p.URL != "" { - p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") - } - return nil + if p.URL != "" { + p.Log.Warnf("Use of deprecated configuration: 'url'; please use the 'brokers' option") + } + + return nil } ``` diff --git a/docs/developers/LOGGING.md b/docs/developers/LOGGING.md index 60de15699a6e8..e009968c4df36 100644 --- a/docs/developers/LOGGING.md +++ b/docs/developers/LOGGING.md @@ -8,12 +8,13 @@ need to be specified for each log call. ```go type MyPlugin struct { - Log telegraf.Logger `toml:"-"` + Log telegraf.Logger `toml:"-"` } ``` You can then use this Logger in the plugin. Use the method corresponding to the log level of the message. + ```go p.Log.Errorf("Unable to write to file: %v", err) ``` @@ -22,6 +23,7 @@ p.Log.Errorf("Unable to write to file: %v", err) In other sections of the code it is required to add the log level and module manually: + ```go log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) ``` @@ -37,6 +39,7 @@ support setting the log level on a per module basis, it is especially important to not over do it with debug logging. If the plugin is listening on a socket, log a message with the address of the socket: + ```go p.log.InfoF("Listening on %s://%s", protocol, l.Addr()) ``` @@ -59,6 +62,7 @@ normal on some systems. The log level is indicated by a single character at the start of the log message. Adding this prefix is not required when using the Plugin Logger. 
+ - `D!` Debug - `I!` Info - `W!` Warning diff --git a/docs/developers/METRIC_FORMAT_CHANGES.md b/docs/developers/METRIC_FORMAT_CHANGES.md index 32bfe0a2db5a7..7d6477c253aca 100644 --- a/docs/developers/METRIC_FORMAT_CHANGES.md +++ b/docs/developers/METRIC_FORMAT_CHANGES.md @@ -3,14 +3,17 @@ When making changes to an existing input plugin, care must be taken not to change the metric format in ways that will cause trouble for existing users. This document helps developers understand how to make metric format changes safely. ## Changes can cause incompatibilities + If the metric format changes, data collected in the new format can be incompatible with data in the old format. Database queries designed around the old format may not work with the new format. This can cause application failures. Some metric format changes don't cause incompatibilities. Also, some unsafe changes are necessary. How do you know what changes are safe and what to do if your change isn't safe? ## Guidelines + The main guideline is just to keep compatibility in mind when making changes. Often developers are focused on making a change that fixes their particular problem and they forget that many people use the existing code and will upgrade. When you're coding, keep existing users and applications in mind. ### Renaming, removing, reusing + Database queries refer to the metric and its tags and fields by name. Any Telegraf code change that changes those names has the potential to break an existing query. Similarly, removing tags or fields can break queries. Changing the meaning of an existing tag value or field value or reusing an existing one in a new way isn't safe. Although queries that use these tags/field may not break, they will not work as they did before the change. @@ -18,9 +21,11 @@ Changing the meaning of an existing tag value or field value or reusing an exist Adding a field doesn't break existing queries. Queries that select all fields and/or tags (like "select * from") will return an extra series but this is often useful. ### Performance and storage + Time series databases can store large amounts of data but many of them don't perform well on high cardinality data. If a metric format change includes a new tag that holds high cardinality data, database performance could be reduced enough to cause existing applications not to work as they previously did. Metric format changes that dramatically increase the number of tags or fields of a metric can increase database storage requirements unexpectedly. Both of these types of changes are unsafe. ### Make unsafe changes opt-in + If your change has the potential to seriously affect existing users, the change must be opt-in. To do this, add a plugin configuration setting that lets the user select the metric format. Make the setting's default value select the old metric format. When new users add the plugin they can choose the new format and get its benefits. When existing users upgrade, their config files won't have the new setting so the default will ensure that there is no change. When adding a setting, avoid using a boolean and consider instead a string or int for future flexibility. A boolean can only handle two formats but a string can handle many. For example, compare use_new_format=true and features=["enable_foo_fields"]; the latter is much easier to extend and still very descriptive. 
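A hedged sketch of what such an opt-in setting could look like in a plugin's sample configuration (the plugin name and the `features` option are purely illustrative, not an actual Telegraf setting):

```toml
[[inputs.example]]
  ## Opt-in switches for newer metric layouts. An empty list keeps the old
  ## format, so existing users see no change after upgrading.
  ## (illustrative option name used only for this example)
  # features = ["enable_foo_fields"]
```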
@@ -28,6 +33,7 @@ When adding a setting, avoid using a boolean and consider instead a string or in If you want to encourage existing users to use the new format you can log a warning once on startup when the old format is selected. The warning should tell users in a gentle way that they can upgrade to a better metric format. If it doesn't make sense to maintain multiple metric formats forever, you can change the default on a major release or even remove the old format completely. See [[Deprecation]] for details. ### Utility + Changes should be useful to many or most users. A change that is only useful for a small number of users may not accepted, even if it's off by default. ## Summary table @@ -39,4 +45,5 @@ Changes should be useful to many or most users. A change that is only useful fo | field | unsafe | unsafe | ok as long as it's useful for existing users and is worth the added space | ## References + InfluxDB Documentation: "Schema and data layout" diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md index 000479c94ce42..b8d4d1739f0b2 100644 --- a/docs/developers/PACKAGING.md +++ b/docs/developers/PACKAGING.md @@ -21,12 +21,14 @@ building the rpm/deb as it is less system dependent. Pull the CI images from quay, the version corresponds to the version of Go that is used to build the binary: -``` + +```shell docker pull quay.io/influxdb/telegraf-ci:1.9.7 ``` Start a shell in the container: -``` + +```shell docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash ``` @@ -42,6 +44,7 @@ From within the container: * Change `include_packages` to change what package you want, run `make help` to see possible values From the host system, copy the build artifacts out of the container: -``` + +```shell docker cp romantic_ptolemy:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm . ``` diff --git a/docs/developers/PROFILING.md b/docs/developers/PROFILING.md index 81cdf1980304d..c1f02e4080d4c 100644 --- a/docs/developers/PROFILING.md +++ b/docs/developers/PROFILING.md @@ -1,20 +1,24 @@ # Profiling + This article describes how to collect performance traces and memory profiles from Telegraf. If you are submitting this for an issue, please include the version.txt generated below. Use the `--pprof-addr` option to enable the profiler, the easiest way to do this may be to add this line to `/etc/default/telegraf`: -``` + +```shell TELEGRAF_OPTS="--pprof-addr localhost:6060" ``` Restart Telegraf to activate the profile address. -#### Trace Profile +## Trace Profile + Collect a trace during the time where the performance issue is occurring. 
This example collects a 10 second trace and runs for 10 seconds: -``` + +```shell curl 'http://localhost:6060/debug/pprof/trace?seconds=10' > trace.bin telegraf --version > version.txt go env GOOS GOARCH >> version.txt @@ -22,34 +26,41 @@ go env GOOS GOARCH >> version.txt The `trace.bin` and `version.txt` files can be sent in for analysis or, if desired, you can analyze the trace with: -``` + +```shell go tool trace trace.bin ``` -#### Memory Profile +## Memory Profile + Collect a heap memory profile: -``` + +```shell curl 'http://localhost:6060/debug/pprof/heap' > mem.prof telegraf --version > version.txt go env GOOS GOARCH >> version.txt ``` Analyze: -``` + +```shell $ go tool pprof mem.prof (pprof) top5 ``` -#### CPU Profile +## CPU Profile + Collect a 30s CPU profile: -``` + +```shell curl 'http://localhost:6060/debug/pprof/profile' > cpu.prof telegraf --version > version.txt go env GOOS GOARCH >> version.txt ``` Analyze: -``` + +```shell go tool pprof cpu.prof (pprof) top5 ``` diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md index 0f036d225b7ba..49107c03f9da9 100644 --- a/docs/developers/REVIEWS.md +++ b/docs/developers/REVIEWS.md @@ -9,7 +9,9 @@ All pull requests should follow the style and best practices in the document. ## Process + The review process is roughly structured as follows: + 1. Submit a pull request. Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code on as an employee of your company). Provide a short description of your submission and reference issues that you potentially close. Make sure the CI tests are all green and there are no linter-issues. 1. Get feedback from a first reviewer and a `ready for final review` tag. @@ -21,6 +23,7 @@ It might take some time until your PR gets merged, depending on the release cycl your pull-request (bugfix, enhancement of existing code, new plugin, etc). Remember, it might be necessary to rebase your code before merge to resolve conflicts. Please read the review comments carefully, fix the related part of the code and/or respond in case there is anything unclear. If there is no activity in a pull-request or the contributor does not respond, we apply the following scheme: + 1. We send a first reminder after at least 2 weeks of inactivity. 1. After at least another two weeks of inactivity we send a second reminder and are setting the `waiting for response` tag. 1. Another two weeks later we will ask the community for help setting the `help wanted` reminder. @@ -34,10 +37,13 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - SampleConfig must match the readme, but not include the plugin name. - structs should include toml tags for fields that are expected to be editable from the config. eg `toml:"command"` (snake_case) - plugins that want to log should declare the Telegraf logger, not use the log package. eg: + ```Go Log telegraf.Logger `toml:"-"` ``` + (in tests, you can do `myPlugin.Log = testutil.Logger{}`) + - Initialization and config checking should be done on the `Init() error` function, not in the Connect, Gather, or Start functions. - `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start. - plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel. 
@@ -67,6 +73,9 @@ So in case you expect a longer period of inactivity or you want to abandon a pul - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously. - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes. +## Linting + +Each pull request will have the appropriate linters checking the files for any common mistakes. The GitHub Action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing, you can click on the action and read the logs to figure out the issue. You can also run the GitHub Action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the Super Linter readme. ## Testing @@ -82,6 +91,7 @@ used for assertions within the tests when possible, with preference towards github.com/stretchr/testify/require. Primarily use the require package to avoid cascading errors: + ```go assert.Equal(t, lhs, rhs) # avoid require.Equal(t, lhs, rhs) # good ``` @@ -96,6 +106,7 @@ Ensure the [[SampleConfig]] and match with the current standards. READMEs should: + - be spaces, not tabs - be indented consistently, matching other READMEs - have two `#` for comments @@ -121,7 +132,8 @@ Metrics use `snake_case` naming style. Generally enumeration data should be encoded as a tag. In some cases it may be desirable to also include the data as an integer field: -``` + +```shell net_response,result=success result_code=0i ``` @@ -129,7 +141,8 @@ net_response,result=success result_code=0i Use tags for each range with the `le` tag, and `+Inf` for the values out of range. This format is inspired by the Prometheus project: -``` + +```shell cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000 cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000 cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000 diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md index d0969212fecb2..2f67535de54b2 100644 --- a/docs/developers/SAMPLE_CONFIG.md +++ b/docs/developers/SAMPLE_CONFIG.md @@ -5,13 +5,15 @@ The sample config file is generated from a results of the `SampleConfig()` and You can generate a full sample config: -``` + +```shell telegraf config ``` You can also generate the config for a particular plugin using the `-usage` option: -``` + +```shell telegraf --usage influxdb ``` @@ -21,6 +23,7 @@ In the config file we use 2-space indention. Since the config is [TOML](https://github.com/toml-lang/toml) the indention has no meaning. Documentation is double commented, full sentences, and ends with a period. + ```toml ## This text describes what an the exchange_type option does. # exchange_type = "topic" @@ -29,14 +32,15 @@ Documentation is double commented, full sentences, and ends with a period. Try to give every parameter a default value whenever possible. If an parameter does not have a default or must frequently be changed then have it uncommented. + ```toml ## Brokers are the AMQP brokers to connect to. brokers = ["amqp://localhost:5672"] ``` - Options where the default value is usually sufficient are normally commented out. The commented out value is the default. + ```toml ## What an exchange type is. # exchange_type = "topic" @@ -44,6 +48,7 @@ out. The commented out value is the default.
If you want to show an example of a possible setting filled out that is different from the default, show both: + ```toml ## Static routing key. Used when no routing_tag is set or as a fallback ## when the tag specified in routing tag is not found. @@ -53,6 +58,7 @@ different from the default, show both: Unless parameters are closely related, add a space between them. Usually parameters is closely related have a single description. + ```toml ## If true, queue will be declared as an exclusive queue. # queue_exclusive = false diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md index 1ee6cc7517c74..5b8b8bb216796 100644 --- a/docs/maintainers/LABELS.md +++ b/docs/maintainers/LABELS.md @@ -26,6 +26,7 @@ For bugs you may want to add `panic`, `regression`, or `upstream` to provide further detail. Summary of Labels: + | Label | Description | Purpose | | --- | ----------- | ---| | `area/*` | These labels each corresponding to a plugin or group of plugins that can be added to identify the affected plugin or group of plugins | categorization | @@ -40,9 +41,9 @@ Summary of Labels: | `good first issue` | This is a smaller issue suited for getting started in Telegraf, Golang, and contributing to OSS | community | | `help wanted` | Request for community participation, code, contribution | community | | `need more info` | Issue triaged but outstanding questions remain | community | -| `performance` | Issues or PRs that address performance issues | categorization| +| `performance` | Issues or PRs that address performance issues | categorization| | `platform/*` | Issues that only apply to one platform | categorization | -| `plugin/*` | 1. Request for new * plugins 2. Issues/PRs that are related to * plugins | categorization | +| `plugin/*` | Request for new plugins and issues/PRs that are related to plugins | categorization | | `ready for final review` | Pull request has been reviewed and/or tested by multiple users and is ready for a final review | triage | | `rfc` | Request for comment - larger topics of discussion that are looking for feedback | community | | `support` |Telegraf questions, may be directed to community site or slack | triage | @@ -66,7 +67,3 @@ We close issues for the following reasons: | `closed/not-reproducible` | Given the information we have we can't reproduce the issue | | `closed/out-of-scope` | The feature request is out of scope for Telegraf - highly unlikely to be worked on | | `closed/question` | This issue is a support question, directed to community site or slack | - - - - diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md index 90c49fd5af689..5a627d4cc29ec 100644 --- a/docs/maintainers/PULL_REQUESTS.md +++ b/docs/maintainers/PULL_REQUESTS.md @@ -2,7 +2,7 @@ ## Before Review -Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The +Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The only exemption would be non-copyrightable changes such as fixing a typo. Check that all tests are passing. Due to intermittent errors in the CI tests @@ -36,13 +36,15 @@ history and this method allows us to normalize commit messages as well as simplifies backporting. ### Rewriting the commit message + After selecting "Squash and Merge" you may need to rewrite the commit message. Usually the body of the commit messages should be cleared as well, unless it -is well written and applies to the entire changeset. 
-- Use imperative present tense for the first line of the message: - - Use "Add tests for" (instead of "I added tests for" or "Adding tests for") -- The default merge commit messages include the PR number at the end of the -commit message, keep this in the final message. +is well written and applies to the entire changeset. + +- Use imperative present tense for the first line of the message: + - Use "Add tests for" (instead of "I added tests for" or "Adding tests for") +- The default merge commit messages include the PR number at the end of the +commit message, keep this in the final message. - If applicable mention the plugin in the message. **Example Enhancement:** @@ -59,7 +61,8 @@ commit message, keep this in the final message. If required, backport the patch and the changelog update to the current release branch. Usually this can be done by cherry picking the commits: -``` + +```shell git cherry-pick -x aaaaaaaa bbbbbbbb ``` diff --git a/docs/maintainers/RELEASES.md b/docs/maintainers/RELEASES.md index 3c05cdf968715..7eb2522cfd0e8 100644 --- a/docs/maintainers/RELEASES.md +++ b/docs/maintainers/RELEASES.md @@ -3,21 +3,25 @@ ## Release Branch On master, update `etc/telegraf.conf` and commit: + ```sh ./telegraf config > etc/telegraf.conf ``` Create the new release branch: + ```sh git checkout -b release-1.15 ``` Push the changes: + ```sh git push origin release-1.15 master ``` Update next version strings on master: + ```sh git checkout master echo 1.16.0 > build_version.txt @@ -29,6 +33,7 @@ Release candidates are created only for new minor releases (ex: 1.15.0). Tags are created but some of the other tasks, such as adding a changelog entry are skipped. Packages are added to the github release page and posted to community but are not posted to package repos or docker hub. + ```sh git checkout release-1.15 git commit --allow-empty -m "Telegraf 1.15.0-rc1" @@ -40,6 +45,7 @@ git push origin release-1.15 v1.15.0-rc1 On master, set the release date in the changelog and cherry-pick the change back: + ```sh git checkout master vi CHANGELOG.md @@ -52,6 +58,7 @@ Double check that the changelog was applied as desired, or fix it up and amend the change before pushing. Tag the release: + ```sh git checkout release-1.8 # This just improves the `git show 1.8.0` output @@ -61,6 +68,7 @@ git tag -s v1.8.0 -m "Telegraf 1.8.0" Check that the version was set correctly, the tag can always be altered if a mistake is made but only before you push it to Github: + ```sh make ./telegraf --version @@ -69,6 +77,7 @@ Telegraf v1.8.0 (git: release-1.8 aaaaaaaa) When you push a branch with a tag to Github, CircleCI will be triggered to build the packages. + ```sh git push origin master release-1.8 v1.8.0 ``` @@ -82,6 +91,7 @@ Update apt and yum repositories hosted at repos.influxdata.com. Update the package signatures on S3, these are used primarily by the docker images. 
Update docker image [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker): + ```sh cd influxdata-docker git co master diff --git a/etc/telegraf.conf b/etc/telegraf.conf index ae5680b32d52f..1e7e91ab62df8 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -317,7 +317,7 @@ # # Sends metrics to Azure Data Explorer # [[outputs.azure_data_explorer]] -# ## Azure Data Exlorer cluster endpoint +# ## Azure Data Explorer cluster endpoint # ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" # endpoint_url = "" # @@ -337,6 +337,9 @@ # ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). # # table_name = "" # +# ## Creates tables and relevant mapping if set to true(default). +# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. +# # create_tables = true # # Send aggregate metrics to Azure Monitor @@ -370,6 +373,24 @@ # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +# # Configuration for Google Cloud BigQuery to send entries +# [[outputs.bigquery]] +# ## Credentials File +# credentials_file = "/path/to/service/account/key.json" +# +# ## Google Cloud Platform Project +# project = "my-gcp-project" +# +# ## The namespace for the metric descriptor +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. +# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + # # Publish Telegraf metrics to a Google Cloud PubSub topic # [[outputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -655,6 +676,31 @@ # ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string # ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's # force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. +# ## This option can have the following values: +# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered +# ## drop -- drop fields containing NaNs or infs +# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) +# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number +# # float_handling = "none" +# # float_replacement_value = 0.0 + + +# # Configuration for Event Hubs output plugin +# [[outputs.event_hubs]] +# ## The full connection string to the Event Hub (required) +# ## The shared access key must have "Send" permissions on the target Event Hub. +# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" +# +# ## Client timeout (defaults to 30s) +# # timeout = "30s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "json" # # Send metrics to command as input over stdin @@ -765,11 +811,19 @@ # ## Endpoints for your graylog instances. # servers = ["udp://127.0.0.1:12201"] # +# ## Connection timeout. +# # timeout = "5s" +# # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. 
# ## example: short_message_field = "message" # # short_message_field = "" # +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. +# # name_field_no_prefix = false +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -778,6 +832,28 @@ # # insecure_skip_verify = false +# # Send telegraf metrics to GroundWork Monitor +# [[outputs.groundwork]] +# ## URL of your groundwork instance. +# url = "https://groundwork.example.com" +# +# ## Agent uuid for GroundWork API Server. +# agent_id = "" +# +# ## Username and password to access GroundWork API. +# username = "" +# password = "" +# +# ## Default display name for the host with services(metrics). +# # default_host = "telegraf" +# +# ## Default service state. +# # default_service_state = "SERVICE_OK" +# +# ## The name of the tag that contains the hostname. +# # resource_tag = "host" + + # # Configurable HTTP health check resource based on metrics # [[outputs.health]] # ## Address and port to listen on. @@ -861,6 +937,11 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" # +# ## Use batch serialization format (default) instead of line based format. +# ## Batch format is more efficient and should be used unless line based +# ## format is really needed. +# # use_batch_format = true +# # ## HTTP Content-Encoding for write request body, can be set to "gzip" to # ## compress body or "identity" to apply no encoding. # # content_encoding = "identity" @@ -874,6 +955,27 @@ # ## Maximum amount of time before idle connection is closed. # ## Zero means no limit. # # idle_conn_timeout = 0 +# +# ## Amazon Region +# #region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" # # Configuration for sending metrics to InfluxDB @@ -1224,6 +1326,42 @@ # # tls_key = "/etc/telegraf/key.pem" +# # Sends metrics to MongoDB +# [[outputs.mongodb]] +# # connection string examples for mongodb +# dsn = "mongodb://localhost:27017" +# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" +# +# # overrides serverSelectionTimeoutMS in dsn if set +# # timeout = "30s" +# +# # default authentication, optional +# # authentication = "NONE" +# +# # for SCRAM-SHA-256 authentication +# # authentication = "SCRAM" +# # username = "root" +# # password = "***" +# +# # for x509 certificate authentication +# # authentication = "X509" +# # tls_ca = "ca.pem" +# # tls_key = "client.pem" +# # # tls_key_pwd = "changeme" # required for encrypted tls_key +# # insecure_skip_verify = false +# +# # database to store measurements and time series collections +# # database = "telegraf" +# +# # granularity can be seconds, minutes, or hours. 
+# # configuring this value will be based on your input collection frequency. +# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection +# # granularity = "seconds" +# +# # optionally set a TTL to automatically expire documents from the measurement collections. +# # ttl = "360h" + + # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. @@ -2420,7 +2558,7 @@ # [[processors.printer]] -# # Transforms tag and field values with regex pattern +# # Transforms tag and field values as well as measurement, tag and field names with regex pattern # [[processors.regex]] # ## Tag and field conversions defined in a separate sub-tables # # [[processors.regex.tags]] @@ -2450,6 +2588,38 @@ # # pattern = ".*category=(\\w+).*" # # replacement = "${1}" # # result_key = "search_category" +# +# ## Rename metric fields +# # [[processors.regex.field_rename]] +# # ## Regular expression to match on a field name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new field name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed field OR you can "keep" +# # ## both the existing and source field. +# # # result_key = "keep" +# +# ## Rename metric tags +# # [[processors.regex.tag_rename]] +# # ## Regular expression to match on a tag name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new tag name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed tag OR you can "keep" +# # ## both the existing and source tag. +# # # result_key = "keep" +# +# ## Rename metrics +# # [[processors.regex.metric_rename]] +# # ## Regular expression to match on an metric name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" # # Rename measurements, tags, and fields that pass through this filter. @@ -2832,6 +3002,37 @@ # # compression = 100.0 +# # Aggregate metrics using a Starlark script +# [[aggregators.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# state = {} +# +# def add(metric): +# state["last"] = metric +# +# def push(): +# return state.get("last") +# +# def reset(): +# state.clear() +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [aggregators.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + # # Count the occurrence of values in fields. # [[aggregators.valuecounter]] # ## General Aggregator Arguments: @@ -3834,6 +4035,15 @@ # # ## List of interfaces to ignore when pulling metrics. # # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. 
+# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] # # Read metrics from one or more commands that can output to stdout @@ -4719,6 +4929,7 @@ # # tls_ca = "/path/to/cafile" # # tls_cert = "/path/to/certfile" # # tls_key = "/path/to/keyfile" +# # tls_server_name = "kubernetes.example.com" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -4812,6 +5023,12 @@ # # ] +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key @@ -4960,12 +5177,21 @@ # # parity = "N" # # stop_bits = 1 # +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! +# # debug_connection = false +# # ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" # ## default behaviour is "TCP" if the controller is TCP # ## For Serial you can choose between "RTU" and "ASCII" # # transmission_mode = "RTU" # -# ## Measurements +# ## Define the configuration schema +# ## |---register -- define fields per register type in the original style (only supports one slave ID) +# ## |---request -- define fields on a requests base +# configuration_type = "register" +# +# ## Per register definition # ## # # ## Digital Variables, Discrete Inputs and Coils @@ -5012,6 +5238,98 @@ # { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, # { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, # ] +# +# +# ## Per request definition +# ## +# +# ## Define a request sent to the device +# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. +# # [[inputs.modbus.request]] +# ## ID of the modbus slave device to query. +# ## If you need to query multiple slave-devices, create several "request" definitions. +# # slave_id = 0 +# +# ## Byte order of the data. +# ## |---ABCD or MSW-BE -- Big Endian (Motorola) +# ## |---DCBA or LSW-LE -- Little Endian (Intel) +# ## |---BADC or MSW-LE -- Big Endian with byte swap +# ## |---CDAB or LSW-BE -- Little Endian with byte swap +# # byte_order = "ABCD" +# +# ## Type of the register for the request +# ## Can be "coil", "discrete", "holding" or "input" +# # register = "holding" +# +# ## Name of the measurement. +# ## Can be overriden by the individual field definitions. Defaults to "modbus" +# # measurement = "modbus" +# +# ## Field definitions +# ## Analog Variables, Input Registers and Holding Registers +# ## address - address of the register to query. For coil and discrete inputs this is the bit address. +# ## name *1 - field name +# ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and +# ## FLOAT32, FLOAT64 (IEEE 754 binary representation) +# ## scale *1,2 - (optional) factor to scale the variable with +# ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if +# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). 
+# ## measurement *1 - (optional) measurement name, defaults to the setting of the request +# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers +# ## with a single request. Defaults to "false". +# ## +# ## *1: Those fields are ignored if field is omitted ("omit"=true) +# ## +# ## *2: Thise fields are ignored for both "coil" and "discrete"-input type of registers. For those register types +# ## the fields are output as zero or one in UINT64 format by default. +# +# ## Coil / discrete input example +# # fields = [ +# # { address=0, name="motor1_run"}, +# # { address=1, name="jog", measurement="motor"}, +# # { address=2, name="motor1_stop", omit=true}, +# # { address=3, name="motor1_overheating"}, +# # ] +# +# ## Per-request tags +# ## These tags take precedence over predefined tags. +# # [[inputs.modbus.request.tags]] +# # name = "value" +# +# ## Holding / input example +# ## All of those examples will result in FLOAT64 field outputs +# # fields = [ +# # { address=0, name="voltage", type="INT16", scale=0.1 }, +# # { address=1, name="current", type="INT32", scale=0.001 }, +# # { address=3, name="power", type="UINT32", omit=true }, +# # { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# # { address=7, name="frequency", type="UINT32", scale=0.1 }, +# # { address=8, name="power_factor", type="INT64", scale=0.01 }, +# # ] +# +# ## Holding / input example with type conversions +# # fields = [ +# # { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# # { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# # { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# # ] +# +# ## Per-request tags +# ## These tags take precedence over predefined tags. +# # [[inputs.modbus.request.tags]] +# # name = "value" +# +# +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.modbus.workarounds] +# ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. +# # pause_between_requests = "0ms" +# ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain +# ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices) +# ## from multiple instances you might want to only stay connected during gather and disconnect afterwards. +# # close_connection_after_gather = false # # Read metrics from one or many MongoDB servers @@ -5021,7 +5339,7 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017"] +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] # # ## When true, collect cluster status # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -5408,6 +5726,20 @@ # # insecure_skip_verify = false +# # Read metrics from the Nomad API +# [[inputs.nomad]] +# ## URL for the Nomad agent +# # url = "http://127.0.0.1:4646" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # A plugin to collect stats from the NSD authoritative DNS name server # [[inputs.nsd]] # ## Address of server to connect to, optionally ':port'. 
Defaults to the @@ -5460,7 +5792,9 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # # bin_path = "/usr/bin/nvidia-smi" # # ## Optional: timeout for GPU polling @@ -5506,6 +5840,12 @@ # ## Password. Required for auth_method = "UserName" # # password = "" # # +# ## Option to select the metric timestamp to use. Valid options are: +# ## "gather" -- uses the time of receiving the data in telegraf +# ## "server" -- uses the timestamp provided by the server +# ## "source" -- uses the timestamp provided by the source +# # timestamp = "gather" +# # # ## Node ID configuration # ## name - field name to use in the output # ## namespace - OPC UA namespace of the node (integer value 0 thru 3) @@ -5593,6 +5933,59 @@ # timeout = 1000 +# # Collects performance metrics from OpenStack services +# [[inputs.openstack]] +# ## The recommended interval to poll is '30m' +# +# ## The identity endpoint to authenticate against and get the service catalog from. +# authentication_endpoint = "https://my.openstack.cloud:5000" +# +# ## The domain to authenticate against when using a V3 identity endpoint. +# # domain = "default" +# +# ## The project to authenticate as. +# # project = "admin" +# +# ## User authentication credentials. Must have admin rights. +# username = "admin" +# password = "password" +# +# ## Available services are: +# ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", +# ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" +# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] +# +# ## Collect Server Diagnostics +# # server_diagnotics = false +# +# ## output secrets (such as adminPass(for server) and UserID(for volume)). +# # output_secrets = false +# +# ## Amount of time allowed to complete the HTTP(s) request. +# # timeout = "5s" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Options for tags received from Openstack +# # tag_prefix = "openstack_tag_" +# # tag_value = "true" +# +# ## Timestamp format for timestamp data recieved from Openstack. +# ## If false format is unix nanoseconds. +# # human_readable_timestamps = false +# +# ## Measure Openstack call duration +# # measure_openstack_requests = false + + # # Read current weather and forecasts data from openweathermap.org # [[inputs.openweathermap]] # ## OpenWeatherMap API key. @@ -6085,6 +6478,14 @@ # # ## Timeout for the cli command to complete. # # timeout = "30s" +# +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. 
In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" # # Retrieves SNMP values from remote agents @@ -6105,6 +6506,9 @@ # ## SNMP version; can be 1, 2, or 3. # # version = 2 # +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# # ## Agent host tag; the tag used to reference the source host # # agent_host_tag = "agent_host" # @@ -6544,6 +6948,27 @@ # # timeout = "1s" +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. +# ## If both are empty or both are set, an error is thrown. +# # token_file = "/path/to/auth/token" +# ## OR +# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # Collect Wireguard server interface and peer statistics # [[inputs.wireguard]] # ## Optional list of Wireguard device/interface names to query. @@ -6623,30 +7048,6 @@ ############################################################################### -# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.KNXListener]] -# ## Type of KNX-IP interface. -# ## Can be either "tunnel" or "router". -# # service_type = "tunnel" -# -# ## Address of the KNX-IP interface. -# service_address = "localhost:3671" -# -# ## Measurement definition(s) -# # [[inputs.knx_listener.measurement]] -# # ## Name of the measurement -# # name = "temperature" -# # ## Datapoint-Type (DPT) of the KNX messages -# # dpt = "9.001" -# # ## List of Group-Addresses (GAs) assigned to the measurement -# # addresses = ["5/5/1"] -# -# # [[inputs.knx_listener.measurement]] -# # name = "illumination" -# # dpt = "9.004" -# # addresses = ["5/5/3"] - - # # Pull Metric Statistics from Aliyun CMS # [[inputs.aliyuncms]] # ## Aliyun Credentials @@ -7489,6 +7890,55 @@ # # token = "some-long-shared-secret-token" +# # Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +# [[inputs.intel_pmu]] +# ## List of filesystem locations of JSON files that contain PMU event definitions. +# event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] +# +# ## List of core events measurement entities. There can be more than one core_events sections. +# [[inputs.intel_pmu.core_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. +# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. +# events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] +# +# ## Limits the counting of events to core numbers specified. +# ## If absent, events are counted on all cores. +# ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. +# ## example: cores = ["0,2", "4", "12-16"] +# cores = ["0"] +# +# ## Indicator that plugin shall attempt to run core_events.events as a single perf group. +# ## If absent or set to false, each event is counted individually. Defaults to false. 
+# ## This limits the number of events that can be measured to a maximum of available hardware counters per core. +# ## Could vary depending on type of event, use of fixed counters. +# # perf_group = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# ## Can be applied to any group of events, unrelated to perf_group setting. +# # events_tag = "" +# +# ## List of uncore event measurement entities. There can be more than one uncore_events sections. +# [[inputs.intel_pmu.uncore_events]] +# ## List of events to be counted. Event names shall match names from event_definitions files. +# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. +# ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. +# events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] +# +# ## Limits the counting of events to specified sockets. +# ## If absent, events are counted on all sockets. +# ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. +# ## example: sockets = ["0-2"] +# sockets = ["0"] +# +# ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. +# ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. +# # aggregate_uncore_units = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# # events_tag = "" + + # # Intel Resource Director Technology plugin # [[inputs.intel_rdt]] # ## Optionally set sampling interval to Nx100ms. @@ -7656,6 +8106,15 @@ # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 # +# ## Maximum amount of time the consumer should take to process messages. If +# ## the debug log prints messages from sarama about 'abandoning subscription +# ## to [topic] because consuming was taking too long', increase this value to +# ## longer than the time taken by the output plugin(s). +# ## +# ## Note that the effective timeout could be between 'max_processing_time' and +# ## '2 * max_processing_time'. +# # max_processing_time = "100ms" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -7848,23 +8307,21 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## Broker URLs for the MQTT server or cluster. To connect to multiple -# ## clusters or standalone servers, use a seperate plugin instance. +# ## clusters or standalone servers, use a separate plugin instance. # ## example: servers = ["tcp://localhost:1883"] # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] # servers = ["tcp://127.0.0.1:1883"] -# # ## Topics that will be subscribed to. # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] -# +# # topic_fields = "_/_/_/temperature" # ## The message topic will be stored in a tag specified by this value. If set # ## to the empty string no topic tag will be created. # # topic_tag = "topic" -# # ## QoS policy for messages # ## 0 = at most once # ## 1 = at least once @@ -7873,10 +8330,8 @@ # ## When using a QoS of 1 or 2, you should enable persistent_session to allow # ## resuming unacknowledged messages. 
# # qos = 0 -# # ## Connection timeout for initial connection in seconds # # connection_timeout = "30s" -# # ## Maximum messages to read from the broker that have not been written by an # ## output. For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. @@ -7886,33 +8341,37 @@ # ## full batch is collected and the write is triggered immediately without # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 -# # ## Persistent session disables clearing of the client session on connection. # ## In order for this option to work you must also set client_id to identify # ## the client. To receive messages that arrived while the client is offline, # ## also set the qos option to 1 or 2 and don't forget to also set the QoS when # ## publishing. # # persistent_session = false -# # ## If unset, a random client ID will be generated. # # client_id = "" -# # ## Username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" -# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false -# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# ## Enable extracting tag values from MQTT topics +# ## _ denotes an ignored entry in the topic path +# ## [[inputs.mqtt_consumer.topic_parsing]] +# ## topic = "" +# ## measurement = "" +# ## tags = "" +# ## fields = "" +# ## [inputs.mqtt_consumer.topic_parsing.types] +# ## # # Read metrics from NATS subject(s) @@ -8476,42 +8935,34 @@ # # ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 # ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" -# -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" # -# # database_type = "AzureSQLDB" +# database_type = "SQLServer" # -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] # # ## A list of queries to explicitly ignore. 
-# # exclude_query = [] -# -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -# -# # database_type = "AzureSQLManagedInstance" -# -# # include_query = [] -# -# # exclude_query = [] +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] # # ## Queries enabled by default for database_type = "SQLServer" are - # ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates # -# database_type = "SQLServer" +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers # -# include_query = [] +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers # -# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default -# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# ## Queries enabled by default for database_type = "AzureSQLPool" are - +# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers # -# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## Following are old config settings +# ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use # ## the new mechanism of identifying the database_type there by use it's corresponding queries # # ## Optional parameter, setting this to 2 will use a new version diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 01091328e31ab..d7d1cb871cd0d 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -317,7 +317,7 @@ # # Sends metrics to Azure Data Explorer # [[outputs.azure_data_explorer]] -# ## Azure Data Exlorer cluster endpoint +# ## Azure Data Explorer cluster endpoint # ## ex: endpoint_url = "https://clustername.australiasoutheast.kusto.windows.net" # endpoint_url = "" # @@ -337,6 +337,9 @@ # ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). # # table_name = "" # +# ## Creates tables and relevant mapping if set to true(default). +# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. 
+# # create_tables = true # # Send aggregate metrics to Azure Monitor @@ -370,6 +373,24 @@ # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +# # Configuration for Google Cloud BigQuery to send entries +# [[outputs.bigquery]] +# ## Credentials File +# credentials_file = "/path/to/service/account/key.json" +# +# ## Google Cloud Platform Project +# project = "my-gcp-project" +# +# ## The namespace for the metric descriptor +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. +# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + # # Publish Telegraf metrics to a Google Cloud PubSub topic # [[outputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -655,6 +676,31 @@ # ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string # ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's # force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. +# ## This option can have the following values: +# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered +# ## drop -- drop fields containing NaNs or infs +# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) +# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number +# # float_handling = "none" +# # float_replacement_value = 0.0 + + +# # Configuration for Event Hubs output plugin +# [[outputs.event_hubs]] +# ## The full connection string to the Event Hub (required) +# ## The shared access key must have "Send" permissions on the target Event Hub. +# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" +# +# ## Client timeout (defaults to 30s) +# # timeout = "30s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "json" # # Send metrics to command as input over stdin @@ -772,6 +818,40 @@ # ## "telegraf" will be used. # ## example: short_message_field = "message" # # short_message_field = "" +# +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. +# # name_field_no_prefix = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to GroundWork Monitor +# [[outputs.groundwork]] +# ## URL of your groundwork instance. +# url = "https://groundwork.example.com" +# +# ## Agent uuid for GroundWork API Server. +# agent_id = "" +# +# ## Username and password to access GroundWork API. +# username = "" +# password = "" +# +# ## Default display name for the host with services(metrics). +# # default_host = "telegraf" +# +# ## Default service state. +# # default_service_state = "SERVICE_OK" +# +# ## The name of the tag that contains the hostname. 
+# # resource_tag = "host" # # Configurable HTTP health check resource based on metrics @@ -857,6 +937,11 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" # +# ## Use batch serialization format (default) instead of line based format. +# ## Batch format is more efficient and should be used unless line based +# ## format is really needed. +# # use_batch_format = true +# # ## HTTP Content-Encoding for write request body, can be set to "gzip" to # ## compress body or "identity" to apply no encoding. # # content_encoding = "identity" @@ -870,6 +955,27 @@ # ## Maximum amount of time before idle connection is closed. # ## Zero means no limit. # # idle_conn_timeout = 0 +# +# ## Amazon Region +# #region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" # # Configuration for sending metrics to InfluxDB @@ -1077,6 +1183,9 @@ # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # +# # Disable Kafka metadata full fetch +# # metadata_full = false +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1217,6 +1326,42 @@ # # tls_key = "/etc/telegraf/key.pem" +# # Sends metrics to MongoDB +# [[outputs.mongodb]] +# # connection string examples for mongodb +# dsn = "mongodb://localhost:27017" +# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" +# +# # overrides serverSelectionTimeoutMS in dsn if set +# # timeout = "30s" +# +# # default authentication, optional +# # authentication = "NONE" +# +# # for SCRAM-SHA-256 authentication +# # authentication = "SCRAM" +# # username = "root" +# # password = "***" +# +# # for x509 certificate authentication +# # authentication = "X509" +# # tls_ca = "ca.pem" +# # tls_key = "client.pem" +# # # tls_key_pwd = "changeme" # required for encrypted tls_key +# # insecure_skip_verify = false +# +# # database to store measurements and time series collections +# # database = "telegraf" +# +# # granularity can be seconds, minutes, or hours. +# # configuring this value will be based on your input collection frequency. +# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection +# # granularity = "seconds" +# +# # optionally set a TTL to automatically expire documents from the measurement collections. +# # ttl = "360h" + + # # Configuration for MQTT server to send metrics to # [[outputs.mqtt]] # servers = ["localhost:1883"] # required. 
@@ -2413,7 +2558,7 @@ # [[processors.printer]] -# # Transforms tag and field values with regex pattern +# # Transforms tag and field values as well as measurement, tag and field names with regex pattern # [[processors.regex]] # ## Tag and field conversions defined in a separate sub-tables # # [[processors.regex.tags]] @@ -2443,6 +2588,38 @@ # # pattern = ".*category=(\\w+).*" # # replacement = "${1}" # # result_key = "search_category" +# +# ## Rename metric fields +# # [[processors.regex.field_rename]] +# # ## Regular expression to match on a field name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new field name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed field OR you can "keep" +# # ## both the existing and source field. +# # # result_key = "keep" +# +# ## Rename metric tags +# # [[processors.regex.tag_rename]] +# # ## Regular expression to match on a tag name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" +# # ## If the new tag name already exists, you can either "overwrite" the +# # ## existing one with the value of the renamed tag OR you can "keep" +# # ## both the existing and source tag. +# # # result_key = "keep" +# +# ## Rename metrics +# # [[processors.regex.metric_rename]] +# # ## Regular expression to match on an metric name +# # pattern = "^search_(\\w+)d$" +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. +# # replacement = "${1}" # # Rename measurements, tags, and fields that pass through this filter. @@ -2825,6 +3002,37 @@ # # compression = 100.0 +# # Aggregate metrics using a Starlark script +# [[aggregators.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# state = {} +# +# def add(metric): +# state["last"] = metric +# +# def push(): +# return state.get("last") +# +# def reset(): +# state.clear() +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [aggregators.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + # # Count the occurrence of values in fields. # [[aggregators.valuecounter]] # ## General Aggregator Arguments: @@ -3765,6 +3973,15 @@ # # ## List of interfaces to ignore when pulling metrics. # # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. 
+# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] # # Read metrics from one or more commands that can output to stdout @@ -4609,6 +4826,7 @@ # # tls_ca = "/path/to/cafile" # # tls_cert = "/path/to/certfile" # # tls_key = "/path/to/keyfile" +# # tls_server_name = "kubernetes.example.com" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -4833,12 +5051,21 @@ # # parity = "N" # # stop_bits = 1 # +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! +# # debug_connection = false +# # ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" # ## default behaviour is "TCP" if the controller is TCP # ## For Serial you can choose between "RTU" and "ASCII" # # transmission_mode = "RTU" # -# ## Measurements +# ## Define the configuration schema +# ## |---register -- define fields per register type in the original style (only supports one slave ID) +# ## |---request -- define fields on a requests base +# configuration_type = "register" +# +# ## Per register definition # ## # # ## Digital Variables, Discrete Inputs and Coils @@ -4885,6 +5112,98 @@ # { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, # { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, # ] +# +# +# ## Per request definition +# ## +# +# ## Define a request sent to the device +# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. +# # [[inputs.modbus.request]] +# ## ID of the modbus slave device to query. +# ## If you need to query multiple slave-devices, create several "request" definitions. +# # slave_id = 0 +# +# ## Byte order of the data. +# ## |---ABCD or MSW-BE -- Big Endian (Motorola) +# ## |---DCBA or LSW-LE -- Little Endian (Intel) +# ## |---BADC or MSW-LE -- Big Endian with byte swap +# ## |---CDAB or LSW-BE -- Little Endian with byte swap +# # byte_order = "ABCD" +# +# ## Type of the register for the request +# ## Can be "coil", "discrete", "holding" or "input" +# # register = "holding" +# +# ## Name of the measurement. +# ## Can be overriden by the individual field definitions. Defaults to "modbus" +# # measurement = "modbus" +# +# ## Field definitions +# ## Analog Variables, Input Registers and Holding Registers +# ## address - address of the register to query. For coil and discrete inputs this is the bit address. +# ## name *1 - field name +# ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and +# ## FLOAT32, FLOAT64 (IEEE 754 binary representation) +# ## scale *1,2 - (optional) factor to scale the variable with +# ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if +# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). +# ## measurement *1 - (optional) measurement name, defaults to the setting of the request +# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers +# ## with a single request. Defaults to "false". 
+# ## +# ## *1: Those fields are ignored if field is omitted ("omit"=true) +# ## +# ## *2: Thise fields are ignored for both "coil" and "discrete"-input type of registers. For those register types +# ## the fields are output as zero or one in UINT64 format by default. +# +# ## Coil / discrete input example +# # fields = [ +# # { address=0, name="motor1_run"}, +# # { address=1, name="jog", measurement="motor"}, +# # { address=2, name="motor1_stop", omit=true}, +# # { address=3, name="motor1_overheating"}, +# # ] +# +# ## Per-request tags +# ## These tags take precedence over predefined tags. +# # [[inputs.modbus.request.tags]] +# # name = "value" +# +# ## Holding / input example +# ## All of those examples will result in FLOAT64 field outputs +# # fields = [ +# # { address=0, name="voltage", type="INT16", scale=0.1 }, +# # { address=1, name="current", type="INT32", scale=0.001 }, +# # { address=3, name="power", type="UINT32", omit=true }, +# # { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, +# # { address=7, name="frequency", type="UINT32", scale=0.1 }, +# # { address=8, name="power_factor", type="INT64", scale=0.01 }, +# # ] +# +# ## Holding / input example with type conversions +# # fields = [ +# # { address=0, name="rpm", type="INT16" }, # will result in INT64 field +# # { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field +# # { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field +# # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field +# # ] +# +# ## Per-request tags +# ## These tags take precedence over predefined tags. +# # [[inputs.modbus.request.tags]] +# # name = "value" +# +# +# +# ## Enable workarounds required by some devices to work correctly +# # [inputs.modbus.workarounds] +# ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. +# # pause_between_requests = "0ms" +# ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain +# ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices) +# ## from multiple instances you might want to only stay connected during gather and disconnect afterwards. +# # close_connection_after_gather = false # # Read metrics from one or many MongoDB servers @@ -4894,7 +5213,7 @@ # ## For example: # ## mongodb://user:auth_key@10.10.3.30:27017, # ## mongodb://10.10.3.33:18832, -# servers = ["mongodb://127.0.0.1:27017"] +# servers = ["mongodb://127.0.0.1:27017?connect=direct"] # # ## When true, collect cluster status # ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -5281,6 +5600,20 @@ # # insecure_skip_verify = false +# # Read metrics from the Nomad API +# [[inputs.nomad]] +# ## URL for the Nomad agent +# # url = "http://127.0.0.1:4646" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # A plugin to collect stats from the NSD authoritative DNS name server # [[inputs.nsd]] # ## Address of server to connect to, optionally ':port'. 
Defaults to the @@ -5333,7 +5666,9 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # # bin_path = "/usr/bin/nvidia-smi" # # ## Optional: timeout for GPU polling @@ -5472,6 +5807,59 @@ # timeout = 1000 +# # Collects performance metrics from OpenStack services +# [[inputs.openstack]] +# ## The recommended interval to poll is '30m' +# +# ## The identity endpoint to authenticate against and get the service catalog from. +# authentication_endpoint = "https://my.openstack.cloud:5000" +# +# ## The domain to authenticate against when using a V3 identity endpoint. +# # domain = "default" +# +# ## The project to authenticate as. +# # project = "admin" +# +# ## User authentication credentials. Must have admin rights. +# username = "admin" +# password = "password" +# +# ## Available services are: +# ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", +# ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" +# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] +# +# ## Collect Server Diagnostics +# # server_diagnotics = false +# +# ## output secrets (such as adminPass(for server) and UserID(for volume)). +# # output_secrets = false +# +# ## Amount of time allowed to complete the HTTP(s) request. +# # timeout = "5s" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Options for tags received from Openstack +# # tag_prefix = "openstack_tag_" +# # tag_value = "true" +# +# ## Timestamp format for timestamp data recieved from Openstack. +# ## If false format is unix nanoseconds. +# # human_readable_timestamps = false +# +# ## Measure Openstack call duration +# # measure_openstack_requests = false + + # # Read current weather and forecasts data from openweathermap.org # [[inputs.openweathermap]] # ## OpenWeatherMap API key. @@ -5947,6 +6335,14 @@ # # ## Timeout for the cli command to complete. # # timeout = "30s" +# +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" # # Retrieves SNMP values from remote agents @@ -5967,6 +6363,9 @@ # ## SNMP version; can be 1, 2, or 3. 
# # version = 2 # +# ## Path to mib files +# # path = ["/usr/share/snmp/mibs"] +# # ## Agent host tag; the tag used to reference the source host # # agent_host_tag = "agent_host" # @@ -6309,6 +6708,27 @@ # # timeout = "5s" +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. +# ## If both are empty or both are set, an error is thrown. +# # token_file = "/path/to/auth/token" +# ## OR +# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # Input plugin to collect Windows Event Log messages # [[inputs.win_eventlog]] # ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels @@ -6403,6 +6823,11 @@ # # and in case of localized Windows, counter paths will be also localized. It also returns instance indexes in instance names. # # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names. # #UseWildcardsExpansion = false +# # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will +# # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead +# # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this +# # setting is false. +# #LocalizeWildcardsExpansion = true # # Period after which counters will be reread from configuration and wildcards in counter paths expanded # CountersRefreshInterval="1m" # @@ -6521,6 +6946,7 @@ # "TermService", # "Win*", # ] +# #excluded_service_names = [] # optional, list of service names to exclude # # Collect Wireguard server interface and peer statistics @@ -6602,30 +7028,6 @@ ############################################################################### -# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface. -# [[inputs.KNXListener]] -# ## Type of KNX-IP interface. -# ## Can be either "tunnel" or "router". -# # service_type = "tunnel" -# -# ## Address of the KNX-IP interface. -# service_address = "localhost:3671" -# -# ## Measurement definition(s) -# # [[inputs.knx_listener.measurement]] -# # ## Name of the measurement -# # name = "temperature" -# # ## Datapoint-Type (DPT) of the KNX messages -# # dpt = "9.001" -# # ## List of Group-Addresses (GAs) assigned to the measurement -# # addresses = ["5/5/1"] -# -# # [[inputs.knx_listener.measurement]] -# # name = "illumination" -# # dpt = "9.004" -# # addresses = ["5/5/3"] - - # # Pull Metric Statistics from Aliyun CMS # [[inputs.aliyuncms]] # ## Aliyun Credentials @@ -7570,6 +7972,9 @@ # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # +# # Disable Kafka metadata full fetch +# # metadata_full = false +# # ## Name of the consumer group. # # consumer_group = "telegraf_metrics_consumers" # @@ -7602,6 +8007,15 @@ # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 # +# ## Maximum amount of time the consumer should take to process messages. 
If +# ## the debug log prints messages from sarama about 'abandoning subscription +# ## to [topic] because consuming was taking too long', increase this value to +# ## longer than the time taken by the output plugin(s). +# ## +# ## Note that the effective timeout could be between 'max_processing_time' and +# ## '2 * max_processing_time'. +# # max_processing_time = "100ms" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -7794,23 +8208,21 @@ # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## Broker URLs for the MQTT server or cluster. To connect to multiple -# ## clusters or standalone servers, use a seperate plugin instance. +# ## clusters or standalone servers, use a separate plugin instance. # ## example: servers = ["tcp://localhost:1883"] # ## servers = ["ssl://localhost:1883"] # ## servers = ["ws://localhost:1883"] # servers = ["tcp://127.0.0.1:1883"] -# # ## Topics that will be subscribed to. # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] -# +# # topic_fields = "_/_/_/temperature" # ## The message topic will be stored in a tag specified by this value. If set # ## to the empty string no topic tag will be created. # # topic_tag = "topic" -# # ## QoS policy for messages # ## 0 = at most once # ## 1 = at least once @@ -7819,10 +8231,8 @@ # ## When using a QoS of 1 or 2, you should enable persistent_session to allow # ## resuming unacknowledged messages. # # qos = 0 -# # ## Connection timeout for initial connection in seconds # # connection_timeout = "30s" -# # ## Maximum messages to read from the broker that have not been written by an # ## output. For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. @@ -7832,33 +8242,37 @@ # ## full batch is collected and the write is triggered immediately without # ## waiting until the next flush_interval. # # max_undelivered_messages = 1000 -# # ## Persistent session disables clearing of the client session on connection. # ## In order for this option to work you must also set client_id to identify # ## the client. To receive messages that arrived while the client is offline, # ## also set the qos option to 1 or 2 and don't forget to also set the QoS when # ## publishing. # # persistent_session = false -# # ## If unset, a random client ID will be generated. # # client_id = "" -# # ## Username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" -# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false -# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# ## Enable extracting tag values from MQTT topics +# ## _ denotes an ignored entry in the topic path +# ## [[inputs.mqtt_consumer.topic_parsing]] +# ## topic = "" +# ## measurement = "" +# ## tags = "" +# ## fields = "" +# ## [inputs.mqtt_consumer.topic_parsing.types] +# ## # # Read metrics from NATS subject(s) @@ -8415,42 +8829,34 @@ # # ## "database_type" enables a specific set of queries depending on the database type. 
If specified, it replaces azuredb = true/false and query_version = 2 # ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" -# -# ## Queries enabled by default for database_type = "AzureSQLDB" are - -# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" # -# # database_type = "AzureSQLDB" +# database_type = "SQLServer" # -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] +# ## A list of queries to include. If not specified, all the below listed queries are used. +# include_query = [] # # ## A list of queries to explicitly ignore. -# # exclude_query = [] -# -# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -# -# # database_type = "AzureSQLManagedInstance" -# -# # include_query = [] -# -# # exclude_query = [] +# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] # # ## Queries enabled by default for database_type = "SQLServer" are - # ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates # -# database_type = "SQLServer" +# ## Queries enabled by default for database_type = "AzureSQLDB" are - +# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers # -# include_query = [] +# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers # -# ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default -# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +# ## Queries enabled by default for database_type = "AzureSQLPool" are - +# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers # -# ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +# ## Following are old config settings +# ## You may use them only if you are using the earlier flavor of queries, 
however it is recommended to use # ## the new mechanism of identifying the database_type there by use it's corresponding queries # # ## Optional parameter, setting this to 2 will use a new version diff --git a/go.mod b/go.mod index 436f601e5ee07..31e937a677a51 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/BurntSushi/toml v0.3.1 + github.com/BurntSushi/toml v0.4.1 github.com/Masterminds/goutils v1.1.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible @@ -50,32 +50,31 @@ require ( github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 github.com/armon/go-metrics v0.3.3 // indirect - github.com/aws/aws-sdk-go-v2 v1.9.1 - github.com/aws/aws-sdk-go-v2/config v1.8.2 - github.com/aws/aws-sdk-go-v2/credentials v1.4.2 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 + github.com/aws/aws-sdk-go-v2 v1.9.2 + github.com/aws/aws-sdk-go-v2/config v1.8.3 + github.com/aws/aws-sdk-go-v2/credentials v1.4.3 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 // indirect github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 github.com/aws/aws-sdk-go-v2/service/ec2 v1.1.0 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0 github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 + github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 github.com/aws/smithy-go v1.8.0 github.com/benbjohnson/clock v1.1.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bitly/go-hostpool v0.1.0 // indirect github.com/bmatcuk/doublestar/v3 v3.0.0 - github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 github.com/caio/go-tdigest v3.1.0+incompatible github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect @@ -83,7 +82,7 @@ require ( github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.7 // indirect github.com/coocood/freecache v1.1.1 - github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-semver v0.3.0 github.com/couchbase/go-couchbase v0.1.0 github.com/couchbase/gomemcached v0.1.3 // indirect 
github.com/couchbase/goutils v0.1.0 // indirect @@ -102,13 +101,13 @@ require ( github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/echlebek/timeproxy v1.0.0 // indirect - github.com/eclipse/paho.mqtt.golang v1.3.0 - github.com/fatih/color v1.10.0 // indirect + github.com/eclipse/paho.mqtt.golang v1.3.5 + github.com/fatih/color v1.10.0 github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 github.com/go-logfmt/logfmt v0.5.0 github.com/go-logr/logr v0.4.0 // indirect - github.com/go-ole/go-ole v1.2.5 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible github.com/go-sql-driver/mysql v1.6.0 @@ -117,12 +116,10 @@ require ( github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.3.0+incompatible - github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt/v4 v4.1.0 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-cmp v0.5.6 @@ -132,13 +129,15 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/googleapis/gnostic v0.5.5 // indirect - github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 + github.com/gopcua/opcua v0.2.3 + github.com/gophercloud/gophercloud v0.16.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.4.2 - github.com/gosnmp/gosnmp v1.32.0 + github.com/gosnmp/gosnmp v1.33.0 github.com/grid-x/modbus v0.0.0-20210224155242-c4a3d042e99b github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec github.com/hashicorp/consul/api v1.9.1 @@ -147,17 +146,18 @@ require ( github.com/hashicorp/go-immutable-radix v1.2.0 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/serf v0.9.5 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/influxdata/go-syslog/v3 v3.0.0 - github.com/influxdata/influxdb-observability/common v0.2.7 - github.com/influxdata/influxdb-observability/influx2otel v0.2.7 - github.com/influxdata/influxdb-observability/otel2influx v0.2.7 + github.com/influxdata/influxdb-observability/common v0.2.8 + github.com/influxdata/influxdb-observability/influx2otel v0.2.8 + github.com/influxdata/influxdb-observability/otel2influx v0.2.8 github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog 
v0.0.0-20160411224016-7c63b0a71ef8 + github.com/intel/iaevents v1.0.0 github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.8.1 github.com/jackc/pgio v1.0.0 @@ -177,8 +177,6 @@ require ( github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/klauspost/compress v1.13.6 // indirect - github.com/kr/pretty v0.3.0 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -204,9 +202,9 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/jwt/v2 v2.0.2 // indirect - github.com/nats-io/nats-server/v2 v2.2.6 - github.com/nats-io/nats.go v1.11.0 + github.com/nats-io/jwt/v2 v2.1.0 // indirect + github.com/nats-io/nats-server/v2 v2.6.5 + github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 github.com/nats-io/nkeys v0.3.0 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/newrelic/newrelic-telemetry-sdk-go v0.5.1 @@ -242,7 +240,7 @@ require ( github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect github.com/sensu/sensu-go/api/core/v2 v2.9.0 - github.com/shirou/gopsutil v3.21.8+incompatible + github.com/shirou/gopsutil/v3 v3.21.10 github.com/showwin/speedtest-go v1.1.4 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect @@ -278,12 +276,12 @@ require ( github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e // indirect go.etcd.io/etcd/api/v3 v3.5.0 // indirect - go.mongodb.org/mongo-driver v1.5.3 + go.mongodb.org/mongo-driver v1.7.3 go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 - go.opentelemetry.io/otel/metric v0.23.0 - go.opentelemetry.io/otel/sdk/metric v0.23.0 + go.opentelemetry.io/collector/model v0.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 + go.opentelemetry.io/otel/metric v0.24.0 + go.opentelemetry.io/otel/sdk/metric v0.24.0 go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.6.0 // indirect @@ -292,18 +290,18 @@ require ( golang.org/x/net v0.0.0-20211005215030-d2e5035098b3 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef + golang.org/x/sys v0.0.0-20211013075003-97ac67df715c golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.5 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect golang.zx2c4.com/wireguard v0.0.20200121 // indirect golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.54.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210827211047-25e5f791fe06 - 
google.golang.org/grpc v1.40.0 + google.golang.org/grpc v1.41.0 google.golang.org/protobuf v1.27.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/djherbis/times.v1 v1.2.0 @@ -319,7 +317,6 @@ require ( gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - gotest.tools v2.2.0+incompatible k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 k8s.io/client-go v0.22.2 @@ -338,13 +335,16 @@ require ( sigs.k8s.io/yaml v1.2.0 // indirect ) +require github.com/libp2p/go-reuseport v0.1.0 + require ( - github.com/aws/aws-sdk-go v1.38.3 // indirect github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jackc/puddle v1.1.3 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -352,15 +352,15 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pierrec/lz4/v4 v4.1.8 // indirect - github.com/rogpeppe/go-internal v1.6.2 // indirect - go.opentelemetry.io/otel v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 // indirect - go.opentelemetry.io/otel/internal/metric v0.23.0 // indirect - go.opentelemetry.io/otel/sdk v1.0.0-RC3 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.23.0 // indirect - go.opentelemetry.io/otel/trace v1.0.0-RC3 // indirect + go.opentelemetry.io/otel v1.0.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 // indirect + go.opentelemetry.io/otel/internal/metric v0.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.0.1 // indirect + go.opentelemetry.io/otel/sdk/export/metric v0.24.0 // indirect + go.opentelemetry.io/otel/trace v1.0.1 // indirect go.opentelemetry.io/proto/otlp v0.9.0 // indirect ) @@ -375,3 +375,9 @@ replace github.com/mdlayher/apcupsd => github.com/influxdata/apcupsd v0.0.0-2021 //https://github.com/WireGuard/wgctrl-go/blob/e35592f146e40ce8057113d14aafcc3da231fbac/go.mod#L12 ) was not working when using GOPROXY=direct. //Replacing with the pseudo-version works around this. 
replace golang.zx2c4.com/wireguard v0.0.20200121 => golang.zx2c4.com/wireguard v0.0.0-20200121152719-05b03c675090 + +// replaced due to open PR updating protobuf https://github.com/cisco-ie/nx-telemetry-proto/pull/1 +replace github.com/cisco-ie/nx-telemetry-proto => github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc + +// replaced due to open PR updating protobuf https://github.com/riemann/riemann-go-client/pull/27 +replace github.com/riemann/riemann-go-client => github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 diff --git a/go.sum b/go.sum index b6541e1420037..3f4bacf7498a2 100644 --- a/go.sum +++ b/go.sum @@ -149,8 +149,9 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -321,7 +322,6 @@ github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.38.3 h1:QCL/le04oAz2jELMRSuJVjGT7H+4hhoQc66eMPCfU/k= github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= @@ -331,25 +331,29 @@ github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAP github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.9.1 h1:ZbovGV/qo40nrOJ4q8G33AGICzaPI45FHQWJ9650pF4= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod 
h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= -github.com/aws/aws-sdk-go-v2/config v1.8.2 h1:Dqy4ySXFmulRmZhfynm/5CD4Y6aXiTVhDtXLIuUe/r0= github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= -github.com/aws/aws-sdk-go-v2/credentials v1.4.2 h1:8kVE4Og6wlhVrMGiORQ3p9gRj2exjzhFRB+QzWBUa5Q= github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1 h1:Nm+BxqBtT0r+AnD6byGMCGT4Km0QwHBy8mAYptNPXY4= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= @@ -361,8 +365,9 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6 github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3 h1:NnXJXUz7oihrSlPKEM0yZ19b+7GQ47MX/LluLlEyE/Y= github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod 
h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.5.2 h1:B120/boLr82yRaQFEPn9u01OwWMnc+xGvz5SOHfBrHY= @@ -386,8 +391,9 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.1/go.mod h1:PIS github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1 h1:APEjhKZLFlNVLATnA/TJyA+w1r/xd5r5ACWBDZ9aIvc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= @@ -399,13 +405,15 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0d github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= -github.com/aws/aws-sdk-go-v2/service/sso v1.4.1 h1:RfgQyv3bFT2Js6XokcrNtTjQ6wAVBRpoCgTFsypihHA= github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= -github.com/aws/aws-sdk-go-v2/service/sts v1.7.1 h1:7ce9ugapSgBapwLhg7AJTqKW5U92VRX3vX65k2tsB+g= github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2 h1:1s/RRA5Owuz4/G/eWCdCKgC+9zaz2vxFsRSwe7R3cPY= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.3.2/go.mod h1:XoDkdZ5pBf2za2GWbFHQ8Ps0K8fRbmbwrHh7PF5xnzQ= @@ -495,8 +503,6 @@ github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.6.2/go.mod 
h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -504,6 +510,7 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -720,6 +727,8 @@ github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1: github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754 h1:aDtw0/++yjOoiXB9sldaFYW61mK3m6ia/wYWxPLrwYY= +github.com/dstrand1/riemann-go-client v0.5.1-0.20211028194734-b5eb11fb5754/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -739,8 +748,8 @@ github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/P github.com/echlebek/timeproxy v1.0.0 h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqKsCk= github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I= -github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod 
h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -754,6 +763,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -827,8 +837,9 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -1031,7 +1042,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20170307001533-c9c7427a2a70/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1136,8 +1146,9 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2 
h1:OtFKr0Kwe1oLpMR+uNMh/DPgC5fxAq4xRe6HBv8LDqQ= -github.com/gopcua/opcua v0.2.0-rc2.0.20210409063412-baabb9b14fd2/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8= +github.com/gopcua/opcua v0.2.3 h1:K5SW2o+vNga62J2PL5GQmWqYQHiZPV/+EKPetarVFQM= +github.com/gopcua/opcua v0.2.3/go.mod h1:GtgfiXLQVXu72KtHZnWNu4JHlMPKqPSOd+pmngEGLWE= +github.com/gophercloud/gophercloud v0.16.0 h1:sWjPfypuzxRxjVbk3/MsU4H8jS0NNlyauZtIUl78BPU= github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= @@ -1161,8 +1172,8 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gosnmp/gosnmp v1.32.0 h1:gctewmZx5qFI0oHMzRnjETqIZ093d9NgZy9TQr3V0iA= -github.com/gosnmp/gosnmp v1.32.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvNDMpgF0= +github.com/gosnmp/gosnmp v1.33.0 h1:WNwN5Rj/9Y70VplIKXuaUiYVxdcaXhfAuLElKx4lnpU= +github.com/gosnmp/gosnmp v1.33.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= @@ -1190,6 +1201,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BM github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf h1:xSjgqa6SiBaSC4sTC4HniWRLww2vbl3u0KyMUYeryJI= +github.com/gwos/tcg/sdk v0.0.0-20211130162655-32ad77586ccf/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o= @@ -1292,12 +1305,12 @@ github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7m github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I= github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q= github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= -github.com/influxdata/influxdb-observability/common v0.2.7 h1:C+oDh8Kbw+Ykx9yog/uJXL27rwMN3hgTLQfAFg1eQO0= -github.com/influxdata/influxdb-observability/common v0.2.7/go.mod h1:+8VMGrfWZnXjc1c/oP+N4O/sHoneWgN3ojAHwgYgV4A= 
-github.com/influxdata/influxdb-observability/influx2otel v0.2.7 h1:YIXH+qNQgAtTA5U3s/wxDxxh5Vz+ylhZhyuRxtfTBqs= -github.com/influxdata/influxdb-observability/influx2otel v0.2.7/go.mod h1:ASyDMoPChvIgbEOvghwc5NxngOgXThp9MFKs7efNLtQ= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7 h1:FACov3tcGCKfEGXsyUbgUOQx3zXffXaCFbN3ntAzh1E= -github.com/influxdata/influxdb-observability/otel2influx v0.2.7/go.mod h1:tE3OSy4RyAHIjxYlFZBsWorEM3aqaUeqSx3mbacm8KI= +github.com/influxdata/influxdb-observability/common v0.2.8 h1:QDvX7rNQkt1mHr2v8sw/OEupa32CxZHlO5f/tsyPCLw= +github.com/influxdata/influxdb-observability/common v0.2.8/go.mod h1:N2wfkPgJvi9CPK6MbNFkD70naEUxAMGCqFyxZXCJQDs= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8 h1:XlVo4WLIFByOADn+88hPmR2SGJkdLppyIbw1BG2obp8= +github.com/influxdata/influxdb-observability/influx2otel v0.2.8/go.mod h1:t9LeYL1mBiVRZBt5TfIj+4MBkJ/1POBxUlKSxEA+uj8= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8 h1:vTamg9mKUXHaXPtydrR1ejpqj/OKAGc56MiedXjlsnA= +github.com/influxdata/influxdb-observability/otel2influx v0.2.8/go.mod h1:xKTR9GLOtkSekysDKhAFNrPYpeiFV31Sy6zDqF54axA= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= @@ -1312,6 +1325,8 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E= +github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -1473,8 +1488,6 @@ github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= @@ -1521,6 +1534,8 @@ github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.7.0/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= @@ -1528,6 +1543,8 @@ github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= @@ -1684,21 +1701,18 @@ github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5w github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU= -github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= -github.com/nats-io/jwt/v2 v2.0.2 h1:ejVCLO8gu6/4bOKIHQpmB5UhhUJfAQw55yvLWpfmKjI= -github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/jwt/v2 v2.1.0 h1:1UbfD5g1xTdWmSeRV8bh/7u+utTiBsRtWhLl1PixZp4= +github.com/nats-io/jwt/v2 v2.1.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats-server/v2 v2.2.6 h1:FPK9wWx9pagxcw14s8W9rlfzfyHm61uNLnJyybZbn48= -github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI= +github.com/nats-io/nats-server/v2 v2.6.5 h1:VTG8gdSw4bEqMwKudOHkBLqGwNpNaJOwruj3+rquQlQ= +github.com/nats-io/nats-server/v2 v2.6.5/go.mod h1:LlMieumxNUnCloOTVFv7Wog0YnasScxARUMXVXv9/+M= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nats.go v1.11.0 h1:L263PZkrmkRJRJT2YHU8GwWWvEvmr9/LUKuJTXsF32k= -github.com/nats-io/nats.go v1.11.0/go.mod 
h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483 h1:GMx3ZOcMEVM5qnUItQ4eJyQ6ycwmIEB/VC/UxvdevE0= +github.com/nats-io/nats.go v1.13.1-0.20211018182449-f2416a8b1483/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= -github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= @@ -1966,8 +1980,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/riemann/riemann-go-client v0.5.0 h1:yPP7tz1vSYJkSZvZFCsMiDsHHXX57x8/fEX3qyEXuAA= -github.com/riemann/riemann-go-client v0.5.0/go.mod h1:FMiaOL8dgBnRfgwENzV0xlYJ2eCbV1o7yqVwOBLbShQ= github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= @@ -2011,6 +2023,8 @@ github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc h1:9RAsqOFf0U5CuwXR/Jff3nXTv6tAQNN7U4A/2cBRXFc= +github.com/sbezverk/nx-telemetry-proto v0.0.0-20210629125746-3c19a51b1abc/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -2025,10 +2039,11 @@ github.com/sensu/sensu-go/api/core/v2 v2.9.0 h1:NanHMIWbrHP/L4Ge0V1x2+0G9bxFHpvh github.com/sensu/sensu-go/api/core/v2 v2.9.0/go.mod h1:QcgxKxydmScE66hLBTzbFhhiPSR/JHqUjNi/+Lelh6E= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible h1:OloQyEerMi7JUrXiNzy8wQ5XN+baemxSl12QgIzt0jc= github.com/shirou/gopsutil v3.21.5+incompatible/go.mod 
h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v3.21.8+incompatible h1:sh0foI8tMRlCidUJR+KzqWYWxrkuuPIGiO6Vp+KXdCU= -github.com/shirou/gopsutil v3.21.8+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -2304,8 +2319,8 @@ go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= -go.mongodb.org/mongo-driver v1.5.3 h1:wWbFB6zaGHpzguF3f7tW94sVE8sFl3lHx8OZx/4OuFI= -go.mongodb.org/mongo-driver v1.5.3/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.7.3 h1:G4l/eYY9VrQAK/AUgkV0koQKzQnyddnWxrd/Etf0jIs= +go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -2319,27 +2334,27 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.28.0 h1:XmRwoSj3HZtC7O/12fBoQ9DInvwBwFHgHLZrwNxNjQY= go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= -go.opentelemetry.io/collector/model v0.35.0 h1:NpKjghiqlei4ecwjOYOMhD6tj4gY8yiWHPJmbFs/ArI= -go.opentelemetry.io/collector/model v0.35.0/go.mod h1:+7YCSjJG+MqiIFjauzt7oM2qkqBsaJWh5hcsO4fwsAc= +go.opentelemetry.io/collector/model v0.37.0 h1:K1G6bgzBZ5kKSjZ1+EY9MhCOYsac4Q1K85fBUgpTVH8= +go.opentelemetry.io/collector/model v0.37.0/go.mod h1:ESh1oWDNdS4fTg9sTFoYuiuvs8QuaX8yNGTPix3JZc8= go.opentelemetry.io/otel v0.7.0/go.mod h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= -go.opentelemetry.io/otel v1.0.0-RC3 h1:kvwiyEkiUT/JaadXzVLI/R1wDO934A7r3Bs2wEe6wqA= -go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0 h1:vKIEsT6IJU0NYd+iZccjgCmk80zsa7dTiC2Bu7U1jz0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.23.0/go.mod h1:pe9oOWRaZyapdajWCn64fnl76v3cmTEmNBgh7MkKvwE= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0 h1:JSsJID+KU3G8wxynfHIlWaefOvYngDjnrmtHOGb1sb0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.23.0/go.mod h1:aSP5oMNaAfOYq+sRydHANZ0vBYLyZR/3lR9pru9aPLk= -go.opentelemetry.io/otel/internal/metric v0.23.0 h1:mPfzm9Iqhw7G2nDBmUAjFTfPqLZPbOW2k7QI57ITbaI= -go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod 
h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= -go.opentelemetry.io/otel/metric v0.23.0 h1:mYCcDxi60P4T27/0jchIDFa1WHEfQeU3zH9UEMpnj2c= -go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= -go.opentelemetry.io/otel/sdk v1.0.0-RC3 h1:iRMkET+EmJUn5mW0hJzygBraXRmrUwzbOtNvTCh/oKs= -go.opentelemetry.io/otel/sdk v1.0.0-RC3/go.mod h1:78H6hyg2fka0NYT9fqGuFLvly2yCxiBXDJAgLKo/2Us= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0 h1:7NeoKPPx6NdZBVHLEp/LY5Lq85Ff1WNZnuJkuRy+azw= -go.opentelemetry.io/otel/sdk/export/metric v0.23.0/go.mod h1:SuMiREmKVRIwFKq73zvGTvwFpxb/ZAYkMfyqMoOtDqs= -go.opentelemetry.io/otel/sdk/metric v0.23.0 h1:xlZhPbiue1+jjSFEth94q9QCmX8Q24mOtue9IAmlVyI= -go.opentelemetry.io/otel/sdk/metric v0.23.0/go.mod h1:wa0sKK13eeIFW+0OFjcC3S1i7FTRRiLAXe1kjBVbhwg= -go.opentelemetry.io/otel/trace v1.0.0-RC3 h1:9F0ayEvlxv8BmNmPbU005WK7hC+7KbOazCPZjNa1yME= -go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= +go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0 h1:NN6n2agAkT6j2o+1RPTFANclOnZ/3Z1ruRGL06NYACk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.24.0/go.mod h1:kgWmavsno59/h5l9A9KXhvqrYxBhiQvJHPNhJkMP46s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0 h1:QyIh7cAMItlzm8xQn9c6QxNEMUbYgXPx19irR/pmgdI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.24.0/go.mod h1:BpCT1zDnUgcUc3VqFVkxH/nkx6cM8XlCPsQsxaOzUNM= +go.opentelemetry.io/otel/internal/metric v0.24.0 h1:O5lFy6kAl0LMWBjzy3k//M8VjEaTDWL9DPJuqZmWIAA= +go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= +go.opentelemetry.io/otel/metric v0.24.0 h1:Rg4UYHS6JKR1Sw1TxnI13z7q/0p/XAbgIqUTagvLJuU= +go.opentelemetry.io/otel/metric v0.24.0/go.mod h1:tpMFnCD9t+BEGiWY2bWF5+AwjuAdM0lSowQ4SBA3/K4= +go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0 h1:innKi8LQebwPI+WEuEKEWMjhWC5mXQG1/WpSm5mffSY= +go.opentelemetry.io/otel/sdk/export/metric v0.24.0/go.mod h1:chmxXGVNcpCih5XyniVkL4VUyaEroUbOdvjVlQ8M29Y= +go.opentelemetry.io/otel/sdk/metric v0.24.0 h1:LLHrZikGdEHoHihwIPvfFRJX+T+NdrU2zgEqf7tQ7Oo= +go.opentelemetry.io/otel/sdk/metric v0.24.0/go.mod h1:KDgJgYzsIowuIDbPM9sLDZY9JJ6gqIDWCx92iWV8ejk= +go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= @@ -2603,6 +2618,7 @@ golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2728,10 +2744,11 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs= -golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -3036,8 +3053,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/internal/internal.go b/internal/internal.go index 4441e9acfbf03..49f92bfcd1265 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -42,6 +42,10 @@ func SetVersion(v string) error { return ErrorVersionAlreadySet } version = v + if version == "" { + version = "unknown" + } + return nil } diff --git a/internal/snmp/config.go b/internal/snmp/config.go index 0a200b7067787..4ad1d3a0cd3e3 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -10,6 +10,8 @@ type ClientConfig struct { Retries int `toml:"retries"` // Values: 1, 2, 3 Version uint8 `toml:"version"` + // Path to mib files + Path []string `toml:"path"` // 
Parameters for Version 1 & 2 Community string `toml:"community"` diff --git a/internal/snmp/translate.go b/internal/snmp/translate.go new file mode 100644 index 0000000000000..a452d0a840c9b --- /dev/null +++ b/internal/snmp/translate.go @@ -0,0 +1,217 @@ +package snmp + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" +) + +// must init, append path for each directory, load module for every file +// or gosmi will fail without saying why +var m sync.Mutex +var once sync.Once +var cache = make(map[string]bool) + +func appendPath(path string) { + m.Lock() + defer m.Unlock() + + gosmi.AppendPath(path) +} + +func loadModule(path string) error { + m.Lock() + defer m.Unlock() + + _, err := gosmi.LoadModule(path) + return err +} + +func ClearCache() { + cache = make(map[string]bool) +} + +func LoadMibsFromPath(paths []string, log telegraf.Logger) error { + once.Do(gosmi.Init) + + for _, mibPath := range paths { + folders := []string{} + + // Check if we loaded that path already and skip it if so + m.Lock() + cached := cache[mibPath] + cache[mibPath] = true + m.Unlock() + if cached { + continue + } + + appendPath(mibPath) + folders = append(folders, mibPath) + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + // symlinks are files so we need to double check if any of them are folders + // Will check file vs directory later on + if info.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(path) + if err != nil { + log.Warnf("Bad symbolic link %v", link) + } + folders = append(folders, link) + } + return nil + }) + if err != nil { + return fmt.Errorf("Filepath could not be walked: %v", err) + } + + for _, folder := range folders { + err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { + // checks if file or directory + if info.IsDir() { + appendPath(path) + } else if info.Mode()&os.ModeSymlink == 0 { + if err := loadModule(info.Name()); err != nil { + log.Warn(err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("Filepath could not be walked: %v", err) + } + } + } + return nil +} + +// The following is for snmp_trap +type MibEntry struct { + MibName string + OidText string +} + +func TrapLookup(oid string) (e MibEntry, err error) { + var node gosmi.SmiNode + node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + + // ensure modules are loaded or node will be empty (might not error) + if err != nil { + return e, err + } + + e.OidText = node.RenderQualified() + + i := strings.Index(e.OidText, "::") + if i == -1 { + return e, fmt.Errorf("not found") + } + e.MibName = e.OidText[:i] + e.OidText = e.OidText[i+2:] + return e, nil +} + +// The following is for snmp + +func GetIndex(oidNum string, mibPrefix string) (col []string, tagOids map[string]struct{}, err error) { + // first attempt to get the table's tags + tagOids = map[string]struct{}{} + + // mimcks grabbing INDEX {} that is returned from snmptranslate -Td MibName + node, err := gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + + if err != nil { + return []string{}, map[string]struct{}{}, fmt.Errorf("getting submask: %w", err) + } + + for _, index := range node.GetIndex() { + //nolint:staticcheck //assaignment to nil map to keep backwards compatibilty + tagOids[mibPrefix+index.Name] = struct{}{} + } + + // grabs all columns from the table + // mimmicks grabbing everything returned from snmptable -Ch -Cl -c public 
127.0.0.1 oidFullName + col = node.GetRow().AsTable().ColumnOrder + + return col, tagOids, nil +} + +//nolint:revive //Too many return variable but necessary +func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + var out gosmi.SmiNode + var end string + if strings.ContainsAny(oid, "::") { + // split given oid + // for example RFC1213-MIB::sysUpTime.0 + s := strings.Split(oid, "::") + // node becomes sysUpTime.0 + node := s[1] + if strings.ContainsAny(node, ".") { + s = strings.Split(node, ".") + // node becomes sysUpTime + node = s[0] + end = "." + s[1] + } + + out, err = gosmi.GetNode(node) + if err != nil { + return oid, oid, oid, oid, err + } + + oidNum = "." + out.RenderNumeric() + end + } else if strings.ContainsAny(oid, "abcdefghijklnmopqrstuvwxyz") { + //handle mixed oid ex. .iso.2.3 + s := strings.Split(oid, ".") + for i := range s { + if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { + out, err = gosmi.GetNode(s[i]) + if err != nil { + return oid, oid, oid, oid, err + } + s[i] = out.RenderNumeric() + } + } + oidNum = strings.Join(s, ".") + out, _ = gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + } else { + out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + oidNum = oid + // ensure modules are loaded or node will be empty (might not error) + // do not return the err as the oid is numeric and telegraf can continue + //nolint:nilerr + if err != nil || out.Name == "iso" { + return oid, oid, oid, oid, nil + } + } + + tc := out.GetSubtree() + + for i := range tc { + // case where the mib doesn't have a conversion so Type struct will be nil + // prevents seg fault + if tc[i].Type == nil { + break + } + switch tc[i].Type.Name { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": + conversion = "ipaddr" + } + } + + oidText = out.RenderQualified() + i := strings.Index(oidText, "::") + if i == -1 { + return "", oid, oid, oid, fmt.Errorf("not found") + } + mibName = oidText[:i] + oidText = oidText[i+2:] + end + + return mibName, oidNum, oidText, conversion, nil +} diff --git a/internal/usage.go b/internal/usage.go index 916b5cb86e908..a650a4854b3cd 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -17,13 +17,14 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files - --watch-config Telegraf will restart on local config changes. Monitor changes - using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. Monitoring is off by default. --plugin-directory directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced. --debug turn on debug logging + --deprecation-list print all deprecated plugins or plugin options. --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. 
--output-filter filter the outputs to enable, separator is : @@ -37,9 +38,10 @@ The commands & flags are: 'processors', 'aggregators' and 'inputs' --sample-config print out full sample configuration --once enable once mode: gather metrics once, write them, and exit - --test enable test mode: gather metrics once and print them - --test-wait wait up to this many seconds for service - inputs to complete in test or once mode + --test enable test mode: gather metrics once and print them. + No outputs are executed! + --test-wait wait up to this many seconds for service inputs to complete + in test or once mode. Implies --test if not used with --once. --usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit diff --git a/plugin.go b/plugin.go index f9dcaeac0344c..3f4004d766457 100644 --- a/plugin.go +++ b/plugin.go @@ -2,6 +2,16 @@ package telegraf var Debug bool +// DeprecationInfo contains information for marking a plugin deprecated. +type DeprecationInfo struct { + // Since specifies the version since when the plugin is deprecated + Since string + // RemovalIn optionally specifies the version when the plugin is scheduled for removal + RemovalIn string + // Notice for the user on suggested replacements etc. + Notice string +} + // Initializer is an interface that all plugin types: Inputs, Outputs, // Processors, and Aggregators can optionally implement to initialize the // plugin. diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index 20d5b5ea2e482..c3a6f274b426d 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -9,5 +9,6 @@ import ( _ "github.com/influxdata/telegraf/plugins/aggregators/merge" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" _ "github.com/influxdata/telegraf/plugins/aggregators/quantile" + _ "github.com/influxdata/telegraf/plugins/aggregators/starlark" _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" ) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index f13dd8f375682..ede108ec57d90 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -3,7 +3,7 @@ The BasicStats aggregator plugin give us count,diff,max,min,mean,non_negative_diff,sum,s2(variance), stdev for a set of values, emitting the aggregate every `period` seconds. -### Configuration: +## Configuration ```toml # Keep the aggregate basicstats of each metric passing through. @@ -20,32 +20,32 @@ emitting the aggregate every `period` seconds. ``` - stats - - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. - - If empty array, no stats are aggregated + - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. 
+ - If empty array, no stats are aggregated -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_count - - field1_diff (difference) - - field1_rate (rate per second) - - field1_max - - field1_min - - field1_mean - - field1_non_negative_diff (non-negative difference) - - field1_non_negative_rate (non-negative rate per second) - - field1_sum - - field1_s2 (variance) - - field1_stdev (standard deviation) - - field1_interval (interval in nanoseconds) - -### Tags: + - field1_count + - field1_diff (difference) + - field1_rate (rate per second) + - field1_max + - field1_min + - field1_mean + - field1_non_negative_diff (non-negative difference) + - field1_non_negative_rate (non-negative rate per second) + - field1_sum + - field1_s2 (variance) + - field1_stdev (standard deviation) + - field1_interval (interval in nanoseconds) + +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 4ad6c77056314..458a9b9c99560 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -129,7 +129,7 @@ func (b *BasicStats) Add(in telegraf.Metric) { //variable initialization x := fv mean := tmp.mean - M2 := tmp.M2 + m2 := tmp.M2 //counter compute n := tmp.count + 1 tmp.count = n @@ -138,8 +138,8 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean = mean + delta/n tmp.mean = mean //variance/stdev compute - M2 = M2 + delta*(x-mean) - tmp.M2 = M2 + m2 = m2 + delta*(x-mean) + tmp.M2 = m2 //max/min compute if fv < tmp.min { tmp.min = fv diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 51ecd5c992442..3f08624978446 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var m1 = metric.New("m1", @@ -697,11 +698,11 @@ func TestBasicStatsWithDefaultStats(t *testing.T) { acc := testutil.Accumulator{} aggregator.Push(&acc) - assert.True(t, acc.HasField("m1", "a_count")) - assert.True(t, acc.HasField("m1", "a_min")) - assert.True(t, acc.HasField("m1", "a_max")) - assert.True(t, acc.HasField("m1", "a_mean")) - assert.True(t, acc.HasField("m1", "a_stdev")) - assert.True(t, acc.HasField("m1", "a_s2")) - assert.False(t, acc.HasField("m1", "a_sum")) + require.True(t, acc.HasField("m1", "a_count")) + require.True(t, acc.HasField("m1", "a_min")) + require.True(t, acc.HasField("m1", "a_max")) + require.True(t, acc.HasField("m1", "a_mean")) + require.True(t, acc.HasField("m1", "a_stdev")) + require.True(t, acc.HasField("m1", "a_s2")) + require.False(t, acc.HasField("m1", "a_sum")) } diff --git a/plugins/aggregators/deprecations.go b/plugins/aggregators/deprecations.go new file mode 100644 index 0000000000000..dd2302e0255c3 --- /dev/null +++ b/plugins/aggregators/deprecations.go @@ -0,0 +1,6 @@ +package aggregators + +import "github.com/influxdata/telegraf" + +// Deprecations lists the deprecated plugins +var Deprecations = map[string]telegraf.DeprecationInfo{} diff --git a/plugins/aggregators/derivative/README.md 
b/plugins/aggregators/derivative/README.md index 3ca29c36d4f49..6d47dc4c1850f 100644 --- a/plugins/aggregators/derivative/README.md +++ b/plugins/aggregators/derivative/README.md @@ -1,42 +1,47 @@ # Derivative Aggregator Plugin + The Derivative Aggregator Plugin estimates the derivative for all fields of the aggregated metrics. -### Time Derivatives +## Time Derivatives In its default configuration it determines the first and last measurement of the period. From these measurements the time difference in seconds is calculated. This time difference is than used to divide the difference of each field using the following formula: -``` + +```text field_last - field_first derivative = -------------------------- time_difference ``` + For each field the derivative is emitted with a naming pattern `_rate`. -### Custom Derivation Variable +## Custom Derivation Variable The plugin supports to use a field of the aggregated measurements as derivation variable in the denominator. This variable is assumed to be a monotonically increasing value. In this feature the following formula is used: -``` + +```text field_last - field_first derivative = -------------------------------- variable_last - variable_first ``` + **Make sure the specified variable is not filtered and exists in the metrics passed to this aggregator!** -When using a custom derivation variable, you should change the `suffix` of the derivative name. +When using a custom derivation variable, you should change the `suffix` of the derivative name. See the next section on [customizing the derivative name](#customize-the-derivative-name) for details. -### Customize the Derivative Name +## Customize the Derivative Name The derivatives generated by the aggregator are named `_rate`, i.e. they are composed of the field name and a suffix `_rate`. You can configure the suffix to be used by changing the `suffix` parameter. -### Roll-Over to next Period +## Roll-Over to next Period Calculating the derivative for a period requires at least two distinct measurements during that period. Whether those are available depends on the configuration of the aggregator `period` and the agent `interval`. @@ -47,7 +52,7 @@ replace the roll-over metric. A main benefit of this roll-over is the ability to cope with multiple "quiet" periods, where no new measurement is pushed to the aggregator. The roll-over will take place at most `max_roll_over` times. -#### Example of Roll-Over +### Example of Roll-Over Let us assume we have an input plugin, that generates a measurement with a single metric "test" every 2 seconds. Let this metric increase the first 10 seconds from 0.0 to 10.0 and then decrease the next 10 seconds form 10.0 to 0.0: @@ -111,18 +116,18 @@ To illustrate this, let us compare the derivatives for `period = "7s"`. | timestamp | value | `max_roll_over = 0` | `max_roll_over = 1` | |-----------|-------|-----------|--------------| | 0 | 0.0 | -| 2 | 2.0 | -| 4 | 4.0 | -| 6 | 6.0 | +| 2 | 2.0 | +| 4 | 4.0 | +| 6 | 6.0 | ||| 1.0 | 1.0 | | 8 | 8.0 | -| 10 | 10.0 | -| 12 | 8.0 | -||| 0.0 | 0.33... | -| 14 | 6.0 | +| 10 | 10.0 | +| 12 | 8.0 | +||| 0.0 | 0.33... | +| 14 | 6.0 | | 16 | 4.0 | -| 18 | 2.0 | -| 20 | 0.0 | +| 18 | 2.0 | +| 20 | 0.0 | ||| -1.0 | -1.0 | The difference stems from the change of the value between periods, e.g. from 6.0 to 8.0 between first and second period. 
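As a rough illustration of the rate formulas described in this README, the following is a minimal, self-contained Go sketch, not the plugin's actual implementation. It computes `(field_last - field_first)` divided either by the elapsed time in seconds or by a custom monotonically increasing field; the `sample`, `rate`, and `rateByVariable` names are invented for the example, and the numbers mirror the `bytes_recv`/`packets_recv` values from the example output shown later in this README.

```go
package main

import (
	"fmt"
	"time"
)

// sample is a minimal stand-in for one aggregated measurement of a single field.
type sample struct {
	value float64
	ts    time.Time
}

// rate applies the time-derivative formula above using the first and last
// sample of a period: (field_last - field_first) / time_difference_in_seconds.
func rate(first, last sample) float64 {
	dt := last.ts.Sub(first.ts).Seconds()
	return (last.value - first.value) / dt
}

// rateByVariable divides by a custom, monotonically increasing field instead
// of time, mirroring the "Custom Derivation Variable" section.
func rateByVariable(firstVal, lastVal, firstVar, lastVar float64) float64 {
	return (lastVal - firstVal) / (lastVar - firstVar)
}

func main() {
	start := time.Unix(1508843640, 0)
	first := sample{value: 15409, ts: start}                      // bytes_recv at period start
	last := sample{value: 73987, ts: start.Add(20 * time.Second)} // bytes_recv at period end

	fmt.Printf("bytes_recv_rate=%.2f\n", rate(first, last))
	fmt.Printf("bytes_recv_by_packets_recv=%.2f\n", rateByVariable(15409, 73987, 164, 364))
}
```

Run against these inputs the sketch prints `292.89` for the packet-based derivative, matching the example output below; only the first and last values of the period matter, which is why the roll-over mechanism discussed above exists for sparse periods.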
@@ -130,7 +135,7 @@ Thoses changes are omitted with `max_roll_over = 0` but are respected with `max_ That there are no more differences in the calculated derivatives is due to the example data, which has constant derivatives in during the first and last period, even when including the gap between the periods. Using `max_roll_over` with a value greater 0 may be important, if you need to detect changes between periods, e.g. when you have very few measurements in a period or quasi-constant metrics with only occasional changes. -### Configuration +## Configuration ```toml [[aggregators.derivative]] @@ -151,13 +156,14 @@ Using `max_roll_over` with a value greater 0 may be important, if you need to de period = "30s" ``` -### Tags: +### Tags + No tags are applied by this aggregator. Existing tags are passed throug the aggregator untouched. -### Example Output +## Example Output -``` +```text net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000 net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000 net bytes_recv_by_packets_recv=292.89 1508843660000000000 diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go index fb84dae6ff54a..e0c91767018ef 100644 --- a/plugins/aggregators/derivative/derivative_test.go +++ b/plugins/aggregators/derivative/derivative_test.go @@ -4,6 +4,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -40,7 +42,8 @@ func TestTwoFullEventsWithParameter(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Add(finish) @@ -66,7 +69,8 @@ func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(finish) derivative.Add(start) @@ -88,7 +92,8 @@ func TestTwoFullEventsWithoutParameter(t *testing.T) { acc := testutil.Accumulator{} derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() duration, _ := time.ParseDuration("2s") @@ -130,7 +135,8 @@ func TestTwoFullEventsInSeperatePushes(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -163,7 +169,8 @@ func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -195,7 +202,8 @@ func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) // This test relies on RunningAggregator always callining Reset after Push @@ -220,7 +228,8 @@ func TestIgnoresMissingVariable(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) noParameter := 
metric.New("TestMetric", map[string]string{"state": "no_parameter"}, @@ -260,7 +269,8 @@ func TestMergesDifferenMetricsWithSameHash(t *testing.T) { acc := testutil.Accumulator{} derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() duration, _ := time.ParseDuration("2s") @@ -309,7 +319,8 @@ func TestDropsAggregatesOnMaxRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), } derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -332,7 +343,8 @@ func TestAddMetricsResetsRollOver(t *testing.T) { cache: make(map[uint64]*aggregate), Log: testutil.Logger{}, } - derivative.Init() + err := derivative.Init() + require.NoError(t, err) derivative.Add(start) derivative.Push(&acc) @@ -356,7 +368,8 @@ func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { period, _ := time.ParseDuration("10s") derivative := NewDerivative() derivative.Log = testutil.Logger{} - derivative.Init() + err := derivative.Init() + require.NoError(t, err) startTime := time.Now() first := metric.New("One Field", diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md index 444746d784349..10dc72139effd 100644 --- a/plugins/aggregators/final/README.md +++ b/plugins/aggregators/final/README.md @@ -11,7 +11,7 @@ discrete time series such as procstat, cgroup, kubernetes etc. When a series has not been updated within the time defined in `series_timeout`, the last metric is emitted with the `_final` appended. -### Configuration +## Configuration ```toml [[aggregators.final]] @@ -25,20 +25,21 @@ When a series has not been updated within the time defined in series_timeout = "5m" ``` -### Metrics +## Metrics Measurement and tags are unchanged, fields are emitted with the suffix `_final`. -### Example Output +## Example Output -``` +```text counter,host=bar i_final=3,j_final=6 1554281635115090133 counter,host=foo i_final=3,j_final=6 1554281635112992012 ``` Original input: -``` + +```text counter,host=bar i=1,j=4 1554281633101153300 counter,host=foo i=1,j=4 1554281633099323601 counter,host=bar i=2,j=5 1554281634107980073 diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index f0b6c15b11804..5fd56f1fbc345 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -12,7 +12,7 @@ By default bucket counts are not reset between periods and will be non-strictly increasing while Telegraf is running. This behavior can be changed by setting the `reset` parameter to true. -#### Design +## Design Each metric is passed to the aggregator and this aggregator searches histogram buckets for those fields, which have been specified in the @@ -24,7 +24,7 @@ The algorithm of hit counting to buckets was implemented on the base of the algorithm which is implemented in the Prometheus [client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go). -### Configuration +## Configuration ```toml # Configuration for aggregate histogram metrics @@ -73,40 +73,39 @@ boundaries. Each float value defines the inclusive upper (right) bound of the b The `+Inf` bucket is added automatically and does not need to be defined. (For left boundaries, these specified bucket borders and `-Inf` will be used). 
-### Measurements & Fields: +## Measurements & Fields The postfix `bucket` will be added to each field key. - measurement1 - - field1_bucket - - field2_bucket + - field1_bucket + - field2_bucket -### Tags: +### Tags -* `cumulative = true` (default): - * `le`: Right bucket border. It means that the metric value is less than or +- `cumulative = true` (default): + - `le`: Right bucket border. It means that the metric value is less than or equal to the value of this tag. If a metric value is sorted into a bucket, it is also sorted into all larger buckets. As a result, the value of `_bucket` is rising with rising `le` value. When `le` is `+Inf`, the bucket value is the count of all metrics, because all metric values are less than or equal to positive infinity. -* `cumulative = false`: - * `gt`: Left bucket border. It means that the metric value is greater than +- `cumulative = false`: + - `gt`: Left bucket border. It means that the metric value is greater than (and not equal to) the value of this tag. - * `le`: Right bucket border. It means that the metric value is less than or + - `le`: Right bucket border. It means that the metric value is less than or equal to the value of this tag. - * As both `gt` and `le` are present, each metric is sorted in only exactly - one bucket. + - As both `gt` and `le` are present, each metric is sorted in only exactly + one bucket. - -### Example Output: +## Example Output Let assume we have the buckets [0, 10, 50, 100] and the following field values for `usage_idle`: [50, 7, 99, 12] With `cumulative = true`: -``` +```text cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12 @@ -116,7 +115,7 @@ cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # With `cumulative = false`: -``` +```text cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12 diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index c2a05cc283c3d..ad24d5b338528 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) type fields map[string]interface{} @@ -82,9 +83,7 @@ func TestHistogram(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"}) @@ -106,9 +105,7 @@ func TestHistogramNonCumulative(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of 
metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"}) @@ -130,9 +127,7 @@ func TestHistogramWithReset(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"}) @@ -155,10 +150,7 @@ func TestHistogramWithAllFields(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) @@ -188,10 +180,7 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"}) @@ -241,7 +230,7 @@ func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { func TestWrongBucketsOrder(t *testing.T) { defer func() { if r := recover(); r != nil { - assert.Equal( + require.Equal( t, "histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a", fmt.Sprint(r), @@ -291,12 +280,9 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa } // check fields with their counts - if assert.Equal(t, fields, checkedMetric.Fields) { - return - } - - assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName)) + require.Equal(t, fields, checkedMetric.Fields) + return } - assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) + require.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) } 
diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md index 89f7f0983c692..79cad5cfb4bd3 100644 --- a/plugins/aggregators/merge/README.md +++ b/plugins/aggregators/merge/README.md @@ -7,7 +7,7 @@ Use this plugin when fields are split over multiple metrics, with the same measurement, tag set and timestamp. By merging into a single metric they can be handled more efficiently by the output. -### Configuration +## Configuration ```toml [[aggregators.merge]] @@ -16,7 +16,7 @@ be handled more efficiently by the output. drop_original = true ``` -### Example +## Example ```diff - cpu,host=localhost usage_time=42 1567562620000000000 diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 94e54590b586f..53a55f0853e1a 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -229,7 +229,8 @@ var m2 = metric.New( func BenchmarkMergeOne(b *testing.B) { var merger Merge - merger.Init() + err := merger.Init() + require.NoError(b, err) var acc testutil.NopAccumulator for n := 0; n < b.N; n++ { @@ -241,7 +242,8 @@ func BenchmarkMergeOne(b *testing.B) { func BenchmarkMergeTwo(b *testing.B) { var merger Merge - merger.Init() + err := merger.Init() + require.NoError(b, err) var acc testutil.NopAccumulator for n := 0; n < b.N; n++ { diff --git a/plugins/aggregators/minmax/README.md b/plugins/aggregators/minmax/README.md index f7405b78cbe9d..fefd2f2e2e165 100644 --- a/plugins/aggregators/minmax/README.md +++ b/plugins/aggregators/minmax/README.md @@ -3,7 +3,7 @@ The minmax aggregator plugin aggregates min & max values of each field it sees, emitting the aggrate every `period` seconds. -### Configuration: +## Configuration ```toml # Keep the aggregate min/max of each metric passing through. @@ -16,19 +16,19 @@ emitting the aggrate every `period` seconds. drop_original = false ``` -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_max - - field1_min + - field1_max + - field1_min -### Tags: +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1.72 1475583980000000000 system,host=tars load1=1.6 1475583990000000000 diff --git a/plugins/aggregators/quantile/README.md b/plugins/aggregators/quantile/README.md index 77d0f856409ec..423857465c18d 100644 --- a/plugins/aggregators/quantile/README.md +++ b/plugins/aggregators/quantile/README.md @@ -3,7 +3,7 @@ The quantile aggregator plugin aggregates specified quantiles for each numeric field per metric it sees and emits the quantiles every `period`. -### Configuration +## Configuration ```toml [[aggregators.quantile]] @@ -33,8 +33,10 @@ per metric it sees and emits the quantiles every `period`. # compression = 100.0 ``` -#### Algorithm types -##### t-digest +## Algorithm types + +### t-digest + Proposed by [Dunning & Ertl (2019)][tdigest_paper] this type uses a special data-structure to cluster data. These clusters are later used to approximate the requested quantiles. The bounds of the approximation @@ -47,7 +49,8 @@ where exact quantile calculation isn't required. For implementation details see the underlying [golang library][tdigest_lib]. -##### exact R7 and R8 +### exact R7 and R8 + These algorithms compute quantiles as described in [Hyndman & Fan (1996)][hyndman_fan]. The R7 variant is used in Excel and NumPy. 
The R8 variant is recommended by Hyndman & Fan due to its independence of the underlying sample distribution. @@ -57,8 +60,8 @@ a lot of memory when used with a large number of series or a large number of samples. They are slower than the `t-digest` algorithm and are recommended only to be used with a small number of samples and series. +## Benchmark (linux/amd64) -#### Benchmark (linux/amd64) The benchmark was performed by adding 100 metrics with six numeric (and two non-numeric) fields to the aggregator and the derive the aggregation result. @@ -72,7 +75,8 @@ result. | exact R7 | 100 | 7868816 ns/op | | exact R8 | 100 | 8099612 ns/op | -### Measurements +## Measurements + Measurement names are passed trough this aggregator. ### Fields @@ -82,6 +86,7 @@ fields are aggregated in the form `_`. Other field types (e.g. boolean, string) are ignored and dropped from the output. For example passing in the following metric as *input*: + - somemetric - average_response_ms (float64) - minimum_response_ms (float64) @@ -89,7 +94,8 @@ For example passing in the following metric as *input*: - status (string) - ok (boolean) -and the default setting for `quantiles ` you get the following *output* +and the default setting for `quantiles` you get the following *output* + - somemetric - average_response_ms_025 (float64) - average_response_ms_050 (float64) @@ -110,18 +116,18 @@ Tags are passed through to the output by this aggregator. ### Example Output -``` +```text cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000 cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000 cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000 cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000 ``` -# References +## References + - Dunning & Ertl: "Computing Extremely Accurate Quantiles Using t-Digests", arXiv:1902.04023 (2019) [pdf][tdigest_paper] - Hyndman & Fan: "Sample Quantiles in Statistical Packages", The American Statistician, vol. 50, pp. 
361-365 (1996) [pdf][hyndman_fan] - [tdigest_paper]: https://arxiv.org/abs/1902.04023 [tdigest_lib]: https://github.com/caio/go-tdigest [hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go index 641844f3f4e77..e6d73507a1155 100644 --- a/plugins/aggregators/quantile/algorithms.go +++ b/plugins/aggregators/quantile/algorithms.go @@ -49,8 +49,7 @@ func (e *exactAlgorithmR7) Quantile(q float64) float64 { // Get the quantile index and the fraction to the neighbor // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7 // Same as Excel and Numpy. - N := float64(size) - n := q * (N - 1) + n := q * (float64(size) - 1) i, gamma := math.Modf(n) j := int(i) if j < 0 { @@ -95,8 +94,7 @@ func (e *exactAlgorithmR8) Quantile(q float64) float64 { // Get the quantile index and the fraction to the neighbor // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8 - N := float64(size) - n := q*(N+1.0/3.0) - (2.0 / 3.0) // Indices are zero-base here but one-based in the paper + n := q*(float64(size)+1.0/3.0) - (2.0 / 3.0) // Indices are zero-base here but one-based in the paper i, gamma := math.Modf(n) j := int(i) if j < 0 { diff --git a/plugins/aggregators/starlark/README.md b/plugins/aggregators/starlark/README.md new file mode 100644 index 0000000000000..01bcf963c1258 --- /dev/null +++ b/plugins/aggregators/starlark/README.md @@ -0,0 +1,103 @@ +# Starlark Aggregator + +The `starlark` aggregator allows to implement a custom aggregator plugin with a Starlark script. The Starlark +script needs to be composed of the three methods defined in the Aggregator plugin interface which are `add`, `push` and `reset`. + +The Starlark Aggregator plugin calls the Starlark function `add` to add the metrics to the aggregator, then calls the Starlark function `push` to push the resulting metrics into the accumulator and finally calls the Starlark function `reset` to reset the entire state of the plugin. + +The Starlark functions can use the global function `state` to keep temporary the metrics to aggregate. + +The Starlark language is a dialect of Python, and will be familiar to those who +have experience with the Python language. However, there are major [differences](#python-differences). +Existing Python code is unlikely to work unmodified. The execution environment +is sandboxed, and it is not possible to do I/O operations such as reading from +files or sockets. + +The **[Starlark specification][]** has details about the syntax and available +functions. + +## Configuration + +```toml +[[aggregators.starlark]] + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. 
+ # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true +``` + +## Usage + +The Starlark code should contain a function called `add` that takes a metric as argument. +The function will be called with each metric to add, and doesn't return anything. + +```python +def add(metric): + state["last"] = metric +``` + +The Starlark code should also contain a function called `push` that doesn't take any argument. +The function will be called to compute the aggregation, and returns the metrics to push to the accumulator. + +```python +def push(): + return state.get("last") +``` + +The Starlark code should also contain a function called `reset` that doesn't take any argument. +The function will be called to reset the plugin, and doesn't return anything. + +```python +def push(): + state.clear() +``` + +For a list of available types and functions that can be used in the code, see +the [Starlark specification][]. + +## Python Differences + +Refer to the section [Python Differences](plugins/processors/starlark/README.md#python-differences) of the documentation about the Starlark processor. + +## Libraries available + +Refer to the section [Libraries available](plugins/processors/starlark/README.md#libraries-available) of the documentation about the Starlark processor. + +## Common Questions + +Refer to the section [Common Questions](plugins/processors/starlark/README.md#common-questions) of the documentation about the Starlark processor. + +## Examples + +- [minmax](/plugins/aggregators/starlark/testdata/min_max.star) - A minmax aggregator implemented with a Starlark script. +- [merge](/plugins/aggregators/starlark/testdata/merge.star) - A merge aggregator implemented with a Starlark script. + +[All examples](/plugins/aggregators/starlark/testdata) are in the testdata folder. + +Open a Pull Request to add any other useful Starlark examples. + +[Starlark specification]: https://github.com/google/starlark-go/blob/master/doc/spec.md +[dict]: https://github.com/google/starlark-go/blob/master/doc/spec.md#dictionaries diff --git a/plugins/aggregators/starlark/starlark.go b/plugins/aggregators/starlark/starlark.go new file mode 100644 index 0000000000000..2823d1ed73b9b --- /dev/null +++ b/plugins/aggregators/starlark/starlark.go @@ -0,0 +1,144 @@ +package starlark + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" + common "github.com/influxdata/telegraf/plugins/common/starlark" + "go.starlark.net/starlark" +) + +const ( + description = "Aggregate metrics using a Starlark script" + sampleConfig = ` + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true +` +) + +type Starlark struct { + common.StarlarkCommon +} + +func (s *Starlark) Init() error { + // Execute source + err := s.StarlarkCommon.Init() + if err != nil { + return err + } + + // The source should define an add function. 
+ err = s.AddFunction("add", &common.Metric{}) + if err != nil { + return err + } + + // The source should define a push function. + err = s.AddFunction("push") + if err != nil { + return err + } + + // The source should define a reset function. + err = s.AddFunction("reset") + if err != nil { + return err + } + + return nil +} + +func (s *Starlark) SampleConfig() string { + return sampleConfig +} + +func (s *Starlark) Description() string { + return description +} + +func (s *Starlark) Add(metric telegraf.Metric) { + parameters, found := s.GetParameters("add") + if !found { + s.Log.Errorf("The parameters of the add function could not be found") + return + } + parameters[0].(*common.Metric).Wrap(metric) + + _, err := s.Call("add") + if err != nil { + s.LogError(err) + } +} + +func (s *Starlark) Push(acc telegraf.Accumulator) { + rv, err := s.Call("push") + if err != nil { + s.LogError(err) + acc.AddError(err) + return + } + + switch rv := rv.(type) { + case *starlark.List: + iter := rv.Iterate() + defer iter.Done() + var v starlark.Value + for iter.Next(&v) { + switch v := v.(type) { + case *common.Metric: + m := v.Unwrap() + acc.AddMetric(m) + default: + s.Log.Errorf("Invalid type returned in list: %s", v.Type()) + } + } + case *common.Metric: + m := rv.Unwrap() + acc.AddMetric(m) + case starlark.NoneType: + default: + s.Log.Errorf("Invalid type returned: %T", rv) + } +} + +func (s *Starlark) Reset() { + _, err := s.Call("reset") + if err != nil { + s.LogError(err) + } +} + +// init initializes starlark aggregator plugin +func init() { + aggregators.Add("starlark", func() telegraf.Aggregator { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + }, + } + }) +} diff --git a/plugins/aggregators/starlark/starlark_test.go b/plugins/aggregators/starlark/starlark_test.go new file mode 100644 index 0000000000000..a45f9e84cd515 --- /dev/null +++ b/plugins/aggregators/starlark/starlark_test.go @@ -0,0 +1,432 @@ +package starlark + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + common "github.com/influxdata/telegraf/plugins/common/starlark" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var m1 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(1), + "c": int64(1), + "d": int64(1), + "e": int64(1), + "f": int64(2), + "g": int64(2), + "h": int64(2), + "i": int64(2), + "j": int64(3), + }, + time.Now(), +) +var m2 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(3), + "c": int64(3), + "d": int64(3), + "e": int64(3), + "f": int64(1), + "g": int64(1), + "h": int64(1), + "i": int64(1), + "j": int64(1), + "k": int64(200), + "l": int64(200), + "ignoreme": "string", + "andme": true, + }, + time.Now(), +) + +func BenchmarkApply(b *testing.B) { + minmax, _ := newMinMax() + + for n := 0; n < b.N; n++ { + minmax.Add(m1) + minmax.Add(m2) + } +} + +// Test two metrics getting added. 
+func TestMinMaxWithPeriod(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + + minmax.Add(m1) + minmax.Add(m2) + minmax.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(1), + "c_max": int64(3), + "c_min": int64(1), + "d_max": int64(3), + "d_min": int64(1), + "e_max": int64(3), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(1), + "g_max": int64(2), + "g_min": int64(1), + "h_max": int64(2), + "h_min": int64(1), + "i_max": int64(2), + "i_min": int64(1), + "j_max": int64(3), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test two metrics getting added with a push/reset in between (simulates +// getting added in different periods.) +func TestMinMaxDifferentPeriods(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + minmax.Add(m1) + minmax.Push(&acc) + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(1), + "b_min": int64(1), + "c_max": int64(1), + "c_min": int64(1), + "d_max": int64(1), + "d_min": int64(1), + "e_max": int64(1), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(2), + "g_max": int64(2), + "g_min": int64(2), + "h_max": int64(2), + "h_min": int64(2), + "i_max": int64(2), + "i_min": int64(2), + "j_max": int64(3), + "j_min": int64(3), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) + + acc.ClearMetrics() + minmax.Reset() + minmax.Add(m2) + minmax.Push(&acc) + expectedFields = map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(3), + "c_max": int64(3), + "c_min": int64(3), + "d_max": int64(3), + "d_min": int64(3), + "e_max": int64(3), + "e_min": int64(3), + "f_max": int64(1), + "f_min": int64(1), + "g_max": int64(1), + "g_min": int64(1), + "h_max": int64(1), + "h_min": int64(1), + "i_max": int64(1), + "i_min": int64(1), + "j_max": int64(1), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags = map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func newMinMax() (*Starlark, error) { + return newStarlarkFromScript("testdata/min_max.star") +} + +func TestSimple(t *testing.T) { + plugin, err := newMerge() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestNanosecondPrecision(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) 
+ + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + acc.SetPrecision(time.Second) + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 1), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestReset(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + plugin.Reset() + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func newMerge() (*Starlark, error) { + return newStarlarkFromScript("testdata/merge.star") +} + +func TestLastFromSource(t *testing.T) { + acc := testutil.Accumulator{} + plugin, err := newStarlarkFromSource(` +state = {} +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +`) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_idle": 31, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Push(&acc) + expectedFields := map[string]interface{}{ + "time_idle": int64(31), + } + expectedTags := map[string]string{ + "cpu": "cpu2", + } + acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags) + plugin.Reset() +} + +func newStarlarkFromSource(source string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Source: source, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} + +func newStarlarkFromScript(script string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Script: script, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} diff --git a/plugins/aggregators/starlark/testdata/merge.star b/plugins/aggregators/starlark/testdata/merge.star new file mode 100644 index 0000000000000..77c5148ca9f76 --- /dev/null +++ 
b/plugins/aggregators/starlark/testdata/merge.star @@ -0,0 +1,31 @@ +# Example of a merge aggregator implemented with a starlark script. +load('time.star', 'time') +state = {} +def add(metric): + metrics = state.get("metrics") + if metrics == None: + metrics = {} + state["metrics"] = metrics + state["ordered"] = [] + gId = groupID(metric) + m = metrics.get(gId) + if m == None: + m = deepcopy(metric) + metrics[gId] = m + state["ordered"].append(m) + else: + for k, v in metric.fields.items(): + m.fields[k] = v + +def push(): + return state.get("ordered") + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + "-" + key = key + "-" + str(metric.time) + return hash(key) \ No newline at end of file diff --git a/plugins/aggregators/starlark/testdata/min_max.star b/plugins/aggregators/starlark/testdata/min_max.star new file mode 100644 index 0000000000000..f8b23355c8e51 --- /dev/null +++ b/plugins/aggregators/starlark/testdata/min_max.star @@ -0,0 +1,53 @@ +# Example of a min_max aggregator implemented with a starlark script. + +supported_types = (["int", "float"]) +state = {} +def add(metric): + gId = groupID(metric) + aggregate = state.get(gId) + if aggregate == None: + aggregate = { + "name": metric.name, + "tags": metric.tags, + "fields": {} + } + for k, v in metric.fields.items(): + if type(v) in supported_types: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + state[gId] = aggregate + else: + for k, v in metric.fields.items(): + if type(v) in supported_types: + min_max = aggregate["fields"].get(k) + if min_max == None: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + elif v < min_max["min"]: + aggregate["fields"][k]["min"] = v + elif v > min_max["max"]: + aggregate["fields"][k]["max"] = v + +def push(): + metrics = [] + for a in state: + fields = {} + for k in state[a]["fields"]: + fields[k + "_min"] = state[a]["fields"][k]["min"] + fields[k + "_max"] = state[a]["fields"][k]["max"] + m = Metric(state[a]["name"], state[a]["tags"], fields) + metrics.append(m) + return metrics + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + return hash(key) \ No newline at end of file diff --git a/plugins/aggregators/valuecounter/README.md b/plugins/aggregators/valuecounter/README.md index ef68e0f4e57ca..1f74a4982c577 100644 --- a/plugins/aggregators/valuecounter/README.md +++ b/plugins/aggregators/valuecounter/README.md @@ -15,7 +15,7 @@ Counting fields with a high number of potential values may produce significant amounts of new fields and memory usage, take care to only count fields with a limited set of values. -### Configuration: +## Configuration ```toml [[aggregators.valuecounter]] @@ -29,22 +29,23 @@ limited set of values. fields = ["status"] ``` -### Measurements & Fields: +### Measurements & Fields - measurement1 - - field_value1 - - field_value2 + - field_value1 + - field_value2 -### Tags: +### Tags No tags are applied by this aggregator. -### Example Output: +## Example Output Example for parsing a HTTP access log. 
telegraf.conf: -``` + +```toml [[inputs.logparser]] files = ["/tmp/tst.log"] [inputs.logparser.grok] @@ -57,13 +58,14 @@ telegraf.conf: ``` /tmp/tst.log -``` + +```text /some/path 200 /some/path 401 /some/path 200 ``` -``` +```shell $ telegraf --config telegraf.conf --quiet access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011 diff --git a/plugins/common/kafka/scram_client.go b/plugins/common/kafka/scram_client.go index f6aa9d6c4e285..765e76e96f7e1 100644 --- a/plugins/common/kafka/scram_client.go +++ b/plugins/common/kafka/scram_client.go @@ -27,8 +27,7 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { - response, err = x.ClientConversation.Step(challenge) - return + return x.ClientConversation.Step(challenge) } func (x *XDGSCRAMClient) Done() bool { diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index 7451639a75423..84aae8fe8557b 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -2,7 +2,7 @@ package logrus import ( "io" - "log" + "log" //nolint:revive // Allow exceptional but valid use of log here. "strings" "sync" @@ -14,7 +14,7 @@ var once sync.Once type LogHook struct { } -// Install a logging hook into the logrus standard logger, diverting all logs +// InstallHook installs a logging hook into the logrus standard logger, diverting all logs // through the Telegraf logger at debug level. This is useful for libraries // that directly log to the logrus system without providing an override method. func InstallHook() { diff --git a/plugins/common/proxy/proxy.go b/plugins/common/proxy/proxy.go index 4ef97f1eb52e8..00efbb7ae7b0f 100644 --- a/plugins/common/proxy/proxy.go +++ b/plugins/common/proxy/proxy.go @@ -14,11 +14,11 @@ type proxyFunc func(req *http.Request) (*url.URL, error) func (p *HTTPProxy) Proxy() (proxyFunc, error) { if len(p.HTTPProxyURL) > 0 { - url, err := url.Parse(p.HTTPProxyURL) + address, err := url.Parse(p.HTTPProxyURL) if err != nil { return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) } - return http.ProxyURL(url), nil + return http.ProxyURL(address), nil } return http.ProxyFromEnvironment, nil } diff --git a/plugins/common/shim/README.md b/plugins/common/shim/README.md index 5453c90a4d548..e58249608ae48 100644 --- a/plugins/common/shim/README.md +++ b/plugins/common/shim/README.md @@ -4,6 +4,7 @@ The goal of this _shim_ is to make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This allows anyone to build and run it as a separate app using one of the execd plugins: + - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) @@ -56,8 +57,8 @@ execd plugins: Refer to the execd plugin readmes for more information. -## Congratulations! +## Congratulations You've done it! Consider publishing your plugin to github and open a Pull Request back to the Telegraf repo letting us know about the availability of your -[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). \ No newline at end of file +[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). 
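
The shim README changes above describe running an extracted plugin as a stand-alone binary through `inputs.execd`, and the shim test hunks later in this patch show `AddInput` and `Run` returning errors. For orientation only, a minimal stand-alone `main.go` might look like the sketch below; the `shim.New` constructor and the trivial `myInput` type are illustrative assumptions, not part of this patch.

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/common/shim"
)

// myInput is a hypothetical stand-in for the plugin being extracted.
type myInput struct{}

func (*myInput) SampleConfig() string { return "" }
func (*myInput) Description() string  { return "example stand-alone input" }

func (*myInput) Gather(acc telegraf.Accumulator) error {
	// Emit a single field so the binary produces visible line protocol on stdout.
	acc.AddFields("example", map[string]interface{}{"value": 42}, nil)
	return nil
}

func main() {
	s := shim.New() // assumed constructor from plugins/common/shim

	// AddInput and Run return errors, as exercised by the shim tests in this patch.
	if err := s.AddInput(&myInput{}); err != nil {
		fmt.Fprintf(os.Stderr, "failed to add input: %s\n", err)
		os.Exit(1)
	}

	// Poll every 10s until stdin closes; the binary is meant to be launched by [[inputs.execd]].
	if err := s.Run(10 * time.Second); err != nil {
		fmt.Fprintf(os.Stderr, "shim run error: %s\n", err)
		os.Exit(1)
	}
}
```

Pointed at by an `[[inputs.execd]]` section (for example `command = ["/path/to/binary"]`), such a binary writes metrics to stdout in the same way the shim input test in this patch asserts.
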
diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index 089c2b7ee7525..ad08e08ea803d 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,7 +3,7 @@ package shim import ( "errors" "fmt" - "log" + "log" //nolint:revive // Allow exceptional but valid use of log here. "os" "github.com/BurntSushi/toml" diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 75ad18239fbb0..ffe58a1d5de0a 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -5,16 +5,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" tgConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/processors" - "github.com/stretchr/testify/require" ) func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + err := os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") + require.NoError(t, err) + err = os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, err) inputs.Add("test", func() telegraf.Input { return &serviceInput{} @@ -54,6 +57,7 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*time.Second, inp.Duration) require.EqualValues(t, 3*1000*1000, inp.Size) + require.EqualValues(t, 52, inp.Hex) } func TestLoadingProcessorWithConfig(t *testing.T) { @@ -72,6 +76,7 @@ func TestLoadingProcessorWithConfig(t *testing.T) { type testDurationInput struct { Duration tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { diff --git a/plugins/common/shim/goshim.go b/plugins/common/shim/goshim.go index 7be139194520f..ad03cff22d79e 100644 --- a/plugins/common/shim/goshim.go +++ b/plugins/common/shim/goshim.go @@ -84,13 +84,13 @@ func (s *Shim) Run(pollInterval time.Duration) error { if err != nil { return fmt.Errorf("RunProcessor error: %w", err) } - } else if s.Output != nil { + } else if s.Output != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code err := s.RunOutput() if err != nil { return fmt.Errorf("RunOutput error: %w", err) } } else { - return fmt.Errorf("Nothing to run") + return fmt.Errorf("nothing to run") } return nil @@ -102,7 +102,7 @@ func hasQuit(ctx context.Context) bool { func (s *Shim) writeProcessedMetrics() error { serializer := influx.NewSerializer() - for { + for { //nolint:gosimple // for-select used on purpose select { case m, open := <-s.metricCh: if !open { @@ -113,7 +113,10 @@ func (s *Shim) writeProcessedMetrics() error { return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + _, err = fmt.Fprint(s.stdout, string(b)) + if err != nil { + return fmt.Errorf("failed to write metric: %s", err) + } } } } diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go index bbd1a0b703cc5..0f2bd4c7d3bb9 100644 --- a/plugins/common/shim/goshim_test.go +++ b/plugins/common/shim/goshim_test.go @@ -8,8 +8,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" ) func TestShimSetsUpLogger(t *testing.T) { @@ -18,7 +19,8 @@ func TestShimSetsUpLogger(t *testing.T) { runErroringInputPlugin(t, 40*time.Second, stdinReader, nil, stderrWriter) - 
stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) // <-metricProcessed @@ -27,7 +29,8 @@ func TestShimSetsUpLogger(t *testing.T) { require.NoError(t, err) require.Contains(t, out, "Error in plugin: intentional") - stdinWriter.Close() + err = stdinWriter.Close() + require.NoError(t, err) } func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) { @@ -46,7 +49,8 @@ func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reade shim.stderr = stderr log.SetOutput(stderr) } - shim.AddInput(inp) + err := shim.AddInput(inp) + require.NoError(t, err) go func() { err := shim.Run(interval) require.NoError(t, err) diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go index 9a0423261ac14..26d164e54c3e7 100644 --- a/plugins/common/shim/input_test.go +++ b/plugins/common/shim/input_test.go @@ -34,7 +34,8 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) <-metricProcessed @@ -43,7 +44,8 @@ func TestInputShimStdinSignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + err = stdinWriter.Close() + require.NoError(t, err) go func() { _, _ = io.ReadAll(r) }() @@ -68,7 +70,8 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou if stderr != nil { shim.stderr = stderr } - shim.AddInput(inp) + err := shim.AddInput(inp) + require.NoError(t, err) go func() { err := shim.Run(interval) require.NoError(t, err) diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go index c8a6ee12ba350..74bfbfdef2019 100644 --- a/plugins/common/shim/logger.go +++ b/plugins/common/shim/logger.go @@ -2,7 +2,7 @@ package shim import ( "fmt" - "log" + "log" //nolint:revive // Allow exceptional but valid use of log here. "os" "reflect" @@ -66,7 +66,7 @@ func (l *Logger) Info(args ...interface{}) { // setLoggerOnPlugin injects the logger into the plugin, // if it defines Log telegraf.Logger. 
This is sort of like SetLogger but using // reflection instead of forcing the plugin author to define the function for it -func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { +func setLoggerOnPlugin(i interface{}, logger telegraf.Logger) { valI := reflect.ValueOf(i) if valI.Type().Kind() != reflect.Ptr { @@ -78,10 +78,9 @@ func setLoggerOnPlugin(i interface{}, log telegraf.Logger) { return } - switch field.Type().String() { - case "telegraf.Logger": + if field.Type().String() == "telegraf.Logger" { if field.CanSet() { - field.Set(reflect.ValueOf(log)) + field.Set(reflect.ValueOf(logger)) } } } diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go index bc00fb70d1bba..072367a98dcf9 100644 --- a/plugins/common/shim/processor_test.go +++ b/plugins/common/shim/processor_test.go @@ -8,11 +8,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/stretchr/testify/require" ) func TestProcessorShim(t *testing.T) { @@ -95,8 +96,8 @@ type testProcessor struct { } func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - metric.AddTag(p.tagName, p.tagValue) + for _, m := range in { + m.AddTag(p.tagName, p.tagValue) } return in } diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf index c324b638497c5..53af78620701d 100644 --- a/plugins/common/shim/testdata/special.conf +++ b/plugins/common/shim/testdata/special.conf @@ -1,4 +1,5 @@ # testing custom field types [[inputs.test]] duration = "3s" - size = "3MB" \ No newline at end of file + size = "3MB" + hex = 0x34 \ No newline at end of file diff --git a/plugins/processors/starlark/builtins.go b/plugins/common/starlark/builtins.go similarity index 90% rename from plugins/processors/starlark/builtins.go rename to plugins/common/starlark/builtins.go index 6876fe9636ab5..9bca11af77837 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/common/starlark/builtins.go @@ -5,21 +5,48 @@ import ( "sort" "time" - "github.com/influxdata/telegraf/metric" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf/metric" ) func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - var name starlark.String - if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil { + var ( + name starlark.String + tags, fields starlark.Value + ) + if err := starlark.UnpackArgs("Metric", args, kwargs, "name", &name, "tags?", &tags, "fields?", &fields); err != nil { + return nil, err + } + + allFields, err := toFields(fields) + if err != nil { + return nil, err + } + allTags, err := toTags(tags) + if err != nil { return nil, err } - m := metric.New(string(name), nil, nil, time.Now()) + m := metric.New(string(name), allTags, allFields, time.Now()) return &Metric{metric: m}, nil } +func toString(value starlark.Value, errorMsg string) (string, error) { + if value, ok := value.(starlark.String); ok { + return string(value), nil + } + return "", fmt.Errorf(errorMsg, value) +} + +func items(value starlark.Value, errorMsg string) ([]starlark.Tuple, error) { + if iter, ok := value.(starlark.IterableMapping); ok { + return iter.Items(), nil + } + return nil, fmt.Errorf(errorMsg, value) +} + func deepcopy(_ 
*starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *Metric if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil { @@ -184,11 +211,11 @@ func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) } defer iter2.Done() - len := starlark.Len(pair) - if len < 0 { + length := starlark.Len(pair) + if length < 0 { return nil, fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) + } else if length != 2 { + return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, length) } var k, v starlark.Value iter2.Next(&k) diff --git a/plugins/processors/starlark/field_dict.go b/plugins/common/starlark/field_dict.go similarity index 82% rename from plugins/processors/starlark/field_dict.go rename to plugins/common/starlark/field_dict.go index 4a332b8268d9d..8b09a045be8e9 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/common/starlark/field_dict.go @@ -6,8 +6,9 @@ import ( "reflect" "strings" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) // FieldDict is a starlark.Value for the metric fields. It is heavily based on the @@ -18,17 +19,17 @@ type FieldDict struct { func (d FieldDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -181,7 +182,7 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d FieldDict) Iterate() starlark.Iterator { d.fieldIterCount++ return &FieldIterator{Metric: d.Metric, fields: d.metric.FieldList()} @@ -274,3 +275,27 @@ func asGoValue(value interface{}) (interface{}, error) { return nil, errors.New("invalid starlark type") } + +// ToFields converts a starlark.Value to a map of values. 
+func toFields(value starlark.Value) (map[string]interface{}, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of fields") + if err != nil { + return nil, err + } + result := make(map[string]interface{}, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for fields") + if err != nil { + return nil, err + } + value, err := asGoValue(item[1]) + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/processors/starlark/logging.go b/plugins/common/starlark/logging.go similarity index 100% rename from plugins/processors/starlark/logging.go rename to plugins/common/starlark/logging.go diff --git a/plugins/processors/starlark/metric.go b/plugins/common/starlark/metric.go similarity index 73% rename from plugins/processors/starlark/metric.go rename to plugins/common/starlark/metric.go index 031d24ad69635..989c345765cff 100644 --- a/plugins/processors/starlark/metric.go +++ b/plugins/common/starlark/metric.go @@ -6,8 +6,9 @@ import ( "strings" "time" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) type Metric struct { @@ -36,15 +37,15 @@ func (m *Metric) Unwrap() telegraf.Metric { // it behaves more like the repr function would in Python. func (m *Metric) String() string { buf := new(strings.Builder) - buf.WriteString("Metric(") - buf.WriteString(m.Name().String()) - buf.WriteString(", tags=") - buf.WriteString(m.Tags().String()) - buf.WriteString(", fields=") - buf.WriteString(m.Fields().String()) - buf.WriteString(", time=") - buf.WriteString(m.Time().String()) - buf.WriteString(")") + buf.WriteString("Metric(") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Name().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", tags=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Tags().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", fields=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Fields().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", time=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Time().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(")") //nolint:revive // from builder.go: "It returns the length of r and a nil error." 
return buf.String() } diff --git a/plugins/common/starlark/starlark.go b/plugins/common/starlark/starlark.go new file mode 100644 index 0000000000000..5f365519871d0 --- /dev/null +++ b/plugins/common/starlark/starlark.go @@ -0,0 +1,182 @@ +package starlark //nolint - Needed to avoid getting import-shadowing: The name 'starlark' shadows an import name (revive) + +import ( + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + "go.starlark.net/lib/math" + "go.starlark.net/lib/time" + "go.starlark.net/resolve" + "go.starlark.net/starlark" + "go.starlark.net/starlarkjson" +) + +type StarlarkCommon struct { + Source string `toml:"source"` + Script string `toml:"script"` + Constants map[string]interface{} `toml:"constants"` + + Log telegraf.Logger `toml:"-"` + StarlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error) + + thread *starlark.Thread + globals starlark.StringDict + functions map[string]*starlark.Function + parameters map[string]starlark.Tuple +} + +func (s *StarlarkCommon) Init() error { + if s.Source == "" && s.Script == "" { + return errors.New("one of source or script must be set") + } + if s.Source != "" && s.Script != "" { + return errors.New("both source or script cannot be set") + } + + s.thread = &starlark.Thread{ + Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) }, + Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) { + return s.StarlarkLoadFunc(module, s.Log) + }, + } + + builtins := starlark.StringDict{} + builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric) + builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy) + builtins["catch"] = starlark.NewBuiltin("catch", catch) + err := s.addConstants(&builtins) + if err != nil { + return err + } + + program, err := s.sourceProgram(builtins, "") + if err != nil { + return err + } + + // Execute source + globals, err := program.Init(s.thread, builtins) + if err != nil { + return err + } + // Make available a shared state to the apply function + globals["state"] = starlark.NewDict(0) + + // Freeze the global state. This prevents modifications to the processor + // state and prevents scripts from containing errors storing tracking + // metrics. Tasks that require global state will not be possible due to + // this, so maybe we should relax this in the future. 
+ globals.Freeze() + + s.globals = globals + s.functions = make(map[string]*starlark.Function) + s.parameters = make(map[string]starlark.Tuple) + return nil +} + +func (s *StarlarkCommon) GetParameters(name string) (starlark.Tuple, bool) { + parameters, found := s.parameters[name] + return parameters, found +} + +func (s *StarlarkCommon) AddFunction(name string, params ...starlark.Value) error { + globalFn, found := s.globals[name] + if !found { + return fmt.Errorf("%s is not defined", name) + } + + fn, found := globalFn.(*starlark.Function) + if !found { + return fmt.Errorf("%s is not a function", name) + } + + if fn.NumParams() != len(params) { + return fmt.Errorf("%s function must take %d parameter(s)", name, len(params)) + } + p := make(starlark.Tuple, len(params)) + for i, param := range params { + p[i] = param + } + s.functions[name] = fn + s.parameters[name] = params + return nil +} + +// Add all the constants defined in the plugin as constants of the script +func (s *StarlarkCommon) addConstants(builtins *starlark.StringDict) error { + for key, val := range s.Constants { + sVal, err := asStarlarkValue(val) + if err != nil { + return fmt.Errorf("converting type %T failed: %v", val, err) + } + (*builtins)[key] = sVal + } + return nil +} + +func (s *StarlarkCommon) sourceProgram(builtins starlark.StringDict, filename string) (*starlark.Program, error) { + var src interface{} + if s.Source != "" { + src = s.Source + } + _, program, err := starlark.SourceProgram(s.Script, src, builtins.Has) + return program, err +} + +// Call calls the function corresponding to the given name. +func (s *StarlarkCommon) Call(name string) (starlark.Value, error) { + fn, ok := s.functions[name] + if !ok { + return nil, fmt.Errorf("function %q does not exist", name) + } + args, ok := s.parameters[name] + if !ok { + return nil, fmt.Errorf("params for function %q do not exist", name) + } + return starlark.Call(s.thread, fn, args, nil) +} + +func (s *StarlarkCommon) LogError(err error) { + if err, ok := err.(*starlark.EvalError); ok { + for _, line := range strings.Split(err.Backtrace(), "\n") { + s.Log.Error(line) + } + } else { + s.Log.Error(err.Msg) + } +} + +func LoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) { + switch module { + case "json.star": + return starlark.StringDict{ + "json": starlarkjson.Module, + }, nil + case "logging.star": + return starlark.StringDict{ + "log": LogModule(logger), + }, nil + case "math.star": + return starlark.StringDict{ + "math": math.Module, + }, nil + case "time.star": + return starlark.StringDict{ + "time": time.Module, + }, nil + default: + return nil, errors.New("module " + module + " is not available") + } +} + +func init() { + // https://github.com/bazelbuild/starlark/issues/20 + resolve.AllowNestedDef = true + resolve.AllowLambda = true + resolve.AllowFloat = true + resolve.AllowSet = true + resolve.AllowGlobalReassign = true + resolve.AllowRecursion = true +} diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/common/starlark/tag_dict.go similarity index 75% rename from plugins/processors/starlark/tag_dict.go rename to plugins/common/starlark/tag_dict.go index 7dbb8c12d0ed6..56ee0f6551d81 100644 --- a/plugins/processors/starlark/tag_dict.go +++ b/plugins/common/starlark/tag_dict.go @@ -5,8 +5,9 @@ import ( "fmt" "strings" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) // TagDict is a starlark.Value for the metric tags. 
It is heavily based on the @@ -17,17 +18,17 @@ type TagDict struct { func (d TagDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -168,7 +169,7 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d TagDict) Iterate() starlark.Iterator { d.tagIterCount++ return &TagIterator{Metric: d.Metric, tags: d.metric.TagList()} @@ -196,3 +197,27 @@ func (i *TagIterator) Next(p *starlark.Value) bool { func (i *TagIterator) Done() { i.tagIterCount-- } + +// ToTags converts a starlark.Value to a map of string. +func toTags(value starlark.Value) (map[string]string, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of tags") + if err != nil { + return nil, err + } + result := make(map[string]string, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for tags") + if err != nil { + return nil, err + } + value, err := toString(item[1], "The type %T is unsupported as type of value for tags") + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 271d63e7cac2e..457f31e4162a1 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,9 +4,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "github.com/influxdata/telegraf/internal/choice" "os" "strings" + + "github.com/influxdata/telegraf/internal/choice" ) // ClientConfig represents the standard client TLS config. 
@@ -14,6 +15,7 @@ type ClientConfig struct { TLSCA string `toml:"tls_ca"` TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` InsecureSkipVerify bool `toml:"insecure_skip_verify"` ServerName string `toml:"tls_server_name"` @@ -27,6 +29,7 @@ type ClientConfig struct { type ServerConfig struct { TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` TLSCipherSuites []string `toml:"tls_cipher_suites"` TLSMinVersion string `toml:"tls_min_version"` diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index b118c48b5f912..123523bb54f05 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") @@ -33,6 +34,15 @@ func TestClientConfig(t *testing.T) { TLSKey: pki.ClientKeyPath(), }, }, + { + name: "success with tls key password set", + client: tls.ClientConfig{ + TLSCA: pki.CACertPath(), + TLSCert: pki.ClientCertPath(), + TLSKey: pki.ClientKeyPath(), + TLSKeyPwd: "", + }, + }, { name: "invalid ca", client: tls.ClientConfig{ @@ -137,6 +147,18 @@ func TestServerConfig(t *testing.T) { TLSMaxVersion: pki.TLSMaxVersion(), }, }, + { + name: "success with tls key password set", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSKeyPwd: "", + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.TLSMaxVersion(), + }, + }, { name: "missing tls cipher suites is okay", server: tls.ServerConfig{ @@ -323,6 +345,8 @@ func TestConnect(t *testing.T) { resp, err := client.Get(ts.URL) require.NoError(t, err) + + defer resp.Body.Close() require.Equal(t, 200, resp.StatusCode) } diff --git a/plugins/common/tls/utils.go b/plugins/common/tls/utils.go index ddc12d2c1e5e3..65388640f7dd8 100644 --- a/plugins/common/tls/utils.go +++ b/plugins/common/tls/utils.go @@ -10,11 +10,11 @@ func ParseCiphers(ciphers []string) ([]uint16, error) { suites := []uint16{} for _, cipher := range ciphers { - if v, ok := tlsCipherMap[cipher]; ok { - suites = append(suites, v) - } else { + v, ok := tlsCipherMap[cipher] + if !ok { return nil, fmt.Errorf("unsupported cipher %q", cipher) } + suites = append(suites, v) } return suites, nil diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md index aba5a7f83ec27..90a8bfc2b3383 100644 --- a/plugins/inputs/activemq/README.md +++ b/plugins/inputs/activemq/README.md @@ -2,7 +2,7 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API. -### Configuration: +## Configuration ```toml # Description @@ -33,7 +33,7 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A # insecure_skip_verify = false ``` -### Metrics +## Metrics Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API. 
@@ -47,7 +47,7 @@ Every effort was made to preserve the names based on the XML response from the A - consumer_count - enqueue_count - dequeue_count -+ activemq_topics +- activemq_topics - tags: - name - source @@ -76,7 +76,7 @@ Every effort was made to preserve the names based on the XML response from the A ### Example Output -``` +```shell activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md index 59ff6ed702db7..aed19ed773825 100644 --- a/plugins/inputs/aerospike/README.md +++ b/plugins/inputs/aerospike/README.md @@ -9,7 +9,8 @@ The metric names, to make it less complicated in querying, have replaced all `-` All metrics are attempted to be cast to integers, then booleans, then strings. -### Configuration: +## Configuration + ```toml # Read stats from aerospike server(s) [[inputs.aerospike]] @@ -48,68 +49,66 @@ All metrics are attempted to be cast to integers, then booleans, then strings. # by default, aerospike produces a 100 bucket histogram # this is not great for most graphing tools, this will allow # the ability to squash this to a smaller number of buckets - # To have a balanced histogram, the number of buckets chosen + # To have a balanced histogram, the number of buckets chosen # should divide evenly into 100. # num_histogram_buckets = 100 # default: 10 ``` -### Measurements: +## Measurements The aerospike metrics are under a few measurement names: ***aerospike_node***: These are the aerospike **node** measurements, which are available from the aerospike `statistics` command. - ie, - ``` - telnet localhost 3003 - statistics - ... - ``` +```text + telnet localhost 3003 + statistics + ... +``` ***aerospike_namespace***: These are aerospike namespace measurements, which are available from the aerospike `namespace/` command. - ie, - ``` - telnet localhost 3003 - namespaces - ;;etc. - namespace/ - ... - ``` +```text + telnet localhost 3003 + namespaces + ;;etc. + namespace/ + ... +``` + ***aerospike_set***: These are aerospike set measurements, which are available from the aerospike `sets//` command. - ie, - ``` - telnet localhost 3003 - sets - sets/ - sets// - ... - ``` +```text + telnet localhost 3003 + sets + sets/ + sets// + ... +``` + ***aerospike_histogram_ttl***: These are aerospike ttl hisogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=ttl` command. - ie, - ``` - telnet localhost 3003 - histogram:namespace=;type=ttl - histogram:namespace=;[set=;]type=ttl - ... - ``` +```text + telnet localhost 3003 + histogram:namespace=;type=ttl + histogram:namespace=;[set=;]type=ttl + ... +``` + ***aerospike_histogram_object_size_linear***: These are aerospike object size linear histogram measurements, which is available from the aerospike `histogram:namespace=;[set=;]type=object_size_linear` command. - ie, - ``` - telnet localhost 3003 - histogram:namespace=;type=object_size_linear - histogram:namespace=;[set=;]type=object_size_linear - ... 
- ``` +```text + telnet localhost 3003 + histogram:namespace=;type=object_size_linear + histogram:namespace=;[set=;]type=object_size_linear + ... +``` -### Tags: +### Tags All measurements have tags: @@ -126,13 +125,14 @@ Set metrics have tags: - set_name Histogram metrics have tags: + - namespace_name - set_name (optional) - type -### Example Output: +## Example Output -``` +```shell % telegraf --input-filter aerospike --test > aerospike_node,aerospike_host=localhost:3000,node_name="BB9020011AC4202" batch_error=0i,batch_index_complete=0i,batch_index_created_buffers=0i,batch_index_destroyed_buffers=0i,batch_index_error=0i,batch_index_huge_buffers=0i,batch_index_initiate=0i,batch_index_queue="0:0,0:0,0:0,0:0",batch_index_timeout=0i,batch_index_unused_buffers=0i,batch_initiate=0i,batch_queue=0i,batch_timeout=0i,client_connections=6i,cluster_integrity=true,cluster_key="8AF422E05281249E",cluster_size=1i,delete_queue=0i,demarshal_error=0i,early_tsvc_batch_sub_error=0i,early_tsvc_client_error=0i,early_tsvc_udf_sub_error=0i,fabric_connections=16i,fabric_msgs_rcvd=0i,fabric_msgs_sent=0i,heartbeat_connections=0i,heartbeat_received_foreign=0i,heartbeat_received_self=0i,info_complete=47i,info_queue=0i,migrate_allowed=true,migrate_partitions_remaining=0i,migrate_progress_recv=0i,migrate_progress_send=0i,objects=0i,paxos_principal="BB9020011AC4202",proxy_in_progress=0i,proxy_retry=0i,query_long_running=0i,query_short_running=0i,reaped_fds=0i,record_refs=0i,rw_in_progress=0i,scans_active=0i,sindex_gc_activity_dur=0i,sindex_gc_garbage_cleaned=0i,sindex_gc_garbage_found=0i,sindex_gc_inactivity_dur=0i,sindex_gc_list_creation_time=0i,sindex_gc_list_deletion_time=0i,sindex_gc_locktimedout=0i,sindex_gc_objects_validated=0i,sindex_ucgarbage_found=0i,sub_objects=0i,system_free_mem_pct=92i,system_swapping=false,tsvc_queue=0i,uptime=1457i 1468923222000000000 > aerospike_namespace,aerospike_host=localhost:3000,namespace=test,node_name="BB9020011AC4202" 
allow_nonxdr_writes=true,allow_xdr_writes=true,available_bin_names=32768i,batch_sub_proxy_complete=0i,batch_sub_proxy_error=0i,batch_sub_proxy_timeout=0i,batch_sub_read_error=0i,batch_sub_read_not_found=0i,batch_sub_read_success=0i,batch_sub_read_timeout=0i,batch_sub_tsvc_error=0i,batch_sub_tsvc_timeout=0i,client_delete_error=0i,client_delete_not_found=0i,client_delete_success=0i,client_delete_timeout=0i,client_lang_delete_success=0i,client_lang_error=0i,client_lang_read_success=0i,client_lang_write_success=0i,client_proxy_complete=0i,client_proxy_error=0i,client_proxy_timeout=0i,client_read_error=0i,client_read_not_found=0i,client_read_success=0i,client_read_timeout=0i,client_tsvc_error=0i,client_tsvc_timeout=0i,client_udf_complete=0i,client_udf_error=0i,client_udf_timeout=0i,client_write_error=0i,client_write_success=0i,client_write_timeout=0i,cold_start_evict_ttl=4294967295i,conflict_resolution_policy="generation",current_time=206619222i,data_in_index=false,default_ttl=432000i,device_available_pct=99i,device_free_pct=100i,device_total_bytes=4294967296i,device_used_bytes=0i,disallow_null_setname=false,enable_benchmarks_batch_sub=false,enable_benchmarks_read=false,enable_benchmarks_storage=false,enable_benchmarks_udf=false,enable_benchmarks_udf_sub=false,enable_benchmarks_write=false,enable_hist_proxy=false,enable_xdr=false,evict_hist_buckets=10000i,evict_tenths_pct=5i,evict_ttl=0i,evicted_objects=0i,expired_objects=0i,fail_generation=0i,fail_key_busy=0i,fail_record_too_big=0i,fail_xdr_forbidden=0i,geo2dsphere_within.earth_radius_meters=6371000i,geo2dsphere_within.level_mod=1i,geo2dsphere_within.max_cells=12i,geo2dsphere_within.max_level=30i,geo2dsphere_within.min_level=1i,geo2dsphere_within.strict=true,geo_region_query_cells=0i,geo_region_query_falsepos=0i,geo_region_query_points=0i,geo_region_query_reqs=0i,high_water_disk_pct=50i,high_water_memory_pct=60i,hwm_breached=false,ldt_enabled=false,ldt_gc_rate=0i,ldt_page_size=8192i,master_objects=0i,master_sub_objects=0i,max_ttl=315360000i,max_void_time=0i,memory_free_pct=100i,memory_size=1073741824i,memory_used_bytes=0i,memory_used_data_bytes=0i,memory_used_index_bytes=0i,memory_used_sindex_bytes=0i,migrate_order=5i,migrate_record_receives=0i,migrate_record_retransmits=0i,migrate_records_skipped=0i,migrate_records_transmitted=0i,migrate_rx_instances=0i,migrate_rx_partitions_active=0i,migrate_rx_partitions_initial=0i,migrate_rx_partitions_remaining=0i,migrate_sleep=1i,migrate_tx_instances=0i,migrate_tx_partitions_active=0i,migrate_tx_partitions_imbalance=0i,migrate_tx_partitions_initial=0i,migrate_tx_partitions_remaining=0i,non_expirable_objects=0i,ns_forward_xdr_writes=false,nsup_cycle_duration=0i,nsup_cycle_sleep_pct=0i,objects=0i,prole_objects=0i,prole_sub_objects=0i,query_agg=0i,query_agg_abort=0i,query_agg_avg_rec_count=0i,query_agg_error=0i,query_agg_success=0i,query_fail=0i,query_long_queue_full=0i,query_long_reqs=0i,query_lookup_abort=0i,query_lookup_avg_rec_count=0i,query_lookup_error=0i,query_lookup_success=0i,query_lookups=0i,query_reqs=0i,query_short_queue_full=0i,query_short_reqs=0i,query_udf_bg_failure=0i,query_udf_bg_success=0i,read_consistency_level_override="off",repl_factor=1i,scan_aggr_abort=0i,scan_aggr_complete=0i,scan_aggr_error=0i,scan_basic_abort=0i,scan_basic_complete=0i,scan_basic_error=0i,scan_udf_bg_abort=0i,scan_udf_bg_complete=0i,scan_udf_bg_error=0i,set_deleted_objects=0i,sets_enable_xdr=true,sindex.data_max_memory="ULONG_MAX",sindex.num_partitions=32i,single_bin=false,stop_writes=false,stop_writes_pct=90i,storag
e_engine="device",storage_engine.cold_start_empty=false,storage_engine.data_in_memory=true,storage_engine.defrag_lwm_pct=50i,storage_engine.defrag_queue_min=0i,storage_engine.defrag_sleep=1000i,storage_engine.defrag_startup_minimum=10i,storage_engine.disable_odirect=false,storage_engine.enable_osync=false,storage_engine.file="/opt/aerospike/data/test.dat",storage_engine.filesize=4294967296i,storage_engine.flush_max_ms=1000i,storage_engine.fsync_max_sec=0i,storage_engine.max_write_cache=67108864i,storage_engine.min_avail_pct=5i,storage_engine.post_write_queue=0i,storage_engine.scheduler_mode="null",storage_engine.write_block_size=1048576i,storage_engine.write_threads=1i,sub_objects=0i,udf_sub_lang_delete_success=0i,udf_sub_lang_error=0i,udf_sub_lang_read_success=0i,udf_sub_lang_write_success=0i,udf_sub_tsvc_error=0i,udf_sub_tsvc_timeout=0i,udf_sub_udf_complete=0i,udf_sub_udf_error=0i,udf_sub_udf_timeout=0i,write_commit_level_override="off",xdr_write_error=0i,xdr_write_success=0i,xdr_write_timeout=0i,{test}_query_hist_track_back=300i,{test}_query_hist_track_slice=10i,{test}_query_hist_track_thresholds="1,8,64",{test}_read_hist_track_back=300i,{test}_read_hist_track_slice=10i,{test}_read_hist_track_thresholds="1,8,64",{test}_udf_hist_track_back=300i,{test}_udf_hist_track_slice=10i,{test}_udf_hist_track_thresholds="1,8,64",{test}_write_hist_track_back=300i,{test}_write_hist_track_slice=10i,{test}_write_hist_track_thresholds="1,8,64" 1468923222000000000 diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md index 4e351ea6d8b37..0f6331d7df57d 100644 --- a/plugins/inputs/aliyuncms/README.md +++ b/plugins/inputs/aliyuncms/README.md @@ -1,12 +1,14 @@ # Alibaba (Aliyun) CloudMonitor Service Statistics Input Plugin + Here and after we use `Aliyun` instead `Alibaba` as it is default naming across web console and docs. This plugin will pull Metric Statistics from Aliyun CMS. -### Aliyun Authentication +## Aliyun Authentication This plugin uses an [AccessKey](https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572) credential for Authentication with the Aliyun OpenAPI endpoint. In the following order the plugin will attempt to authenticate. + 1. Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`, `role_session_name` is specified 2. AccessKey STS token credential if `access_key_id`, `access_key_secret`, `access_key_sts_token` is specified 3. AccessKey credential if `access_key_id`, `access_key_secret` is specified @@ -15,7 +17,7 @@ In the following order the plugin will attempt to authenticate. 6. Environment variables credential 7. Instance metadata credential -### Configuration: +## Configuration ```toml ## Aliyun Credentials @@ -27,7 +29,7 @@ In the following order the plugin will attempt to authenticate. ## 5) RSA keypair credential ## 6) Environment variables credential ## 7) Instance metadata credential - + # access_key_id = "" # access_key_secret = "" # access_key_sts_token = "" @@ -38,7 +40,7 @@ In the following order the plugin will attempt to authenticate. # role_name = "" ## Specify the ali cloud region list to be queried for metrics and objects discovery - ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here + ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here ## is to limit the list as much as possible. 
Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm ## Default supported regions are: ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, @@ -46,14 +48,14 @@ In the following order the plugin will attempt to authenticate. ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 ## ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich - ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then + ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then ## it will be reported on the start - for example for 'acs_cdn' project: ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) ## Currently, discovery supported for the following projects: ## - acs_ecs_dashboard ## - acs_rds_dashboard ## - acs_slb_dashboard - ## - acs_vpc_eip + ## - acs_vpc_eip regions = ["cn-hongkong"] # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all @@ -66,41 +68,41 @@ In the following order the plugin will attempt to authenticate. # ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) period = "5m" - + ## Collection Delay (required - must account for metrics availability via AliyunCMS API) delay = "1m" - + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" - + ## Metric Statistic Project (required) project = "acs_slb_dashboard" - + ## Maximum requests per second, default value is 200 ratelimit = 200 - + ## How often the discovery API call executed (default 1m) #discovery_interval = "1m" - + ## Metrics to Pull (Required) [[inputs.aliyuncms.metrics]] - ## Metrics names to be requested, + ## Metrics names to be requested, ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq names = ["InstanceActiveConnection", "InstanceNewConnection"] - + ## Dimension filters for Metric (these are optional). ## This allows to get additional metric dimension. If dimension is not specified it can be returned or ## the data can be aggregated - it depends on particular metric, you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq ## ## Note, that by default dimension filter includes the list of discovered objects in scope (if discovery is enabled) ## Values specified here would be added into the list of discovered objects. - ## You can specify either single dimension: + ## You can specify either single dimension: #dimensions = '{"instanceId": "p-example"}' - + ## Or you can specify several dimensions at once: #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]' - + ## Enrichment tags, can be added from discovery (if supported) ## Notation is : ## To figure out which fields are available, consult the Describe API per project. @@ -111,14 +113,14 @@ In the following order the plugin will attempt to authenticate. # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]" # ] ## The following tags added by default: regionId (if discovery enabled), userId, instanceId. - + ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metric without discovery - ## data would be emitted, otherwise dropped. 
This cane be of help, in case debugging dimension filters, or partial coverage - ## of discovery scope vs monitoring scope + ## data would be emitted, otherwise dropped. This cane be of help, in case debugging dimension filters, or partial coverage + ## of discovery scope vs monitoring scope #allow_dps_without_discovery = false ``` -#### Requirements and Terminology +### Requirements and Terminology Plugin Configuration utilizes [preset metric items references](https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0) @@ -128,7 +130,7 @@ Plugin Configuration utilizes [preset metric items references](https://www.aliba - `names` must be preset metric names - `dimensions` must be preset dimension values -### Measurements & Fields: +## Measurements & Fields Each Aliyun CMS Project monitored records a measurement with fields for each available Metric Statistic Project and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -139,9 +141,9 @@ Project and Metrics are represented in [snake case](https://en.wikipedia.org/wik - {metric}_maximum (metric Maximum value) - {metric}_value (metric Value value) -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter aliyuncms --test > aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 690df0d3b0e46..b0a41447ea9f0 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -71,6 +71,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/intel_pmu" _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" @@ -127,6 +128,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" + _ "github.com/influxdata/telegraf/plugins/inputs/nomad" _ "github.com/influxdata/telegraf/plugins/inputs/nsd" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" @@ -137,6 +139,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/openldap" _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/openstack" _ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" @@ -195,6 +198,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/unbound" _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" _ "github.com/influxdata/telegraf/plugins/inputs/varnish" + _ "github.com/influxdata/telegraf/plugins/inputs/vault" _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" _ 
"github.com/influxdata/telegraf/plugins/inputs/win_eventlog" diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md index ac080974dd274..f33df02c1ab3a 100644 --- a/plugins/inputs/amd_rocm_smi/README.md +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -2,7 +2,7 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools) binary to pull GPU stats including memory and GPU usage, temperatures and other. -### Configuration +## Configuration ```toml # Pulls statistics from AMD GPUs attached to the host @@ -14,7 +14,8 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenComput # timeout = "5s" ``` -### Metrics +## Metrics + - measurement: `amd_rocm_smi` - tags - `name` (entry name assigned by rocm-smi executable) @@ -36,21 +37,28 @@ This plugin uses a query on the [`rocm-smi`](https://github.com/RadeonOpenComput - `clocks_current_memory` (integer, Mhz) - `power_draw` (float, Watt) -### Troubleshooting +## Troubleshooting + Check the full output by running `rocm-smi` binary manually. Linux: + ```sh rocm-smi rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json ``` + Please include the output of this command if opening a GitHub issue, together with ROCm version. + ### Example Output -``` + +```shell amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000 amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000 amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000 ``` + ### Limitations and notices + Please notice that this plugin has been developed and tested on a limited number of versions and small set of GPUs. Currently the latest ROCm version tested is 4.3.0. Notice that depending on the device and driver versions the amount of information provided by `rocm-smi` can vary so that some fields would start/stop appearing in the metrics upon updates. The `rocm-smi` JSON output is not perfectly homogeneous and is possibly changing in the future, hence parsing and unmarshaling can start failing upon updating ROCm. 
diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index ff417eb26b67c..b383b723d67dc 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -7,8 +7,9 @@ Metrics are read from a topic exchange using the configured queue and binding_ke Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). For an introduction to AMQP see: -- https://www.rabbitmq.com/tutorials/amqp-concepts.html -- https://www.rabbitmq.com/getstarted.html + +- [amqp - concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html) +- [rabbitmq: getting started](https://www.rabbitmq.com/getstarted.html) The following defaults are known to work with RabbitMQ: diff --git a/plugins/inputs/apache/README.md b/plugins/inputs/apache/README.md index b8822edebf314..710d8cbca7d5e 100644 --- a/plugins/inputs/apache/README.md +++ b/plugins/inputs/apache/README.md @@ -4,7 +4,7 @@ The Apache plugin collects server performance information using the [`mod_status Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable). -### Configuration: +## Configuration ```toml # Read Apache status information (mod_status) @@ -29,7 +29,7 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve # insecure_skip_verify = false ``` -### Measurements & Fields: +## Measurements & Fields - apache - BusyWorkers (float) @@ -71,14 +71,14 @@ The following fields are collected from the `Scoreboard`, and represent the numb - scboard_starting (float) - scboard_waiting (float) -### Tags: +## Tags - All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output -``` +```shell apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000 ``` diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md index 97526d7ec3847..eb100a462fd4c 100644 --- a/plugins/inputs/apcupsd/README.md +++ b/plugins/inputs/apcupsd/README.md @@ -2,11 +2,11 @@ This plugin reads data from an apcupsd daemon over its NIS network protocol. -### Requirements +## Requirements apcupsd should be installed and it's daemon should be running. -### Configuration +## Configuration ```toml [[inputs.apcupsd]] @@ -18,7 +18,7 @@ apcupsd should be installed and it's daemon should be running. timeout = "5s" ``` -### Metrics +## Metrics - apcupsd - tags: @@ -43,11 +43,9 @@ apcupsd should be installed and it's daemon should be running. 
- nominal_power - firmware +## Example output - -### Example output - -``` +```shell apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000 ``` diff --git a/plugins/inputs/aurora/README.md b/plugins/inputs/aurora/README.md index cef7ac6c7e045..90910101f624e 100644 --- a/plugins/inputs/aurora/README.md +++ b/plugins/inputs/aurora/README.md @@ -4,7 +4,7 @@ The Aurora Input Plugin gathers metrics from [Apache Aurora](https://aurora.apac For monitoring recommendations reference [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/) -### Configuration: +## Configuration ```toml [[inputs.aurora]] @@ -32,7 +32,7 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: # insecure_skip_verify = false ``` -### Metrics: +## Metrics - aurora - tags: @@ -42,22 +42,24 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: - Numeric metrics are collected from the `/vars` endpoint; string fields are not gathered. - -### Troubleshooting: +## Troubleshooting Check the Scheduler role, the leader will return a 200 status: -``` + +```shell curl -v http://127.0.0.1:8081/leaderhealth ``` Get available metrics: -``` + +```shell curl http://127.0.0.1:8081/vars ``` -### Example Output: +## Example Output The example output below has been trimmed. -``` + +```shell > aurora,role=leader,scheduler=http://debian-stretch-aurora-coordinator-3.virt:8081 CronBatchWorker_batch_locked_events=0i,CronBatchWorker_batch_locked_events_per_sec=0,CronBatchWorker_batch_locked_nanos_per_event=0,CronBatchWorker_batch_locked_nanos_total=0i,CronBatchWorker_batch_locked_nanos_total_per_sec=0,CronBatchWorker_batch_unlocked_events=0i,CronBatchWorker_batch_unlocked_events_per_sec=0,CronBatchWorker_batch_unlocked_nanos_per_event=0,CronBatchWorker_batch_unlocked_nanos_total=0i,CronBatchWorker_batch_unlocked_nanos_total_per_sec=0,CronBatchWorker_batches_processed=0i,CronBatchWorker_items_processed=0i,CronBatchWorker_last_processed_batch_size=0i,CronBatchWorker_queue_size=0i,TaskEventBatchWorker_batch_locked_events=0i,TaskEventBatchWorker_batch_locked_events_per_sec=0,TaskEventBatchWorker_batch_locked_nanos_per_event=0,TaskEventBatchWorker_batch_locked_nanos_total=0i,TaskEventBatchWorker_batch_locked_nanos_total_per_sec=0,TaskEventBatchWorker_batch_unlocked_events=0i,TaskEventBatchWorker_batch_unlocked_events_per_sec=0,TaskEventBatchWorker_batch_unlocked_nanos_per_event=0,TaskEventBatchWorker_batch_unlocked_nanos_total=0i,TaskEventBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskEventBatchWorker_batches_processed=0i,TaskEventBatchWorker_items_processed=0i,TaskEventBatchWorker_last_processed_batch_size=0i,TaskEventBatchWorker_queue_size=0i,TaskGroupBatchWorker_batch_locked_events=0i,TaskGroupBatchWorker_batch_locked_events_per_sec=0,TaskGroupBatchWorker_batch_locked_nanos_per_event=0,TaskGroupBatchWorker_batch_locked_nanos_total=0i,TaskGroupBatchWorker_batch_locked_nanos_total_per_sec=0,TaskGroupBatchWorker_batch_unlocked_events=0i,TaskGroupBatchWorker_batch_unlocked_events_per_sec=0,TaskGroupBatchWorker_batch_unlocked_nanos_per_event=0,TaskGroupBatchWorker_batch_unlocked_nanos_total=0i,TaskGroupBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskGroupBatchWorker_batches_processed=0i,TaskGroupBatchWorker_items_processed=0i,
TaskGroupBatchWorker_last_processed_batch_size=0i,TaskGroupBatchWorker_queue_size=0i,assigner_launch_failures=0i,async_executor_uncaught_exceptions=0i,async_tasks_completed=1i,cron_job_collisions=0i,cron_job_concurrent_runs=0i,cron_job_launch_failures=0i,cron_job_misfires=0i,cron_job_parse_failures=0i,cron_job_triggers=0i,cron_jobs_loaded=1i,empty_slots_dedicated_large=0i,empty_slots_dedicated_medium=0i,empty_slots_dedicated_revocable_large=0i,empty_slots_dedicated_revocable_medium=0i,empty_slots_dedicated_revocable_small=0i,empty_slots_dedicated_revocable_xlarge=0i,empty_slots_dedicated_small=0i,empty_slots_dedicated_xlarge=0i,empty_slots_large=0i,empty_slots_medium=0i,empty_slots_revocable_large=0i,empty_slots_revocable_medium=0i,empty_slots_revocable_small=0i,empty_slots_revocable_xlarge=0i,empty_slots_small=0i,empty_slots_xlarge=0i,event_bus_dead_events=0i,event_bus_exceptions=1i,framework_registered=1i,globally_banned_offers_size=0i,http_200_responses_events=55i,http_200_responses_events_per_sec=0,http_200_responses_nanos_per_event=0,http_200_responses_nanos_total=310416694i,http_200_responses_nanos_total_per_sec=0,job_update_delete_errors=0i,job_update_recovery_errors=0i,job_update_state_change_errors=0i,job_update_store_delete_all_events=1i,job_update_store_delete_all_events_per_sec=0,job_update_store_delete_all_nanos_per_event=0,job_update_store_delete_all_nanos_total=1227254i,job_update_store_delete_all_nanos_total_per_sec=0,job_update_store_fetch_details_query_events=74i,job_update_store_fetch_details_query_events_per_sec=0,job_update_store_fetch_details_query_nanos_per_event=0,job_update_store_fetch_details_query_nanos_total=24643149i,job_update_store_fetch_details_query_nanos_total_per_sec=0,job_update_store_prune_history_events=59i,job_update_store_prune_history_events_per_sec=0,job_update_store_prune_history_nanos_per_event=0,job_update_store_prune_history_nanos_total=262868218i,job_update_store_prune_history_nanos_total_per_sec=0,job_updates_pruned=0i,jvm_available_processors=2i,jvm_class_loaded_count=6707i,jvm_class_total_loaded_count=6732i,jvm_class_unloaded_count=25i,jvm_gc_PS_MarkSweep_collection_count=2i,jvm_gc_PS_MarkSweep_collection_time_ms=223i,jvm_gc_PS_Scavenge_collection_count=27i,jvm_gc_PS_Scavenge_collection_time_ms=1691i,jvm_gc_collection_count=29i,jvm_gc_collection_time_ms=1914i,jvm_memory_free_mb=65i,jvm_memory_heap_mb_committed=157i,jvm_memory_heap_mb_max=446i,jvm_memory_heap_mb_used=91i,jvm_memory_max_mb=446i,jvm_memory_mb_total=157i,jvm_memory_non_heap_mb_committed=50i,jvm_memory_non_heap_mb_max=0i,jvm_memory_non_heap_mb_used=49i,jvm_threads_active=47i,jvm_threads_daemon=28i,jvm_threads_peak=48i,jvm_threads_started=62i,jvm_time_ms=1526530686927i,jvm_uptime_secs=79947i,log_entry_serialize_events=16i,log_entry_serialize_events_per_sec=0,log_entry_serialize_nanos_per_event=0,log_entry_serialize_nanos_total=4815321i,log_entry_serialize_nanos_total_per_sec=0,log_manager_append_events=16i,log_manager_append_events_per_sec=0,log_manager_append_nanos_per_event=0,log_manager_append_nanos_total=506453428i,log_manager_append_nanos_total_per_sec=0,log_manager_deflate_events=14i,log_manager_deflate_events_per_sec=0,log_manager_deflate_nanos_per_event=0,log_manager_deflate_nanos_total=21010565i,log_manager_deflate_nanos_total_per_sec=0 1526530687000000000 ``` diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md index 905e85e4cdea6..c080be4561605 100644 --- a/plugins/inputs/azure_storage_queue/README.md +++ 
b/plugins/inputs/azure_storage_queue/README.md @@ -2,7 +2,7 @@ This plugin gathers sizes of Azure Storage Queues. -### Configuration: +## Configuration ```toml # Description @@ -12,12 +12,13 @@ This plugin gathers sizes of Azure Storage Queues. ## Required Azure Storage Account access key account_key = "storageaccountaccesskey" - + ## Set to false to disable peeking age of oldest message (executes faster) # peek_oldest_message_age = true ``` -### Metrics +## Metrics + - azure_storage_queues - tags: - queue @@ -26,10 +27,10 @@ This plugin gathers sizes of Azure Storage Queues. - size (integer, count) - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue. Requires `peek_oldest_message_age` to be configured to `true`. - -### Example Output -``` +## Example Output + +```shell azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000 azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md index 88c9f14f9236a..0937adcfc5d07 100644 --- a/plugins/inputs/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -2,7 +2,7 @@ Get bcache stat from stats_total directory and dirty_data file. -# Measurements +## Measurements Meta: @@ -20,9 +20,9 @@ Measurement names: - cache_misses - cache_readaheads -### Description +## Description -``` +```text dirty_data Amount of dirty data for this backing device in the cache. Continuously updated unlike the cache set's version, but may be slightly off. @@ -51,7 +51,7 @@ cache_readaheads Count of times readahead occurred. ``` -# Example output +## Example Using this configuration: @@ -69,13 +69,13 @@ Using this configuration: When run with: -``` +```shell ./telegraf --config telegraf.conf --input-filter bcache --test ``` It produces: -``` +```shell * Plugin: bcache, Collection 1 > [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194 > [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832 diff --git a/plugins/inputs/beanstalkd/README.md b/plugins/inputs/beanstalkd/README.md index e4fe2203d8d9b..3b371989446f7 100644 --- a/plugins/inputs/beanstalkd/README.md +++ b/plugins/inputs/beanstalkd/README.md @@ -2,7 +2,7 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively). -### Configuration: +## Configuration ```toml [[inputs.beanstalkd]] @@ -14,11 +14,12 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by tubes = ["notifications"] ``` -### Metrics: +## Metrics Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for detailed explanation of `stats` and `stats-tube` commands output. 
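For reference, the exchange behind the `stats` command is a plain CRLF-terminated text protocol: the server replies with an `OK <bytes>` line followed by a YAML body of that length. The Go sketch below, assuming a local beanstalkd on the plugin's default address `localhost:11300`, shows roughly what the plugin reads; it is illustrative only and not the plugin's implementation.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
	"strings"
	"time"
)

func main() {
	// localhost:11300 is an assumption matching the default server in the config above.
	conn, err := net.DialTimeout("tcp", "localhost:11300", 5*time.Second)
	if err != nil {
		fmt.Println("connect failed:", err)
		return
	}
	defer conn.Close()

	// Commands in the beanstalkd text protocol are CRLF-terminated.
	if _, err := io.WriteString(conn, "stats\r\n"); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	r := bufio.NewReader(conn)

	// The reply line looks like "OK <bytes>"; the YAML body follows.
	header, err := r.ReadString('\n')
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	var n int
	if _, err := fmt.Sscanf(strings.TrimSpace(header), "OK %d", &n); err != nil {
		fmt.Println("unexpected reply:", header)
		return
	}

	body := make([]byte, n)
	if _, err := io.ReadFull(r, body); err != nil {
		fmt.Println("read body failed:", err)
		return
	}
	fmt.Print(string(body)) // raw YAML that the plugin turns into fields
}
```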
`beanstalkd_overview` – statistical information about the system as a whole + - fields - cmd_delete - cmd_pause_tube @@ -38,6 +39,7 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) `beanstalkd_tube` – statistical information about the specified tube + - fields - binlog_current_index - binlog_max_size @@ -90,8 +92,9 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) - version -### Example Output: -``` +## Example + +```shell beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000 beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000 diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md index d819b5ab950b8..5f51271dad73d 100644 --- a/plugins/inputs/beat/README.md +++ b/plugins/inputs/beat/README.md @@ -1,7 +1,10 @@ # Beat Input Plugin + The Beat plugin will collect metrics from the given Beat instances. It is known to work with Filebeat and Kafkabeat. -### Configuration: + +## Configuration + ```toml ## An URL from which to read Beat-formatted JSON ## Default is "http://127.0.0.1:5066". @@ -35,9 +38,11 @@ known to work with Filebeat and Kafkabeat. ## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` -### Measurements & Fields + +## Measurements & Fields + - **beat** - * Fields: + - Fields: - cpu_system_ticks - cpu_system_time_ms - cpu_total_ticks @@ -50,7 +55,7 @@ known to work with Filebeat and Kafkabeat. - memstats_memory_alloc - memstats_memory_total - memstats_rss - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -58,7 +63,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_filebeat** - * Fields: + - Fields: - events_active - events_added - events_done @@ -69,7 +74,7 @@ known to work with Filebeat and Kafkabeat. - harvester_started - input_log_files_renamed - input_log_files_truncated - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -77,7 +82,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_libbeat** - * Fields: + - Fields: - config_module_running - config_module_starts - config_module_stops @@ -105,7 +110,7 @@ known to work with Filebeat and Kafkabeat. 
- pipeline_events_retry - pipeline_events_total - pipeline_queue_acked - * Tags: + - Tags: - beat_beat - beat_host - beat_id @@ -113,7 +118,7 @@ known to work with Filebeat and Kafkabeat. - beat_version - **beat_system** - * Field: + - Field: - cpu_cores - load_1 - load_15 @@ -121,15 +126,16 @@ known to work with Filebeat and Kafkabeat. - load_norm_1 - load_norm_15 - load_norm_5 - * Tags: + - Tags: - beat_beat - beat_host - beat_id - beat_name - beat_version -### Example Output: -``` +## Example + +```shell $ telegraf --input-filter beat --test > beat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md index d67a02020f527..2ebda282c4b82 100644 --- a/plugins/inputs/bind/README.md +++ b/plugins/inputs/bind/README.md @@ -2,19 +2,19 @@ This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. -### XML Statistics Channel +## XML Statistics Channel Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile flag, and it must be specifically requested via the correct URL. Version 3 statistics are the default (and only) XML format in BIND 9.10+. -### JSON Statistics Channel +## JSON Statistics Channel JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do not enable support for JSON statistics in their BIND packages. -### Configuration: +## Configuration - **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a trailing slash in the URL. Default is "http://localhost:8053/xml/v3". @@ -27,15 +27,16 @@ version and configured statistics channel. | BIND Version | Statistics Format | Example URL | | ------------ | ----------------- | ----------------------------- | -| 9.6 - 9.8 | XML v2 | http://localhost:8053 | -| 9.9 | XML v2 | http://localhost:8053/xml/v2 | -| 9.9+ | XML v3 | http://localhost:8053/xml/v3 | -| 9.10+ | JSON v1 | http://localhost:8053/json/v1 | +| 9.6 - 9.8 | XML v2 | `http://localhost:8053` | +| 9.9 | XML v2 | `http://localhost:8053/xml/v2` | +| 9.9+ | XML v3 | `http://localhost:8053/xml/v3` | +| 9.10+ | JSON v1 | `http://localhost:8053/json/v1` | -#### Configuration of BIND Daemon +### Configuration of BIND Daemon Add the following to your named.conf if running Telegraf on the same host as the BIND daemon: -``` + +```json statistics-channels { inet 127.0.0.1 port 8053; }; @@ -46,7 +47,7 @@ configure the BIND daemon to listen on that address. Note that you should secure channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual for more information. -### Measurements & Fields: +## Measurements & Fields - bind_counter - name=value (multiple) @@ -60,7 +61,7 @@ for more information. - total - in_use -### Tags: +## Tags - All measurements - url @@ -73,7 +74,7 @@ for more information. 
- id - name -### Sample Queries: +## Sample Queries These are some useful queries (to generate dashboards or other) to run against data from this plugin: @@ -84,7 +85,7 @@ WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \ GROUP BY time(5m), "type" ``` -``` +```text name: bind_counter tags: type=qtype time non_negative_derivative_A non_negative_derivative_PTR @@ -104,11 +105,11 @@ time non_negative_derivative_A non_negative_derivative_PTR 1553865600000000000 280.6666666667443 1807.9071428570896 ``` -### Example Output +## Example Output Here is example output of this plugin: -``` +```shell bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000 diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index f7849e1735255..db2358239cc17 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -7,8 +7,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestBindJsonStats(t *testing.T) { @@ -29,7 +30,7 @@ func TestBindJsonStats(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -179,8 +180,8 @@ func TestBindJsonStats(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -202,7 +203,7 @@ func TestBindXmlStatsV2(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -384,8 +385,8 @@ func TestBindXmlStatsV2(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -407,7 +408,7 @@ func TestBindXmlStatsV3(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -611,8 +612,8 @@ func TestBindXmlStatsV3(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - 
assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -623,5 +624,5 @@ func TestBindUnparseableURL(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.Contains(t, err.Error(), "unable to parse address") + require.Contains(t, err.Error(), "unable to parse address") } diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index d905038a9d533..9227df2bac61c 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -4,7 +4,7 @@ The Bond input plugin collects network bond interface status for both the network bond interface as well as slave interfaces. The plugin collects these metrics from `/proc/net/bonding/*` files. -### Configuration: +## Configuration ```toml [[inputs.bond]] @@ -18,7 +18,7 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. # bond_interfaces = ["bond0"] ``` -### Measurements & Fields: +## Measurements & Fields - bond - active_slave (for active-backup mode) @@ -29,9 +29,9 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. - status - count -### Description: +## Description -``` +```shell active_slave Currently active slave interface for active-backup mode. @@ -45,7 +45,7 @@ count Number of slaves attached to bond ``` -### Tags: +## Tags - bond - bond @@ -54,11 +54,11 @@ count - bond - interface -### Example output: +## Example output Configuration: -``` +```toml [[inputs.bond]] ## Sets 'proc' directory path ## If not specified, then default is /proc @@ -72,13 +72,13 @@ Configuration: Run: -``` +```shell telegraf --config telegraf.conf --input-filter bond --test ``` Output: -``` +```shell * Plugin: inputs.bond, Collection 1 > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000 diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md index 1d763a430455f..2bdddf28cfd57 100644 --- a/plugins/inputs/burrow/README.md +++ b/plugins/inputs/burrow/README.md @@ -5,7 +5,7 @@ via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/l Supported Burrow version: `1.x` -### Configuration +## Configuration ```toml [[inputs.burrow]] @@ -50,7 +50,7 @@ Supported Burrow version: `1.x` # insecure_skip_verify = false ``` -### Group/Partition Status mappings +## Group/Partition Status mappings * `OK` = 1 * `NOT_FOUND` = 2 @@ -61,42 +61,41 @@ Supported Burrow version: `1.x` > unknown value will be mapped to 0 -### Fields +## Fields * `burrow_group` (one event per each consumer group) - - status (string, see Partition Status mappings) - - status_code (int, `1..6`, see Partition status mappings) - - partition_count (int, `number of partitions`) - - offset (int64, `total offset of all partitions`) - - total_lag (int64, `totallag`) - - lag (int64, `maxlag.current_lag || 0`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * partition_count (int, `number of partitions`) + * offset (int64, `total offset of all partitions`) + * total_lag (int64, `totallag`) + * lag (int64, `maxlag.current_lag || 0`) + * timestamp (int64, `end.timestamp`) * `burrow_partition` (one event per each topic partition) - - status (string, see Partition Status mappings) - - status_code (int, 
`1..6`, see Partition status mappings) - - lag (int64, `current_lag || 0`) - - offset (int64, `end.timestamp`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * lag (int64, `current_lag || 0`) + * offset (int64, `end.timestamp`) + * timestamp (int64, `end.timestamp`) * `burrow_topic` (one event per topic offset) - - offset (int64) + * offset (int64) - -### Tags +## Tags * `burrow_group` - - cluster (string) - - group (string) + * cluster (string) + * group (string) * `burrow_partition` - - cluster (string) - - group (string) - - topic (string) - - partition (int) - - owner (string) + * cluster (string) + * group (string) + * topic (string) + * partition (int) + * owner (string) * `burrow_topic` - - cluster (string) - - topic (string) - - partition (int) + * cluster (string) + * topic (string) + * partition (int) diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index 56c36bfe93d21..a68f07f2280e2 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -1,19 +1,21 @@ # Cassandra Input Plugin -### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration. +**Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration. + +## Plugin arguments -#### Plugin arguments: - **context** string: Context root used for jolokia url -- **servers** []string: List of servers with the format ":port" +- **servers** []string: List of servers with the format `:port` - **metrics** []string: List of Jmx paths that identify mbeans attributes -#### Description +## Description The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. -See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) +See: [https://jolokia.org/](https://jolokia.org/) and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) + +## Measurements -# Measurements: Cassandra plugin produces one or more measurements for each metric configured, adding Server's name as `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name. Given a configuration like: @@ -43,30 +45,30 @@ Given a configuration like: The collected metrics will be: -``` +```shell javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084 ``` -# Useful Metrics: +## Useful Metrics Here is a list of metrics that might be useful to monitor your cassandra cluster. This was put together from multiple sources on the web.
- [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics) - [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) -#### measurement = javaGarbageCollector +### measurement = javaGarbageCollector - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount - /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount -#### measurement = javaMemory +### measurement = javaMemory - /java.lang:type=Memory/HeapMemoryUsage - /java.lang:type=Memory/NonHeapMemoryUsage -#### measurement = cassandraCache +### measurement = cassandraCache - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests @@ -79,11 +81,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity -#### measurement = cassandraClient +### measurement = cassandraClient - /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients -#### measurement = cassandraClientRequest +### measurement = cassandraClientRequest - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency @@ -96,24 +98,25 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures -#### measurement = cassandraCommitLog +### measurement = cassandraCommitLog - /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize -#### measurement = cassandraCompaction +### measurement = cassandraCompaction - /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks - /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted -#### measurement = cassandraStorage +### measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load - /org.apache.cassandra.metrics:type=Storage,name=Exceptions -#### measurement = cassandraTable +### measurement = cassandraTable + Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them. 
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed @@ -124,20 +127,17 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency - -#### measurement = cassandraThreadPools +### measurement = cassandraThreadPools - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks - - diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index f167f50e7187f..35551cf847970 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -1,15 +1,14 @@ package cassandra import ( - _ "fmt" "io" "net/http" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validJavaMultiValueJSON = ` @@ -138,8 +137,8 @@ func TestHttpJsonJavaMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "HeapMemoryUsage_init": 67108864.0, @@ -167,8 +166,8 @@ func TestHttpJsonJavaMultiType(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "CollectionCount": 1.0, @@ -188,9 +187,9 @@ func TestHttp404(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) 
+ require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected for class=Cassandra @@ -200,8 +199,8 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "ReadLatency_999thPercentile": 20.0, @@ -232,8 +231,8 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields1 := map[string]interface{}{ "ReadLatency_999thPercentile": 1.0, diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 5d5afadc19fad..3d1745884b171 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -4,7 +4,7 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluste Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](https://docs.ceph.com/en/latest/mgr/telegraf/) -*Admin Socket Stats* +## Admin Socket Stats This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** @@ -26,23 +26,22 @@ used as collection tags, and all sub-keys are flattened. For example: Would be parsed into the following metrics, all of which would be tagged with collection=paxos: - - refresh = 9363435 - - refresh_latency.avgcount: 9363435 - - refresh_latency.sum: 5378.794002000 +- refresh = 9363435 +- refresh_latency.avgcount: 9363435 +- refresh_latency.sum: 5378.794002000 - -*Cluster Stats* +## Cluster Stats This gatherer works by invoking ceph commands against the cluster thus only requires the ceph client, valid ceph configuration and an access key to function (the ceph_config and ceph_user configuration variables work in conjunction to specify these prerequisites). It may be run on any server you wish which has access to the cluster. The currently supported commands are: -* ceph status -* ceph df -* ceph osd pool stats +- ceph status +- ceph df +- ceph osd pool stats -### Configuration: +## Configuration ```toml # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. @@ -89,9 +88,9 @@ the cluster. The currently supported commands are: gather_cluster_stats = false ``` -### Metrics: +## Metrics -*Admin Socket Stats* +### Admin Socket All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. 
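The scan-and-dump flow described in the Admin Socket Stats section above can be pictured with a few lines of Go: list the admin sockets in the configured directory and shell out to `ceph --admin-daemon`. The `/var/run/ceph` directory and the `*.asok` suffix are assumptions made for this sketch; the plugin's `socket_dir` setting and its own socket discovery decide the real values, and this is not the plugin's implementation.

```go
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

func main() {
	// /var/run/ceph and *.asok are assumed; adjust to match socket_dir.
	sockets, err := filepath.Glob("/var/run/ceph/*.asok")
	if err != nil {
		fmt.Println("glob failed:", err)
		return
	}

	for _, sock := range sockets {
		// The section above names "perf dump" for OSDs and "perfcounters_dump"
		// for MONs; "perf dump" is used here for brevity.
		out, err := exec.Command("ceph", "--admin-daemon", sock, "perf", "dump").Output()
		if err != nil {
			fmt.Printf("%s: %v\n", sock, err)
			continue
		}
		// The plugin flattens this JSON into fields; here it is just summarized.
		fmt.Printf("%s: %d bytes of perf counters\n", sock, len(out))
	}
}
```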
@@ -167,9 +166,9 @@ All admin measurements will have the following tags: - throttle-objecter_ops - throttle-rgw_async_rados_ops -*Cluster Stats* +## Cluster -+ ceph_health +- ceph_health - fields: - status - overall_status @@ -184,7 +183,7 @@ All admin measurements will have the following tags: - nearfull (bool) - num_remapped_pgs (float) -+ ceph_pgmap +- ceph_pgmap - fields: - version (float) - num_pgs (float) @@ -204,7 +203,7 @@ All admin measurements will have the following tags: - fields: - count (float) -+ ceph_usage +- ceph_usage - fields: - total_bytes (float) - total_used_bytes (float) @@ -223,7 +222,7 @@ All admin measurements will have the following tags: - percent_used (float) - max_avail (float) -+ ceph_pool_stats +- ceph_pool_stats - tags: - name - fields: @@ -236,12 +235,11 @@ All admin measurements will have the following tags: - recovering_bytes_per_sec (float) - recovering_keys_per_sec (float) +## Example -### Example Output: - -*Cluster Stats* +Below is an example of a custer stats: -``` +```shell ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000 ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000 ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000 @@ -251,9 +249,9 @@ ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avai ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000 ``` -*Admin Socket Stats* +Below is an example of admin socket stats: -``` +```shell > ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000 > ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000 > ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 7d0eede0f7f10..4fbb696dbd80e 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -10,38 +10,35 @@ 
Following file formats are supported: * Single value -``` +```text VAL\n ``` * New line separated values -``` +```text VAL0\n VAL1\n ``` * Space separated values -``` +```text VAL0 VAL1 ...\n ``` * Space separated keys and value, separated by new line -``` +```text KEY0 ... VAL0\n KEY1 ... VAL1\n ``` +## Tags -### Tags: - -All measurements have the following tags: - - path - +All measurements have the `path` tag. -### Configuration: +## Configuration ```toml # Read specific statistics per cgroup @@ -60,7 +57,7 @@ All measurements have the following tags: # files = ["memory.*usage*", "memory.limit_in_bytes"] ``` -### usage examples: +## Example ```toml # [[inputs.cgroup]] diff --git a/plugins/inputs/chrony/README.md b/plugins/inputs/chrony/README.md index aa4f848065297..ebb7ba65c6d49 100644 --- a/plugins/inputs/chrony/README.md +++ b/plugins/inputs/chrony/README.md @@ -51,7 +51,7 @@ Dispersion is due to system clock resolution, statistical measurement variations - Leap status - This is the leap status, which can be Normal, Insert second, Delete second or Not synchronised. -### Configuration: +## Configuration ```toml # Get standard chrony metrics, requires chronyc executable. @@ -60,34 +60,30 @@ Delete second or Not synchronised. # dns_lookup = false ``` -### Measurements & Fields: +## Measurements & Fields - chrony - - system_time (float, seconds) - - last_offset (float, seconds) - - rms_offset (float, seconds) - - frequency (float, ppm) - - residual_freq (float, ppm) - - skew (float, ppm) - - root_delay (float, seconds) - - root_dispersion (float, seconds) - - update_interval (float, seconds) + - system_time (float, seconds) + - last_offset (float, seconds) + - rms_offset (float, seconds) + - frequency (float, ppm) + - residual_freq (float, ppm) + - skew (float, ppm) + - root_delay (float, seconds) + - root_dispersion (float, seconds) + - update_interval (float, seconds) -### Tags: +### Tags - All measurements have the following tags: - - reference_id - - stratum - - leap_status + - reference_id + - stratum + - leap_status -### Example Output: +### Example Output -``` +```shell $ telegraf --config telegraf.conf --input-filter chrony --test * Plugin: chrony, Collection 1 > chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161 ``` - - - - diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index f4ca7243b8cde..b3e641cc3c502 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -9,8 +9,7 @@ The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and lat The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later. 
- -### Configuration: +## Configuration ```toml [[inputs.cisco_telemetry_mdt]] @@ -53,14 +52,16 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' ``` -### Example Output: -``` +## Example Output + +```shell ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 ``` -### NX-OS Configuration Example: -``` +### NX-OS Configuration Example + +```text Requirement DATA-SOURCE Configuration ----------------------------------------- Environment DME path sys/ch query-condition query-target=subtree&target-subtree-class=eqptPsuSlot,eqptFtSlot,eqptSupCSlot,eqptPsu,eqptFt,eqptSensor,eqptLCSlot @@ -92,13 +93,11 @@ multicast igmp NXAPI show ip igmp snooping groups multicast igmp NXAPI show ip igmp snooping groups detail multicast igmp NXAPI show ip igmp snooping groups summary multicast igmp NXAPI show ip igmp snooping mrouter -multicast igmp NXAPI show ip igmp snooping statistics +multicast igmp NXAPI show ip igmp snooping statistics multicast pim NXAPI show ip pim interface vrf all multicast pim NXAPI show ip pim neighbor vrf all multicast pim NXAPI show ip pim route vrf all multicast pim NXAPI show ip pim rp vrf all multicast pim NXAPI show ip pim statistics vrf all multicast pim NXAPI show ip pim vrf all - - ``` diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 10f1f764c0515..25b5ec9758962 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -15,11 +15,11 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "google.golang.org/grpc" "google.golang.org/grpc/credentials" _ "google.golang.org/grpc/encoding/gzip" // Register GRPC gzip decoder to support compressed telemetry "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" @@ -61,6 +61,9 @@ type CiscoTelemetryMDT struct { mutex sync.Mutex acc telegraf.Accumulator wg sync.WaitGroup + + // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility + dialout.UnimplementedGRPCMdtDialoutServer } type NxPayloadXfromStructure struct { diff --git 
a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index 745b26dea4b20..90fc949276948 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -10,9 +10,9 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Cannot switch to "google.golang.org/protobuf/proto", "github.com/golang/protobuf/proto" is used by "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go index 8f6ea93eab4b3..1d7d95a95a757 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -1,9 +1,10 @@ package cisco_telemetry_mdt import ( - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "strconv" "strings" + + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" ) //xform Field to string diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index 9b9e6caa904f7..b7bbe85c0de5c 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -2,7 +2,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. -### Configuration +## Configuration + ```toml # Read metrics from one or many ClickHouse servers [[inputs.clickhouse]] @@ -71,7 +72,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic # insecure_skip_verify = false ``` -### Metrics +## Metrics - clickhouse_events - tags: @@ -81,7 +82,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.events][] -+ clickhouse_metrics +- clickhouse_metrics - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -97,7 +98,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.asynchronous_metrics][] -+ clickhouse_tables +- clickhouse_tables - tags: - source (ClickHouse server hostname) - table @@ -115,9 +116,9 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - root_nodes (count of node from [system.zookeeper][] where path=/) + - root_nodes (count of node from [system.zookeeper][] where path=/) -+ clickhouse_replication_queue +- clickhouse_replication_queue - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -132,8 +133,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - shard_num (Shard number in the cluster [optional]) - fields: - detached_parts (total detached parts for all tables and databases from [system.detached_parts][]) - -+ clickhouse_dictionaries + +- clickhouse_dictionaries - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -153,7 +154,7 @@ This plugin gathers the statistic data 
from [ClickHouse](https://github.com/Clic - failed - counter which show total failed mutations from first clickhouse-server run - completed - counter which show total successful finished mutations from first clickhouse-server run -+ clickhouse_disks +- clickhouse_disks - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -161,8 +162,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - name (disk name in storage configuration) - path (path to disk) - fields: - - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes - - keep_free_space_percent - 0-100, gauge which show current percent of required keep free disk bytes relative to total disk space bytes + - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes + - keep_free_space_percent - 0-100, gauge which show current percent of required keep free disk bytes relative to total disk space bytes - clickhouse_processes - tags: @@ -170,8 +171,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details - - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details + - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details + - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details - longest_running - float gauge which show maximum value for `elapsed` field of running processes, see [system.processes][] for details - clickhouse_text_log @@ -179,13 +180,13 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][] + - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][] - fields: - messages_last_10_min - gauge which show how many messages collected - -### Example Output -``` +### Examples + +```text clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 
read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 @@ -196,10 +197,10 @@ clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,hos [system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/ [system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/ [system.asynchronous_metrics]: 
https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/ -[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ +[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ [system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/ -[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ -[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ -[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ -[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ -[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ +[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ +[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ +[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ +[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ +[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index a4244b881cb62..d05fea611ebdd 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -3,8 +3,7 @@ The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub] and creates metrics using one of the supported [input data formats][]. - -### Configuration +## Configuration ```toml [[inputs.cloud_pubsub]] @@ -26,8 +25,8 @@ and creates metrics using one of the supported [input data formats][]. ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. ## If the streaming pull for a PubSub Subscription fails (receiver), ## the agent attempts to restart receiving messages after this many seconds. # retry_delay_seconds = 5 @@ -76,7 +75,7 @@ and creates metrics using one of the supported [input data formats][]. ## processed concurrently (use "max_outstanding_messages" instead). # max_receiver_go_routines = 0 - ## Optional. If true, Telegraf will attempt to base64 decode the + ## Optional. If true, Telegraf will attempt to base64 decode the ## PubSub message data before parsing. Many GCP services that ## output JSON to Google PubSub base64-encode the JSON payload. # base64_data = false @@ -91,8 +90,6 @@ Each plugin agent can listen to one subscription at a time, so you will need to run multiple instances of the plugin to pull messages from multiple subscriptions/topics. 
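The `base64_data` option described in the configuration above is easy to misread: decoding happens before parsing, not after. A minimal sketch of that order, assuming a hypothetical `decodeIfNeeded` helper rather than the plugin's actual code:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// decodeIfNeeded mirrors the documented behaviour of base64_data: when the
// option is enabled, the raw Pub/Sub payload is base64-decoded first and only
// then handed to the configured parser. Helper and flag names are illustrative.
func decodeIfNeeded(payload []byte, base64Data bool) ([]byte, error) {
	if !base64Data {
		return payload, nil
	}
	decoded, err := base64.StdEncoding.DecodeString(string(payload))
	if err != nil {
		return nil, fmt.Errorf("base64 decode failed: %w", err)
	}
	return decoded, nil
}

func main() {
	out, err := decodeIfNeeded([]byte("eyJ2YWx1ZSI6IDQyfQ=="), true)
	fmt.Println(string(out), err) // {"value": 42} <nil>
}
```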
- - [pubsub]: https://cloud.google.com/pubsub [pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go index d07dfe34f2290..e27c1e8104bcf 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -5,9 +5,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) const ( @@ -53,7 +54,7 @@ func TestRunParse(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -98,7 +99,7 @@ func TestRunBase64(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } @@ -145,7 +146,7 @@ func TestRunInvalidMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunOverlongMessages(t *testing.T) { @@ -192,7 +193,7 @@ func TestRunOverlongMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunErrorInSubscriber(t *testing.T) { @@ -228,12 +229,12 @@ func TestRunErrorInSubscriber(t *testing.T) { t.Fatal("expected plugin subscription to be non-nil") } acc.WaitError(1) - assert.Regexp(t, fakeErrStr, acc.Errors[0]) + require.Regexp(t, fakeErrStr, acc.Errors[0]) } func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { - assert.Equal(t, "cpu_load_short", m.Measurement) - assert.Equal(t, "server01", m.Tags["host"]) - assert.Equal(t, 23422.0, m.Fields["value"]) - assert.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) + require.Equal(t, "cpu_load_short", m.Measurement) + require.Equal(t, "server01", m.Tags["host"]) + require.Equal(t, 23422.0, m.Fields["value"]) + require.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) } diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md index 3173b43361fb6..3163d5bb4ba1b 100644 --- a/plugins/inputs/cloud_pubsub_push/README.md +++ b/plugins/inputs/cloud_pubsub_push/README.md @@ -9,8 +9,7 @@ Enable TLS by specifying the file names of a service TLS certificate and key. Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`. - -### Configuration: +## Configuration This is a sample configuration for the plugin. diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 97592f5197ab7..a904eb1ea7b0e 100644 --- a/plugins/inputs/cloudwatch/README.md +++ b/plugins/inputs/cloudwatch/README.md @@ -2,10 +2,11 @@ This plugin will pull Metric Statistics from Amazon CloudWatch. -### Amazon Authentication +## Amazon Authentication This plugin uses a credential chain for Authentication with the CloudWatch API endpoint. In the following order the plugin will attempt to authenticate. + 1. 
Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules) 2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes 3. Shared profile from `profile` attribute @@ -13,7 +14,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. 5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file) 6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) -### Configuration: +## Configuration ```toml # Pull Metric Statistics from Amazon CloudWatch @@ -112,7 +113,8 @@ API endpoint. In the following order the plugin will attempt to authenticate. # name = "LoadBalancerName" # value = "p-example" ``` -#### Requirements and Terminology + +## Requirements and Terminology Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access pattern to allow monitoring of any CloudWatch Metric. @@ -127,7 +129,8 @@ to be retrieved. If specifying >1 dimension, then the metric must contain *all* wildcard dimension is ignored. Example: -``` + +```toml [[inputs.cloudwatch]] period = "1m" interval = "5m" @@ -146,13 +149,14 @@ Example: ``` If the following ELBs are available: + - name: `p-example`, availabilityZone: `us-east-1a` - name: `p-example`, availabilityZone: `us-east-1b` - name: `q-example`, availabilityZone: `us-east-1a` - name: `q-example`, availabilityZone: `us-east-1b` - Then 2 metrics will be output: + - name: `p-example`, availabilityZone: `us-east-1a` - name: `p-example`, availabilityZone: `us-east-1b` @@ -161,11 +165,12 @@ would be exported containing the aggregate values of the ELB across availability To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart. -#### Restrictions and Limitations +## Restrictions and Limitations + - CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) - CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/) -### Measurements & Fields: +## Measurements & Fields Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic. 
Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -177,8 +182,8 @@ Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/w - {metric}_maximum (metric Maximum value) - {metric}_sample_count (metric SampleCount value) +## Tags -### Tags: Each measurement is tagged with the following identifiers to uniquely identify the associated metric Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) @@ -186,17 +191,19 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik - region (CloudWatch Region) - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) -### Troubleshooting: +## Troubleshooting You can use the aws cli to get a list of available metrics and dimensions: -``` + +```shell aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name CPUCreditBalance ``` If the expected metrics are not returned, you can try getting them manually for a short period of time: -``` + +```shell aws cloudwatch get-metric-data \ --start-time 2018-07-01T00:00:00Z \ --end-time 2018-07-01T00:15:00Z \ @@ -222,9 +229,9 @@ aws cloudwatch get-metric-data \ ]' ``` -### Example Output: +## Example -``` +```shell $ ./telegraf --config telegraf.conf --input-filter cloudwatch --test > cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 ``` diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 17305f31c93a6..bf0ab65bf3b8c 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -477,7 +477,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), MetricStat: &types.MetricStat{ - Metric: &metric, + Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), Stat: aws.String(StatisticAverage), }, @@ -489,7 +489,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), MetricStat: &types.MetricStat{ - Metric: &metric, + Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), Stat: aws.String(StatisticMaximum), }, @@ -501,7 +501,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), MetricStat: &types.MetricStat{ - Metric: &metric, + Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), Stat: aws.String(StatisticMinimum), }, @@ -513,7 +513,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), MetricStat: &types.MetricStat{ - Metric: &metric, + Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), Stat: aws.String(StatisticSum), }, @@ -525,7 +525,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string Id: aws.String("sample_count_" + id), 
Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), MetricStat: &types.MetricStat{ - Metric: &metric, + Metric: &filtered.metrics[j], Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), Stat: aws.String(StatisticSampleCount), }, diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md index 2e5fb8861dec1..74f4bd9ed567f 100644 --- a/plugins/inputs/conntrack/README.md +++ b/plugins/inputs/conntrack/README.md @@ -3,23 +3,22 @@ Collects stats from Netfilter's conntrack-tools. The conntrack-tools provide a mechanism for tracking various aspects of -network connections as they are processed by netfilter. At runtime, +network connections as they are processed by netfilter. At runtime, conntrack exposes many of those connection statistics within /proc/sys/net. Depending on your kernel version, these files can be found in either /proc/sys/net/ipv4/netfilter or /proc/sys/net/netfilter and will be -prefixed with either ip_ or nf_. This plugin reads the files specified +prefixed with either ip or nf. This plugin reads the files specified in its configuration and publishes each one as a field, with the prefix -normalized to ip_. +normalized to ip_. In order to simplify configuration in a heterogeneous environment, a superset of directory and filenames can be specified. Any locations that don't exist will be ignored. -For more information on conntrack-tools, see the +For more information on conntrack-tools, see the [Netfilter Documentation](http://conntrack-tools.netfilter.org/). - -### Configuration: +## Configuration ```toml # Collects conntrack stats from the configured directories and files. @@ -38,19 +37,19 @@ For more information on conntrack-tools, see the dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ``` -### Measurements & Fields: +## Measurements & Fields - conntrack - - ip_conntrack_count (int, count): the number of entries in the conntrack table - - ip_conntrack_max (int, size): the max capacity of the conntrack table + - ip_conntrack_count (int, count): the number of entries in the conntrack table + - ip_conntrack_max (int, size): the max capacity of the conntrack table -### Tags: +## Tags This input does not use tags. -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter conntrack --test conntrack,host=myhost ip_conntrack_count=2,ip_conntrack_max=262144 1461620427667995735 ``` diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index 71d7d26a8f5eb..609a8dfb8840f 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -6,7 +6,7 @@ to query the data. It will not report the [telemetry](https://www.consul.io/docs/agent/telemetry.html) but Consul can report those stats already using StatsD protocol if needed. -### Configuration: +## Configuration ```toml # Gather health check statuses from services registered in Consul @@ -48,13 +48,15 @@ report those stats already using StatsD protocol if needed. # tag_delimiter = ":" ``` -### Metrics: -##### metric_version = 1: +## Metrics + +### metric_version = 1 + - consul_health_checks - tags: - - node (node that check/service is registered on) - - service_name - - check_id + - node (node that check/service is registered on) + - service_name + - check_id - fields: - check_name - service_id @@ -63,27 +65,28 @@ report those stats already using StatsD protocol if needed. 
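Stepping back to the `cloudwatch.go` hunks above: replacing `Metric: &metric` with `Metric: &filtered.metrics[j]` avoids taking the address of a range variable, which (before Go 1.22) is a single variable reused on every iteration, so all of the statistic queries would otherwise end up pointing at the same metric. A standalone illustration of the pitfall, not the plugin's code:

```go
package main

import "fmt"

type metric struct{ name string }

func main() {
	metrics := []metric{{"cpu"}, {"mem"}, {"disk"}}

	var viaRangeVar, viaIndex []*metric
	for j, m := range metrics {
		// Before Go 1.22, `m` is one variable reused each iteration, so every
		// stored pointer ends up referring to whatever it held last ("disk").
		viaRangeVar = append(viaRangeVar, &m)
		// Indexing the slice yields a pointer to the element itself.
		viaIndex = append(viaIndex, &metrics[j])
	}

	fmt.Println(viaRangeVar[0].name, viaRangeVar[1].name, viaRangeVar[2].name) // disk disk disk (pre-1.22)
	fmt.Println(viaIndex[0].name, viaIndex[1].name, viaIndex[2].name)          // cpu mem disk
}
```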
- critical (integer) - warning (integer) -##### metric_version = 2: +### metric_version = 2 + - consul_health_checks - tags: - - node (node that check/service is registered on) - - service_name - - check_id - - check_name + - node (node that check/service is registered on) + - service_name + - check_id + - check_name - service_id - status - fields: - passing (integer) - critical (integer) - warning (integer) - + `passing`, `critical`, and `warning` are integer representations of the health check state. A value of `1` represents that the status was the state of the the health check at this sample. `status` is string representation of the same state. ## Example output -``` +```shell consul_health_checks,host=wolfpit,node=consul-server-node,check_id="serfHealth" check_name="Serf Health Status",service_id="",status="passing",passing=1i,critical=0i,warning=0i 1464698464486439902 consul_health_checks,host=wolfpit,node=consul-server-node,service_name=www.example.com,check_id="service:www-example-com.test01" check_name="Service 'www.example.com' check",service_id="www-example-com.test01",status="critical",passing=0i,critical=1i,warning=0i 1464698464486519036 ``` diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 1acdaea4ac76e..be39100c7d203 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -1,8 +1,9 @@ # Couchbase Input Plugin + Couchbase is a distributed NoSQL database. This plugin gets metrics for each Couchbase node, as well as detailed metrics for each bucket, for a given couchbase server. -## Configuration: +## Configuration ```toml # Read per-node and per-bucket metrics from Couchbase @@ -30,25 +31,29 @@ This plugin gets metrics for each Couchbase node, as well as detailed metrics fo # insecure_skip_verify = false ``` -## Measurements: +## Measurements ### couchbase_node Tags: + - cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint` - hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091` Fields: + - memory_free (unit: bytes, example: 23181365248.0) - memory_total (unit: bytes, example: 64424656896.0) ### couchbase_bucket Tags: + - cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) - bucket: the name of the couchbase bucket, e.g., `blastro-df` Default bucket fields: + - quota_percent_used (unit: percent, example: 68.85424936294555) - ops_per_sec (unit: count, example: 5686.789686789687) - disk_fetches (unit: count, example: 0.0) @@ -58,7 +63,8 @@ Default bucket fields: - mem_used (unit: bytes, example: 202156957464.0) Additional fields that can be configured with the `bucket_stats_included` option: -- couch_total_disk_size + +- couch_total_disk_size - couch_docs_fragmentation - couch_views_fragmentation - hit_ratio @@ -274,10 +280,9 @@ Additional fields that can be configured with the `bucket_stats_included` option - swap_total - swap_used - ## Example output -``` +```shell couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000 couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000 ``` diff --git a/plugins/inputs/couchdb/README.md 
b/plugins/inputs/couchdb/README.md index 3a7f127dbc3db..a7a6a42f9bcae 100644 --- a/plugins/inputs/couchdb/README.md +++ b/plugins/inputs/couchdb/README.md @@ -2,7 +2,7 @@ The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. -### Configuration +## Configuration ```toml [[inputs.couchdb]] @@ -15,7 +15,7 @@ The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. # basic_password = "p@ssw0rd" ``` -### Measurements & Fields: +## Measurements & Fields Statistics specific to the internals of CouchDB: @@ -60,19 +60,21 @@ httpd statistics: - httpd_bulk_requests - httpd_view_reads -### Tags: +## Tags - server (url of the couchdb _stats endpoint) -### Example output: +## Example -**Post Couchdb 2.0** -``` +### Post Couchdb 2.0 + +```shell couchdb,server=http://couchdb22:5984/_node/_local/_stats couchdb_auth_cache_hits_value=0,httpd_request_methods_delete_value=0,couchdb_auth_cache_misses_value=0,httpd_request_methods_get_value=42,httpd_status_codes_304_value=0,httpd_status_codes_400_value=0,httpd_request_methods_head_value=0,httpd_status_codes_201_value=0,couchdb_database_reads_value=0,httpd_request_methods_copy_value=0,couchdb_request_time_max=0,httpd_status_codes_200_value=42,httpd_status_codes_301_value=0,couchdb_open_os_files_value=2,httpd_request_methods_put_value=0,httpd_request_methods_post_value=0,httpd_status_codes_202_value=0,httpd_status_codes_403_value=0,httpd_status_codes_409_value=0,couchdb_database_writes_value=0,couchdb_request_time_min=0,httpd_status_codes_412_value=0,httpd_status_codes_500_value=0,httpd_status_codes_401_value=0,httpd_status_codes_404_value=0,httpd_status_codes_405_value=0,couchdb_open_databases_value=0 1536707179000000000 ``` -**Pre Couchdb 2.0** -``` +### Pre Couchdb 2.0 + +```shell couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000 ``` diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md index 8e2ef66f92451..5b82b038d768a 100644 --- a/plugins/inputs/cpu/README.md +++ b/plugins/inputs/cpu/README.md @@ -2,7 +2,8 @@ The `cpu` plugin gather metrics on the system CPUs. -#### Configuration +## Configuration + ```toml # Read metrics about cpu usage [[inputs.cpu]] @@ -16,7 +17,7 @@ The `cpu` plugin gather metrics on the system CPUs. report_active = false ``` -### Metrics +## Metrics On Linux, consult `man proc` for details on the meanings of these values. @@ -47,14 +48,14 @@ On Linux, consult `man proc` for details on the meanings of these values. - usage_guest (float, percent) - usage_guest_nice (float, percent) -### Troubleshooting +## Troubleshooting On Linux systems the `/proc/stat` file is used to gather CPU times. Percentages are based on the last 2 samples. 
-### Example Output +## Example Output -``` +```shell cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000 cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000 cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000 diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go index 9e795c82a589d..8c22bb923506e 100644 --- a/plugins/inputs/cpu/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - cpuUtil "github.com/shirou/gopsutil/cpu" + cpuUtil "github.com/shirou/gopsutil/v3/cpu" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index e51660a0adee6..3dc3242a6ed94 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - cpuUtil "github.com/shirou/gopsutil/cpu" + cpuUtil "github.com/shirou/gopsutil/v3/cpu" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/system" diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md index b335509400426..e6fded0fb27ec 100644 --- a/plugins/inputs/csgo/README.md +++ b/plugins/inputs/csgo/README.md @@ -2,7 +2,8 @@ The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. -#### Configuration +## Configuration + ```toml # Fetch metrics from a CSGO SRCDS [[inputs.csgo]] @@ -16,7 +17,7 @@ The `csgo` plugin gather metrics from Counter-Strike: Global Offensive servers. servers = [] ``` -### Metrics +## Metrics The plugin retrieves the output of the `stats` command that is executed via rcon. 
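The test changes in this diff (`csgo_test.go` below, `pubsub_test.go` and `dns_query_test.go` elsewhere) replace testify's `assert` with `require`. The behavioural difference matters: `assert` records a failure and keeps executing, while `require` calls `t.FailNow` and stops the test at the first broken expectation. A small self-contained illustration:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Illustrative only; shows why a failed precondition is safer with require.
func TestAssertVsRequire(t *testing.T) {
	var fields []string // pretend nothing was gathered

	// assert records the failure but execution continues, so later code may
	// panic (for example by indexing an empty slice) and hide the real cause.
	assert.NotEmpty(t, fields)

	// require fails and stops the test right here with a clean message.
	require.NotEmpty(t, fields)
	_ = fields[0] // never reached when the check above fails
}
```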
diff --git a/plugins/inputs/csgo/csgo_test.go b/plugins/inputs/csgo/csgo_test.go index b1d1c9b693814..ca849819842ed 100644 --- a/plugins/inputs/csgo/csgo_test.go +++ b/plugins/inputs/csgo/csgo_test.go @@ -1,10 +1,11 @@ package csgo import ( - "github.com/influxdata/telegraf/testutil" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const testInput = `CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick @@ -28,17 +29,17 @@ func TestCPUStats(t *testing.T) { t.Errorf("acc.HasMeasurement: expected csgo") } - assert.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) - assert.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) - assert.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) - assert.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) - assert.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) - assert.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) - assert.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) - assert.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) - assert.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) - assert.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) - assert.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) + require.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) + require.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) + require.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) + require.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) + require.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) + require.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) + require.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) + require.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) + require.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) + require.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) + require.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) } func requestMock(_ string, _ string) (string, error) { diff --git a/plugins/inputs/dcos/README.md b/plugins/inputs/dcos/README.md index 4c9d46a921a6b..cd3a0c73929b9 100644 --- a/plugins/inputs/dcos/README.md +++ b/plugins/inputs/dcos/README.md @@ -2,7 +2,7 @@ This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/). -**Series Cardinality Warning** +## Series Cardinality Warning Depending on the work load of your DC/OS cluster, this plugin can quickly create a high number of series which, when unchecked, can cause high load on @@ -18,7 +18,8 @@ your database. - Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration: +## Configuration + ```toml [[inputs.dcos]] ## The DC/OS cluster URL. @@ -63,13 +64,14 @@ your database. # path = ["/var/lib/mesos/slave/slaves/*"] ``` -#### Enterprise Authentication +### Enterprise Authentication When using Enterprise DC/OS, it is recommended to use a service account to authenticate with the cluster. 
The plugin requires the following permissions: -``` + +```text dcos:adminrouter:ops:system-metrics full dcos:adminrouter:ops:mesos full ``` @@ -77,14 +79,15 @@ dcos:adminrouter:ops:mesos full Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/). Quick configuration using the Enterprise CLI: -``` + +```text dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full dcos security org users grant telegraf dcos:adminrouter:ops:mesos full ``` -#### Open Source Authentication +### Open Source Authentication The Open Source DC/OS does not provide service accounts. Instead you can use of the following options: @@ -95,7 +98,8 @@ of the following options: Then `token_file` can be set by using the [dcos cli] to login periodically. The cli can login for at most XXX days, you will need to ensure the cli performs a new login before this time expires. -``` + +```shell dcos auth login --username foo --password bar dcos config show core.dcos_acs_token > ~/.dcos/token ``` @@ -107,7 +111,7 @@ token is compromised it cannot be revoked and may require a full reinstall of the cluster. For more information on this technique reference [this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add). -### Metrics: +## Metrics Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/) for details about field interpretation. @@ -185,9 +189,9 @@ for details about field interpretation. 
- fields: - fields are application specific -### Example Output: +## Example -``` +```shell dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 08943d13db0f9..34ab30ea52274 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v4" ) const ( @@ -100,7 +100,7 @@ type ClusterClient struct { type claims struct { UID string `json:"uid"` - jwt.StandardClaims + jwt.RegisteredClaims } func (e APIError) Error() string { @@ -327,9 +327,9 @@ func (c *ClusterClient) toURL(path string) string { func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{ UID: sa.AccountID, - StandardClaims: jwt.StandardClaims{ + RegisteredClaims: jwt.RegisteredClaims{ // How long we have to login with this token - ExpiresAt: time.Now().Add(time.Minute * 5).Unix(), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute * 5)), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index dd8f22f7292f5..5712afcfa3ca6 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -9,7 +9,7 @@ import ( "sync" "time" - jwt "github.com/golang-jwt/jwt/v4" + "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" @@ -237,9 +237,7 @@ func (d *DCOS) createPoints(m *Metrics) []*point { fieldKey = fieldKey + "_bytes" } - if strings.HasPrefix(fieldKey, "dcos_metrics_module_") { - fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") - } + fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") tagset := make([]string, 0, len(tags)) for k, v := range tags { diff --git a/plugins/inputs/deprecations.go b/plugins/inputs/deprecations.go new file mode 100644 index 0000000000000..14a497baff30a --- /dev/null +++ b/plugins/inputs/deprecations.go @@ -0,0 +1,48 @@ +package inputs + +import "github.com/influxdata/telegraf" + +// Deprecations lists the deprecated plugins +var Deprecations = map[string]telegraf.DeprecationInfo{ + "cassandra": { + Since: "1.7.0", + Notice: "use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead", + }, + "io": { + Since: "0.10.0", + RemovalIn: "2.0.0", + Notice: "use 'inputs.diskio' instead", + }, + "http_listener_v2": { + Since: "1.9.0", + Notice: "has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.influxdb_listener_v2' instead", + }, + "httpjson": { + Since: "1.6.0", + Notice: "use 'inputs.http' instead", + }, + "jolokia": { + Since: "1.5.0", + Notice: "use 
'inputs.jolokia2' instead", + }, + "kafka_consumer_legacy": { + Since: "1.4.0", + Notice: "use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+", + }, + "logparser": { + Since: "1.15.0", + Notice: "use 'inputs.tail' with 'grok' data format instead", + }, + "snmp_legacy": { + Since: "1.0.0", + Notice: "use 'inputs.snmp' instead", + }, + "tcp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, + "udp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, +} diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md index 4e260f44256ed..c4ad4c20e3eed 100644 --- a/plugins/inputs/directory_monitor/README.md +++ b/plugins/inputs/directory_monitor/README.md @@ -5,7 +5,7 @@ The plugin will gather all files in the directory at a configurable interval (`m This plugin is intended to read files that are moved or copied to the monitored directory, and thus files should also not be used by another process or else they may fail to be gathered. Please be advised that this plugin pulls files directly after they've been in the directory for the length of the configurable `directory_duration_threshold`, and thus files should not be written 'live' to the monitored directory. If you absolutely must write files directly, they must be guaranteed to finish writing before the `directory_duration_threshold`. -### Configuration: +## Configuration ```toml [[inputs.directory_monitor]] @@ -22,7 +22,7 @@ This plugin is intended to read files that are moved or copied to the monitored ## The amount of time a file is allowed to sit in the directory before it is picked up. ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow, ## set this higher so that the plugin will wait until the file is fully copied to the directory. - # directory_duration_threshold = "50ms" + # directory_duration_threshold = "50ms" # ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested. # files_to_monitor = ["^.*\.csv"] @@ -37,11 +37,11 @@ This plugin is intended to read files that are moved or copied to the monitored # ## The maximum amount of file paths to queue up for processing at once, before waiting until files are processed to find more files. ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary. - # file_queue_size = 100000 + # file_queue_size = 100000 # ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. 
Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" # diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go index ee1163e7a51b1..6c115bdf9769b 100644 --- a/plugins/inputs/directory_monitor/directory_monitor.go +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -261,15 +261,12 @@ func (monitor *DirectoryMonitor) ingestFile(filePath string) error { } func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader, fileName string) error { - // Read the file line-by-line and parse with the configured parse method. - firstLine := true scanner := bufio.NewScanner(reader) for scanner.Scan() { - metrics, err := monitor.parseLine(parser, scanner.Bytes(), firstLine) + metrics, err := monitor.parseLine(parser, scanner.Bytes()) if err != nil { return err } - firstLine = false if monitor.FileTag != "" { for _, m := range metrics { @@ -285,24 +282,17 @@ func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Read return nil } -func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte, firstLine bool) ([]telegraf.Metric, error) { +func (monitor *DirectoryMonitor) parseLine(parser parsers.Parser, line []byte) ([]telegraf.Metric, error) { switch parser.(type) { case *csv.Parser: - // The CSV parser parses headers in Parse and skips them in ParseLine. - if firstLine { - return parser.Parse(line) - } - - m, err := parser.ParseLine(string(line)) + m, err := parser.Parse(line) if err != nil { + if errors.Is(err, io.EOF) { + return nil, nil + } return nil, err } - - if m != nil { - return []telegraf.Metric{m}, nil - } - - return []telegraf.Metric{}, nil + return m, err default: return parser.Parse(line) } diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go index 3e954adb40320..3245074711fb2 100644 --- a/plugins/inputs/directory_monitor/directory_monitor_test.go +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -3,12 +3,11 @@ package directory_monitor import ( "bytes" "compress/gzip" + "github.com/stretchr/testify/require" "os" "path/filepath" "testing" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" ) @@ -193,3 +192,224 @@ func TestFileTag(t *testing.T) { } } } + +func TestCSVNoSkipRows(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. 
+ r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + CSVSkipRows: 0, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} + +func TestCSVSkipRows(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 1, + CSVSkipRows: 2, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `garbage nonsense 1 +garbage,nonsense,2 +line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. 
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} + +func TestCSVMultiHeader(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + + // Establish process directory and finished directory. + finishedDirectory, err := os.MkdirTemp("", "finished") + require.NoError(t, err) + processDirectory, err := os.MkdirTemp("", "test") + require.NoError(t, err) + defer os.RemoveAll(processDirectory) + defer os.RemoveAll(finishedDirectory) + + // Init plugin. + r := DirectoryMonitor{ + Directory: processDirectory, + FinishedDirectory: finishedDirectory, + MaxBufferedMetrics: 1000, + FileQueueSize: 100000, + } + err = r.Init() + require.NoError(t, err) + + parserConfig := parsers.Config{ + DataFormat: "csv", + CSVHeaderRowCount: 2, + CSVTagColumns: []string{"line1"}, + } + require.NoError(t, err) + r.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(&parserConfig) + }) + r.Log = testutil.Logger{} + + testCSV := `line,line,line +1,2,3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + "line3": "test_name2", + } + + // Write csv file to process into the 'process' directory. + f, err := os.Create(filepath.Join(processDirectory, testCsvFile)) + require.NoError(t, err) + _, err = f.WriteString(testCSV) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Start plugin before adding file. + err = r.Start(&acc) + require.NoError(t, err) + err = r.Gather(&acc) + require.NoError(t, err) + acc.Wait(1) + r.Stop() + + // Verify that we read both files once. + require.Equal(t, len(acc.Metrics), 1) + + // File should have gone back to the test directory, as we configured. + _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile)) + require.NoError(t, err) + for _, m := range acc.Metrics { + for key, value := range m.Tags { + require.Equal(t, "line1", key) + require.Equal(t, "hello", value) + } + require.Equal(t, expectedFields, m.Fields) + } +} diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md index b0a8ac05a6c19..a055a61c98299 100644 --- a/plugins/inputs/disk/README.md +++ b/plugins/inputs/disk/README.md @@ -4,9 +4,9 @@ The disk input plugin gathers metrics about disk usage. Note that `used_percent` is calculated by doing `used / (used + free)`, _not_ `used / total`, which is how the unix `df` command does it. See -https://en.wikipedia.org/wiki/Df_(Unix) for more details. +[wikipedia - df](https://en.wikipedia.org/wiki/Df_(Unix)) for more details. -### Configuration: +## Configuration ```toml [[inputs.disk]] @@ -18,7 +18,7 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details. ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ``` -#### Docker container +### Docker container To monitor the Docker engine host from within a container you will need to mount the host's filesystem into the container and set the `HOST_PROC` @@ -27,11 +27,11 @@ also set the `HOST_MOUNT_PREFIX` environment variable to the prefix containing the `/proc` directory, when present this variable is stripped from the reported `path` tag. 
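As a rough illustration of the prefix stripping described above, the mount point loses `HOST_MOUNT_PREFIX` before it becomes the `path` tag; this standalone sketch is simplified and is not the plugin's actual implementation:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// pathTag trims HOST_MOUNT_PREFIX from a mount point, falling back to "/"
// when the mount point is the prefix itself. Simplified for illustration.
func pathTag(mountpoint string) string {
	prefix := os.Getenv("HOST_MOUNT_PREFIX")
	if prefix == "" {
		return mountpoint
	}
	if trimmed := strings.TrimPrefix(mountpoint, prefix); trimmed != "" {
		return trimmed
	}
	return "/"
}

func main() {
	os.Setenv("HOST_MOUNT_PREFIX", "/hostfs")
	fmt.Println(pathTag("/hostfs/var")) // /var
	fmt.Println(pathTag("/hostfs"))     // /
}
```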
-``` +```shell docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/proc telegraf ``` -### Metrics: +## Metrics - disk - tags: @@ -48,25 +48,27 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro - inodes_total (integer, files) - inodes_used (integer, files) -### Troubleshooting +## Troubleshooting On Linux, the list of disks is taken from the `/proc/self/mounts` file and a [statfs] call is made on the second column. If any expected filesystems are missing ensure that the `telegraf` user can read these files: -``` + +```shell $ sudo -u telegraf cat /proc/self/mounts | grep sda2 /dev/sda2 /home ext4 rw,relatime,data=ordered 0 0 $ sudo -u telegraf stat /home ``` It may be desired to use POSIX ACLs to provide additional access: -``` + +```shell sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ ``` -### Example Output: +## Example -``` +```shell disk,fstype=hfs,mode=ro,path=/ free=398407520256i,inodes_free=97267461i,inodes_total=121847806i,inodes_used=24580345i,total=499088621568i,used=100418957312i,used_percent=20.131039916242397 1453832006274071563 disk,fstype=devfs,mode=rw,path=/dev free=0i,inodes_free=0i,inodes_total=628i,inodes_used=628i,total=185856i,used=185856i,used_percent=100 1453832006274137913 disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274157077 diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index 0a0fbf6f728a3..fc552a232b799 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -52,7 +52,7 @@ func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { // Skip dummy filesystem (procfs, cgroupfs, ...) continue } - mountOpts := parseOptions(partitions[i].Opts) + mountOpts := MountOptions(partitions[i].Opts) tags := map[string]string{ "path": du.Path, "device": strings.Replace(partitions[i].Device, "/dev/", "", -1), @@ -101,10 +101,6 @@ func (opts MountOptions) exists(opt string) bool { return false } -func parseOptions(opts string) MountOptions { - return strings.Split(opts, ",") -} - func init() { ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index 47a822b4410bf..22dd947406ff5 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - diskUtil "github.com/shirou/gopsutil/disk" + diskUtil "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -30,13 +30,13 @@ func TestDiskUsage(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, }, } duAll := []diskUtil.UsageStat{ @@ -137,7 +137,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -169,7 +169,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: "/hostfs/var", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -202,7 +202,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { Device: "/dev/sda", Mountpoint: 
"/hostfs", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, usageStats: []*diskUtil.UsageStat{ @@ -301,13 +301,13 @@ func TestDiskStats(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, }, } @@ -316,7 +316,7 @@ func TestDiskStats(t *testing.T) { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, } diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 11e68d6961ee0..1e99e81fef8c5 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -2,7 +2,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. -### Configuration: +## Configuration ```toml # Read metrics about disk IO by device @@ -34,7 +34,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ``` -#### Docker container +### Docker container To monitor the Docker engine host from within a container you will need to mount the host's filesystem into the container and set the `HOST_PROC` @@ -44,11 +44,11 @@ it is required to use privileged mode to provide access to `/dev`. If you are using the `device_tags` or `name_templates` options, you will need to bind mount `/run/udev` into the container. -``` +```shell docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/hostfs/proc telegraf ``` -### Metrics: +## Metrics - diskio - tags: @@ -72,16 +72,16 @@ On linux these values correspond to the values in and [`/sys/block//stat`](https://www.kernel.org/doc/Documentation/block/stat.txt). -#### `reads` & `writes`: +### `reads` & `writes` These values increment when an I/O request completes. -#### `read_bytes` & `write_bytes`: +### `read_bytes` & `write_bytes` These values count the number of bytes read from or written to this block device. -#### `read_time` & `write_time`: +### `read_time` & `write_time` These values count the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, @@ -89,49 +89,51 @@ these values will increase at a rate greater than 1000/second; for example, if 60 read requests wait for an average of 30 ms, the read_time field will increase by 60*30 = 1800. -#### `io_time`: +### `io_time` This value counts the number of milliseconds during which the device has had I/O requests queued. -#### `weighted_io_time`: +### `weighted_io_time` This value counts the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, this value will increase as the product of the number of milliseconds times the number of requests waiting (see `read_time` above for an example). -#### `iops_in_progress`: +### `iops_in_progress` This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include I/O requests that are in the queue but not yet issued to the device driver. -#### `merged_reads` & `merged_writes`: +### `merged_reads` & `merged_writes` Reads and writes which are adjacent to each other may be merged for efficiency. 
Thus two 4K reads may become one 8K read before it is ultimately handed to the disk, and so it will be counted (and queued) as only one I/O. These fields lets you know how often this was done. -### Sample Queries: +## Sample Queries -#### Calculate percent IO utilization per disk and host: -``` +### Calculate percent IO utilization per disk and host + +```sql SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -#### Calculate average queue depth: +### Calculate average queue depth + `iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. -``` + +```sql SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -### Example Output: +## Example -``` +```shell diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000 diskio,name=centos/var_log reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000 diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000 - ``` diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index 3ad203de09362..383e7e81044ec 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md index ad05658cc2b14..2312bd2c889e3 100644 --- a/plugins/inputs/disque/README.md +++ b/plugins/inputs/disque/README.md @@ -2,11 +2,10 @@ [Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory, message broker. - -### Configuration: +## Configuration ```toml -[[inputs.disque]] +[[inputs.disque]] ## An array of URI to gather stats about. Specify an ip or hostname ## with optional port and password. ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. @@ -14,8 +13,7 @@ servers = ["localhost"] ``` -### Metrics - +## Metrics - disque - disque_host diff --git a/plugins/inputs/dmcache/README.md b/plugins/inputs/dmcache/README.md index 536d3f518bcaa..bfc30f678bdc2 100644 --- a/plugins/inputs/dmcache/README.md +++ b/plugins/inputs/dmcache/README.md @@ -6,7 +6,7 @@ This plugin requires sudo, that is why you should setup and be sure that the tel `sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run for debugging purposes. 
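To sanity-check the sudo setup by hand, you can run the same command the plugin issues; a throwaway sketch (not part of the plugin) that shells out the way described above:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Exactly the command quoted above; useful for verifying the sudo rules
	// granted to the telegraf user.
	out, err := exec.Command("sudo", "/sbin/dmsetup", "status", "--target", "cache").CombinedOutput()
	if err != nil {
		fmt.Println("dmsetup failed:", err)
	}
	fmt.Print(string(out))
}
```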
-### Configuration +## Configuration ```toml [[inputs.dmcache]] @@ -14,33 +14,33 @@ This plugin requires sudo, that is why you should setup and be sure that the tel per_device = true ``` -### Measurements & Fields: +## Measurements & Fields - dmcache - - length - - target - - metadata_blocksize - - metadata_used - - metadata_total - - cache_blocksize - - cache_used - - cache_total - - read_hits - - read_misses - - write_hits - - write_misses - - demotions - - promotions - - dirty - -### Tags: + - length + - target + - metadata_blocksize + - metadata_used + - metadata_total + - cache_blocksize + - cache_used + - cache_total + - read_hits + - read_misses + - write_hits + - write_misses + - demotions + - promotions + - dirty + +## Tags - All measurements have the following tags: - - device + - device -### Example Output: +## Example Output -``` +```shell $ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache * Plugin: inputs.dmcache, Collection 1 > dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776ii,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000 diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index dc8ddd90373e9..287addc20d8d9 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -2,7 +2,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) -### Configuration: +## Configuration + ```toml # Query given DNS server and gives statistics [[inputs.dns_query]] @@ -26,7 +27,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # timeout = 2 ``` -### Metrics: +## Metrics - dns_query - tags: @@ -40,8 +41,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi - result_code (int, success = 0, timeout = 1, error = 2) - rcode_value (int) +## Rcode Descriptions -### Rcode Descriptions |rcode_value|rcode|Description| |---|-----------|-----------------------------------| |0 | NoError | No Error | @@ -65,9 +66,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi |22 | BADTRUNC | Bad Truncation | |23 | BADCOOKIE | Bad/missing Server Cookie | +### Example -### Example Output: - -``` +```shell dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 ``` diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index c1dd7abf06121..2e57e2f7b07ba 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -4,11 +4,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/miekg/dns" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var servers = []string{"8.8.8.8"} @@ -25,12 +24,12 @@ func TestGathering(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + 
require.NotEqual(t, 0, queryTime) } func TestGatheringMxRecord(t *testing.T) { @@ -45,12 +44,12 @@ func TestGatheringMxRecord(t *testing.T) { dnsConfig.RecordType = "MX" err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + require.NotEqual(t, 0, queryTime) } func TestGatheringRootDomain(t *testing.T) { @@ -71,12 +70,12 @@ func TestGatheringRootDomain(t *testing.T) { "result": "success", } fields := map[string]interface{}{ - "rcode_value": int(0), + "rcode_value": 0, "result_code": uint64(0), } err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -102,12 +101,12 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { "result": "success", } fields := map[string]interface{}{ - "rcode_value": int(0), + "rcode_value": 0, "result_code": uint64(0), } err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -134,9 +133,9 @@ func TestGatheringTimeout(t *testing.T) { }() select { case err := <-channel: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(time.Second * 2): - assert.Fail(t, "DNS query did not timeout") + require.Fail(t, "DNS query did not timeout") } } @@ -145,16 +144,16 @@ func TestSettingDefaultValues(t *testing.T) { dnsConfig.setDefaultValues() - assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") - assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") - assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") - assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") + require.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") + require.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") dnsConfig = DNSQuery{Domains: []string{"."}} dnsConfig.setDefaultValues() - assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") } func TestRecordTypeParser(t *testing.T) { @@ -163,47 +162,47 @@ func TestRecordTypeParser(t *testing.T) { dnsConfig.RecordType = "A" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeA, recordType) + require.Equal(t, dns.TypeA, recordType) dnsConfig.RecordType = "AAAA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeAAAA, recordType) + require.Equal(t, dns.TypeAAAA, recordType) dnsConfig.RecordType = "ANY" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeANY, recordType) + require.Equal(t, dns.TypeANY, recordType) dnsConfig.RecordType = "CNAME" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeCNAME, recordType) + require.Equal(t, dns.TypeCNAME, recordType) dnsConfig.RecordType = "MX" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeMX, recordType) + require.Equal(t, dns.TypeMX, recordType) dnsConfig.RecordType = "NS" recordType, _ = dnsConfig.parseRecordType() 
- assert.Equal(t, dns.TypeNS, recordType) + require.Equal(t, dns.TypeNS, recordType) dnsConfig.RecordType = "PTR" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypePTR, recordType) + require.Equal(t, dns.TypePTR, recordType) dnsConfig.RecordType = "SOA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSOA, recordType) + require.Equal(t, dns.TypeSOA, recordType) dnsConfig.RecordType = "SPF" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSPF, recordType) + require.Equal(t, dns.TypeSPF, recordType) dnsConfig.RecordType = "SRV" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSRV, recordType) + require.Equal(t, dns.TypeSRV, recordType) dnsConfig.RecordType = "TXT" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeTXT, recordType) + require.Equal(t, dns.TypeTXT, recordType) } func TestRecordTypeParserError(t *testing.T) { @@ -212,5 +211,5 @@ func TestRecordTypeParserError(t *testing.T) { dnsConfig.RecordType = "nil" _, err = dnsConfig.parseRecordType() - assert.Error(t, err) + require.Error(t, err) } diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 8d75e641a1fb4..5a0585b414dca 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -6,7 +6,7 @@ docker containers. The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client) to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/). -### Configuration: +## Configuration ```toml # Read metrics about docker containers @@ -46,23 +46,23 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) ## Whether to report for each container per-device blkio (8:0, 8:1...), ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. - ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting + ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting ## is honored. perdevice = true - + ## Specifies for which classes a per-device metric should be issued ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) ## Please note that this setting has no effect if 'perdevice' is set to 'true' # perdevice_include = ["cpu"] - + ## Whether to report for each container total blkio and network stats or not. ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'. - ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting + ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting ## is honored. total = false - + ## Specifies for which classes a total metric should be issued. Total is an aggregated of the 'perdevice' values. - ## Possible values are 'cpu', 'blkio' and 'network' + ## Possible values are 'cpu', 'blkio' and 'network' ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin. 
## Please note that this setting has no effect if 'total' is set to 'false' # total_include = ["cpu", "blkio", "network"] @@ -83,23 +83,23 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/) # insecure_skip_verify = false ``` -#### Environment Configuration +### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the [cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient). -#### Security +### Security Giving telegraf access to the Docker daemon expands the [attack surface](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface) that could result in an attacker gaining root access to a machine. This is especially relevant if the telegraf configuration can be changed by untrusted users. -#### Docker Daemon Permissions +### Docker Daemon Permissions Typically, telegraf must be given permission to access the docker daemon unix socket when using the default endpoint. This can be done by adding the `telegraf` unix user (created when installing a Telegraf package) to the `docker` unix group with the following command: -``` +```shell sudo usermod -aG docker telegraf ``` @@ -108,12 +108,12 @@ within the telegraf container. This can be done in the docker CLI by adding the option `-v /var/run/docker.sock:/var/run/docker.sock` or adding the following lines to the telegraf container definition in a docker compose file: -``` +```yaml volumes: - /var/run/docker.sock:/var/run/docker.sock ``` -#### source tag +### source tag Selecting the containers measurements can be tricky if you have many containers with the same name. To alleviate this issue you can set the below value to `true` @@ -124,20 +124,20 @@ source_tag = true This will cause all measurements to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. -#### Kubernetes Labels +### Kubernetes Labels Kubernetes may add many labels to your containers; if they are not needed you may prefer to exclude them: -``` + +```toml docker_label_exclude = ["annotation.kubernetes*"] ``` +### Docker-compose Labels -#### Docker-compose Labels +Docker-compose will add labels to your containers. You can restrict labels to selected ones, e.g. -Docker-compose will add labels to your containers. You can limit restrict labels to selected ones, e.g. - -``` +```toml docker_label_include = [ "com.docker.compose.config-hash", "com.docker.compose.container-number", @@ -147,15 +147,14 @@ Docker-compose will add labels to your containers. You can limit restrict labels ] ``` - -### Metrics: +## Metrics - docker - tags: - unit - engine_host - server_version - + fields: + - fields: - n_used_file_descriptors - n_cpus - n_containers @@ -171,12 +170,12 @@ Docker-compose will add labels to your containers. You can limit restrict labels The `docker_data` and `docker_metadata` measurements are available only for some storage drivers such as devicemapper. -+ docker_data (deprecated see: `docker_devicemapper`) +- docker_data (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - + fields: + - fields: - available - total - used @@ -186,7 +185,7 @@
- unit - engine_host - server_version - + fields: + - fields: - available - total - used @@ -198,7 +197,7 @@ The above measurements for the devicemapper storage driver can now be found in t - engine_host - server_version - pool_name - + fields: + - fields: - pool_blocksize_bytes - data_space_used_bytes - data_space_total_bytes @@ -208,7 +207,7 @@ The above measurements for the devicemapper storage driver can now be found in t - metadata_space_available_bytes - thin_pool_minimum_free_space_bytes -+ docker_container_mem +- docker_container_mem - tags: - engine_host - server_version @@ -216,7 +215,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_name - container_status - container_version - + fields: + - fields: - total_pgmajfault - cache - mapped_file @@ -261,7 +260,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - cpu - + fields: + - fields: - throttling_periods - throttling_throttled_periods - throttling_throttled_time @@ -272,7 +271,7 @@ The above measurements for the devicemapper storage driver can now be found in t - usage_percent - container_id -+ docker_container_net +- docker_container_net - tags: - engine_host - server_version @@ -281,7 +280,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - network - + fields: + - fields: - rx_dropped - rx_bytes - rx_errors @@ -327,8 +326,8 @@ status if configured. - container_status - container_version - fields: - - health_status (string) - - failing_streak (integer) + - health_status (string) + - failing_streak (integer) - docker_container_status - tags: @@ -356,9 +355,9 @@ status if configured. - tasks_desired - tasks_running -### Example Output: +## Example -``` +```shell docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000 docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000 docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000 diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md index d2f0dc6144ff9..99fbcee0512fe 100644 --- a/plugins/inputs/docker_log/README.md +++ b/plugins/inputs/docker_log/README.md @@ -12,7 +12,7 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the [Official Docker Client]: 
https://github.com/moby/moby/tree/master/client [Engine API]: https://docs.docker.com/engine/api/v1.24/ -### Configuration +## Configuration ```toml [[inputs.docker_log]] @@ -54,14 +54,14 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the # insecure_skip_verify = false ``` -#### Environment Configuration +### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the [CLI Docker environment variables][env] [env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient -### source tag +## source tag Selecting the containers can be tricky if you have many containers with the same name. To alleviate this issue you can set the below value to `true` @@ -72,7 +72,7 @@ source_tag = true This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. -### Metrics +## Metrics - docker_log - tags: @@ -85,9 +85,9 @@ This will cause all data points to have the `source` tag be set to the first 12 - container_id - message -### Example Output +## Example Output -``` +```shell docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Tags enabled: host=371ee5d3e587" 1560913872000000000 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000 diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index 9e44d99edbc07..573cbd1f79d04 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -6,7 +6,7 @@ metrics on configured domains. When using Dovecot v2.3 you are still able to use this protocol by following the [upgrading steps][upgrading]. -### Configuration: +## Configuration ```toml # Read metrics about dovecot servers @@ -23,50 +23,49 @@ the [upgrading steps][upgrading]. ## Type is one of "user", "domain", "ip", or "global" type = "global" - + ## Wildcard matches like "*.com". 
An empty string "" is same as "*" ## If type = "ip" filters should be filters = [""] ``` -### Metrics: +## Metrics - dovecot - tags: - - server (hostname) - - type (query type) - - ip (ip addr) - - user (username) - - domain (domain name) + - server (hostname) + - type (query type) + - ip (ip addr) + - user (username) + - domain (domain name) - fields: - - reset_timestamp (string) - - last_update (string) - - num_logins (integer) - - num_cmds (integer) - - num_connected_sessions (integer) - - user_cpu (float) - - sys_cpu (float) - - clock_time (float) - - min_faults (integer) - - maj_faults (integer) - - vol_cs (integer) - - invol_cs (integer) - - disk_input (integer) - - disk_output (integer) - - read_count (integer) - - read_bytes (integer) - - write_count (integer) - - write_bytes (integer) - - mail_lookup_path (integer) - - mail_lookup_attr (integer) - - mail_read_count (integer) - - mail_read_bytes (integer) - - mail_cache_hits (integer) - + - reset_timestamp (string) + - last_update (string) + - num_logins (integer) + - num_cmds (integer) + - num_connected_sessions (integer) + - user_cpu (float) + - sys_cpu (float) + - clock_time (float) + - min_faults (integer) + - maj_faults (integer) + - vol_cs (integer) + - invol_cs (integer) + - disk_input (integer) + - disk_output (integer) + - read_count (integer) + - read_bytes (integer) + - write_count (integer) + - write_bytes (integer) + - mail_lookup_path (integer) + - mail_lookup_attr (integer) + - mail_read_count (integer) + - mail_read_bytes (integer) + - mail_cache_hits (integer) -### Example Output: +### Example Output -``` +```shell dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907 ``` diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md index 00398760d2e9d..1570227ac1778 100644 --- a/plugins/inputs/dpdk/README.md +++ b/plugins/inputs/dpdk/README.md @@ -1,4 +1,5 @@ # Data Plane Development Kit (DPDK) Input Plugin + The `dpdk` plugin collects metrics exposed by applications built with [Data Plane Development Kit](https://www.dpdk.org/) which is an extensive set of open source libraries designed for accelerating packet processing workloads. @@ -23,13 +24,15 @@ to discover and test the capabilities of DPDK libraries and to explore the expos > `DPDK version >= 20.05`. The default configuration include reading common statistics from `/ethdev/stats` that is > available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable > querying `/ethdev/stats` by setting corresponding `exclude_commands` configuration option. - +> > **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK > will also require root access. This means that either access permissions have to be adjusted for socket telemetry > interface to allow Telegraf to access it, or Telegraf should run with root privileges. 
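For the non-root option mentioned in the note above, access to the telemetry socket can be widened instead of elevating Telegraf. The sketch below assumes the plugin's default socket path and a `telegraf` service user; both are assumptions to verify on the target system, and the ACL has to be reapplied whenever the DPDK application recreates the socket.

```shell
# Hypothetical example: grant the telegraf user read/write access to the DPDK telemetry socket
# (default socket path assumed; reapply after the DPDK application restarts and recreates it)
sudo setfacl -m u:telegraf:rw /var/run/dpdk/rte/dpdk_telemetry.v2
```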
## Configuration + This plugin offers multiple configuration options, please review examples below for additional usage information. + ```toml # Reads metrics from DPDK applications using v2 telemetry interface. [[inputs.dpdk]] @@ -50,7 +53,7 @@ This plugin offers multiple configuration options, please review examples below ## List of custom, application-specific telemetry commands to query ## The list of available commands depend on the application deployed. Applications can register their own commands ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands - ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: ## additional_commands = ["/l3fwd-power/stats"] # additional_commands = [] @@ -60,28 +63,34 @@ This plugin offers multiple configuration options, please review examples below exclude_commands = ["/ethdev/link_status"] ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify - ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. ## [inputs.dpdk.tags] ## dpdk_instance = "my-fwd-app" ``` ### Example: Minimal Configuration for NIC metrics + This configuration allows getting metrics for all devices reported via `/ethdev/list` command: + * `/ethdev/stats` - basic device statistics (since `DPDK 20.11`) * `/ethdev/xstats` - extended device statistics * `/ethdev/link_status` - up/down link status + ```toml [[inputs.dpdk]] device_types = ["ethdev"] ``` + Since this configuration will query `/ethdev/link_status` it's recommended to increase timeout to `socket_access_timeout = "10s"`. The [plugin collecting interval](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) should be adjusted accordingly (e.g. `interval = "30s"`). ### Example: Excluding NIC link status from being collected + Checking link status depending on underlying implementation may take more time to complete. This configuration can be used to exclude this telemetry command to allow faster response for metrics. + ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -89,13 +98,16 @@ This configuration can be used to exclude this telemetry command to allow faster [inputs.dpdk.ethdev] exclude_commands = ["/ethdev/link_status"] ``` + A separate plugin instance with higher timeout settings can be used to get `/ethdev/link_status` independently. Consult [Independent NIC link status configuration](#example-independent-nic-link-status-configuration) and [Getting metrics from multiple DPDK instances running on same host](#example-getting-metrics-from-multiple-dpdk-instances-running-on-same-host) examples for further details. ### Example: Independent NIC link status configuration + This configuration allows getting `/ethdev/link_status` using separate configuration, with higher timeout. + ```toml [[inputs.dpdk]] interval = "30s" @@ -107,8 +119,10 @@ This configuration allows getting `/ethdev/link_status` using separate configura ``` ### Example: Getting application-specific metrics -This configuration allows reading custom metrics exposed by applications. Example telemetry command obtained from + +This configuration allows reading custom metrics exposed by applications. 
Example telemetry command obtained from [L3 Forwarding with Power Management Sample Application](https://doc.dpdk.org/guides/sample_app_ug/l3_forward_power_man.html). + ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -117,18 +131,22 @@ This configuration allows reading custom metrics exposed by applications. Exampl [inputs.dpdk.ethdev] exclude_commands = ["/ethdev/link_status"] ``` + Command entries specified in `additional_commands` should match DPDK command format: + * Command entry format: either `command` or `command,params` for commands that expect parameters, where comma (`,`) separates command from params. * Command entry length (command with params) should be `< 1024` characters. * Command length (without params) should be `< 56` characters. * Commands have to start with `/`. Providing invalid commands will prevent the plugin from starting. Additional commands allow duplicates, but they -will be removed during execution so each command will be executed only once during each metric gathering interval. +will be removed during execution so each command will be executed only once during each metric gathering interval. ### Example: Getting metrics from multiple DPDK instances running on same host + This configuration allows getting metrics from two separate applications exposing their telemetry interfaces -via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` allows distinguishing between them. +via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` allows distinguishing between them. + ```toml # Instance #1 - L3 Forwarding with Power Management Application [[inputs.dpdk]] @@ -153,22 +171,26 @@ via separate sockets. For each plugin instance a unique tag `[inputs.dpdk.tags]` [inputs.dpdk.tags] dpdk_instance = "l2fwd-cat" ``` + This utilizes Telegraf's standard capability of [adding custom tags](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#input-plugins) to input plugin's measurements. ## Metrics + The DPDK socket accepts `command,params` requests and returns metric data in JSON format. All metrics from DPDK socket -become flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. +become flattened using [Telegraf's JSON Flattener](../../parsers/json/README.md) and exposed as fields. If DPDK response contains no information (is empty or is null) then such response will be discarded. -> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework the JSON response from DPDK +> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework the JSON response from DPDK > may contain various sets of metrics. While metrics from `/ethdev/stats` should be most stable, the `/ethdev/xstats` > may contain driver-specific metrics (depending on DPDK application configuration). The application-specific commands > like `/l3fwd-power/stats` can return their own specific set of metrics. 
## Example output + The output consists of plugin name (`dpdk`), and a set of tags that identify querying hierarchy: -``` + +```shell dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fields] [timestamp] ``` @@ -177,9 +199,10 @@ dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fi | `host` | hostname of the machine (consult [Telegraf Agent configuration](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent) for additional details) | | `dpdk_instance` | custom tag from `[inputs.dpdk.tags]` (optional) | | `command` | executed command (without params) | -| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of NIC as exposed by `/ethdev/list`
For DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. | +| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of NIC as exposed by `/ethdev/list`. For DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. | When running plugin configuration below... + ```toml [[inputs.dpdk]] device_types = ["ethdev"] @@ -189,7 +212,8 @@ When running plugin configuration below... ``` ...expected output for `dpdk` plugin instance running on host named `host=dpdk-host`: -``` + +```shell dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 
out_octets_encrypted=0,rx_fcoe_mbuf_allocation_errors=0,tx_q1packets=0,rx_priority0_xoff_packets=0,rx_priority7_xoff_packets=0,rx_errors=0,mac_remote_errors=0,in_pkts_invalid=0,tx_priority3_xoff_packets=0,tx_errors=0,rx_fcoe_bytes=0,rx_flow_control_xon_packets=0,rx_priority4_xoff_packets=0,tx_priority2_xoff_packets=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_management_packets=0,rx_priority7_dropped=0,rx_priority4_dropped=0,in_pkts_unchecked=0,rx_error_bytes=0,rx_size_256_to_511_packets=0,tx_priority4_xoff_packets=0,rx_priority6_xon_packets=0,tx_priority4_xon_to_xoff_packets=0,in_pkts_delayed=0,rx_priority0_mbuf_allocation_errors=0,out_octets_protected=0,tx_priority7_xon_to_xoff_packets=0,tx_priority1_xon_to_xoff_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_priority6_xon_to_xoff_packets=0,flow_director_filter_add_errors=0,rx_total_packets=99,rx_crc_errors=0,flow_director_filter_remove_errors=0,rx_missed_errors=0,tx_size_64_packets=0,rx_priority3_dropped=0,flow_director_matched_filters=0,tx_priority2_xon_to_xoff_packets=0,rx_priority1_xon_packets=0,rx_size_65_to_127_packets=99,rx_fragment_errors=0,in_pkts_notusingsa=0,rx_q0bytes=7162,rx_fcoe_dropped=0,rx_priority1_dropped=0,rx_fcoe_packets=0,rx_priority5_xoff_packets=0,out_pkts_protected=0,tx_total_packets=0,rx_priority2_dropped=0,in_pkts_late=0,tx_q1bytes=0,in_pkts_badtag=0,rx_multicast_packets=99,rx_priority6_xoff_packets=0,tx_flow_control_xoff_packets=0,rx_flow_control_xoff_packets=0,rx_priority0_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,rx_priority7_mbuf_allocation_errors=0,tx_priority0_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,tx_q0packets=0,tx_xoff_packets=0,rx_size_512_to_1023_packets=0,rx_priority3_xon_packets=0,rx_q0errors=0,rx_oversize_errors=0,tx_priority4_xon_packets=0,tx_priority5_xoff_packets=0,rx_priority5_xon_packets=0,rx_total_missed_packets=0,rx_priority4_mbuf_allocation_errors=0,tx_priority1_xon_packets=0,tx_management_packets=0,rx_priority5_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,rx_undersize_errors=0,tx_priority1_xoff_packets=0,rx_q0packets=99,tx_q2packets=0,tx_priority6_xon_packets=0,rx_good_packets=99,tx_priority5_xon_packets=0,tx_size_256_to_511_packets=0,rx_priority6_dropped=0,rx_broadcast_packets=0,tx_size_512_to_1023_packets=0,tx_priority3_xon_to_xoff_packets=0,in_pkts_unknownsci=0,in_octets_validated=0,tx_priority6_xoff_packets=0,tx_priority7_xoff_packets=0,rx_jabber_errors=0,tx_priority7_xon_packets=0,tx_priority0_xon_packets=0,in_pkts_unusedsa=0,tx_priority0_xoff_packets=0,mac_local_errors=33,rx_total_bytes=7162,in_pkts_notvalid=0,rx_length_errors=0,in_octets_decrypted=0,rx_size_128_to_255_packets=0,rx_good_bytes=7162,tx_size_65_to_127_packets=0,rx_mac_short_packet_dropped=0,tx_size_1024_to_max_packets=0,rx_priority2_mbuf_allocation_errors=0,flow_director_added_filters=0,tx_multicast_packets=0,rx_fcoe_crc_errors=0,rx_priority1_xoff_packets=0,flow_director_missed_filters=0,rx_xon_packets=0,tx_size_128_to_255_packets=0,out_pkts_encrypted=0,rx_priority4_xon_packets=0,rx_priority0_dropped=0,rx_size_1024_to_max_packets=0,tx_good_bytes=0,rx_management_dropped=0,rx_mbuf_allocation_errors=0,tx_xon_packets=0,rx_priority3_xoff_packets=0,tx_good_packets=0,tx_fcoe_bytes=0,rx_priority6_mbuf_allocation_errors=0,rx_priority2_xon_packets=0,tx_broadcast_packets=0,tx_q2bytes=0,rx_priority7_xon_packets=0,out_pkts_untagged=0,rx_priority2_xoff_packets=0,rx_priority1_mbuf_allocation_errors=0,tx_q0bytes=0,rx_size_64_packets=0,rx_priority5_dropped
=0,tx_priority2_xon_packets=0,in_pkts_nosci=0,flow_director_removed_filters=0,in_pkts_ok=0,rx_l3_l4_xsum_error=0,rx_priority3_mbuf_allocation_errors=0,tx_priority3_xon_packets=0 1606310780000000000 diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 0bf8b983cd219..b5152a3ebfab8 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -14,7 +14,7 @@ formats. The amazon-ecs-agent (though it _is_ a container running on the host) is not present in the metadata/stats endpoints. -### Configuration +## Configuration ```toml # Read metrics about ECS containers @@ -45,7 +45,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Configuration (enforce v2 metadata) +## Configuration (enforce v2 metadata) ```toml # Read metrics about ECS containers @@ -76,7 +76,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Metrics +## Metrics - ecs_task - tags: @@ -92,7 +92,7 @@ present in the metadata/stats endpoints. - limit_cpu (float) - limit_mem (float) -+ ecs_container_mem +- ecs_container_mem - tags: - cluster - task_arn @@ -158,7 +158,7 @@ present in the metadata/stats endpoints. - usage_percent - usage_total -+ ecs_container_net +- ecs_container_net - tags: - cluster - task_arn @@ -200,7 +200,7 @@ present in the metadata/stats endpoints. - io_serviced_recursive_total - io_serviced_recursive_write -+ ecs_container_meta +- ecs_container_meta - tags: - cluster - task_arn @@ -221,10 +221,9 @@ present in the metadata/stats endpoints. - started_at - type +## Example -### Example Output - -``` +```shell ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a 
usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 @@ -242,4 +241,4 @@ ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs [docker-input]: /plugins/inputs/docker/README.md [task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html -[task-metadata-endpoint-v3] https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html +[task-metadata-endpoint-v3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 7e9d7e393346f..14b32c6851db7 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type pollMock struct { @@ -80,8 +80,8 @@ func TestEcsClient_PollSync(t *testing.T) { t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) - assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) + require.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) + require.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) }) } } @@ -160,7 +160,7 @@ func TestEcsClient_Task(t *testing.T) { t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) }) } } @@ -234,7 +234,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) }) } } @@ -268,10 +268,10 @@ func TestResolveTaskURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveTaskURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } @@ -305,10 +305,10 @@ func TestResolveStatsURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveStatsURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 0afb0e325dbdd..e39bc025edb88 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -12,6 +12,7 @@ In addition, the following optional queries are only made by the master node: [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) Specific Elasticsearch endpoints that are queried: + - Node: either /_nodes/stats or /_nodes/_local/stats depending on 
'local' configuration setting - Cluster Heath: /_cluster/health?level=indices - Cluster Stats: /_cluster/stats @@ -20,7 +21,7 @@ Specific Elasticsearch endpoints that are queried: Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. -### Configuration +## Configuration ```toml [[inputs.elasticsearch]] @@ -81,7 +82,7 @@ Note that specific statistics information can change between Elasticsearch versi # num_most_recent_indices = 0 ``` -### Metrics +## Metrics Emitted when `cluster_health = true`: @@ -169,7 +170,7 @@ Emitted when `cluster_stats = true`: - shards_total (float) - store_size_in_bytes (float) -+ elasticsearch_clusterstats_nodes +- elasticsearch_clusterstats_nodes - tags: - cluster_name - node_name @@ -230,7 +231,7 @@ Emitted when the appropriate `node_stats` options are set. - tx_count (float) - tx_size_in_bytes (float) -+ elasticsearch_breakers +- elasticsearch_breakers - tags: - cluster_name - node_attribute_ml.enabled @@ -291,7 +292,7 @@ Emitted when the appropriate `node_stats` options are set. - total_free_in_bytes (float) - total_total_in_bytes (float) -+ elasticsearch_http +- elasticsearch_http - tags: - cluster_name - node_attribute_ml.enabled @@ -402,7 +403,7 @@ Emitted when the appropriate `node_stats` options are set. - warmer_total (float) - warmer_total_time_in_millis (float) -+ elasticsearch_jvm +- elasticsearch_jvm - tags: - cluster_name - node_attribute_ml.enabled @@ -480,7 +481,7 @@ Emitted when the appropriate `node_stats` options are set. - swap_used_in_bytes (float) - timestamp (float) -+ elasticsearch_process +- elasticsearch_process - tags: - cluster_name - node_attribute_ml.enabled diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 333630c958703..9a805f3ca5789 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -2,7 +2,7 @@ The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver. -### Configuration: +## Configuration ```toml # Returns ethtool statistics for given interfaces @@ -30,13 +30,13 @@ Interfaces can be included or ignored using: Note that loopback interfaces will be automatically ignored. -### Metrics: +## Metrics Metrics are dependent on the network device and driver. 
-### Example Output: +## Example Output -``` +```shell ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,interface_up=1i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,interface_up=0i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 ``` diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index f9573ee054429..e348427d05366 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" ) @@ -310,8 +310,8 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) expectedTagsEth1 := map[string]string{ @@ -334,8 +334,8 @@ func TestGatherIncludeInterfaces(t *testing.T) { command.InterfaceInclude = append(command.InterfaceInclude, "eth1") err := 
command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -361,8 +361,8 @@ func TestGatherIgnoreInterfaces(t *testing.T) { command.InterfaceExclude = append(command.InterfaceExclude, "eth1") err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should not contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -489,8 +489,8 @@ func TestNormalizedKeys(t *testing.T) { var acc testutil.Accumulator err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) acc.AssertContainsFields(t, pluginName, toStringMapInterface(c.expectedFields)) acc.AssertContainsTaggedFields(t, pluginName, toStringMapInterface(c.expectedFields), expectedTags) diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index c0533b513b8bf..dc99bd281f6e3 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -2,15 +2,15 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. -### IoT Hub Setup +## IoT Hub Setup The main focus for development of this plugin is Azure IoT hub: -1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ +1. Create an Azure IoT Hub by following any of the guides provided here: [Azure IoT Hub](https://docs.microsoft.com/en-us/azure/iot-hub/) 2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) 3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work -### Configuration +## Configuration ```toml [[inputs.eventhub_consumer]] @@ -98,7 +98,7 @@ The main focus for development of this plugin is Azure IoT hub: data_format = "influx" ``` -#### Environment Variables +### Environment Variables [Full documentation of the available environment variables][envvar]. diff --git a/plugins/inputs/example/README.md b/plugins/inputs/example/README.md index 6b86615b0e6a8..5778494f0af1e 100644 --- a/plugins/inputs/example/README.md +++ b/plugins/inputs/example/README.md @@ -7,7 +7,7 @@ additional information can be found. Telegraf minimum version: Telegraf x.x Plugin minimum tested version: x.x -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage `. @@ -17,12 +17,12 @@ generate it using `telegraf --usage `. example_option = "example_value" ``` -#### example_option +### example_option A more in depth description of an option can be provided here, but only do so if the option cannot be fully described in the sample config. -### Metrics +## Metrics Here you should add an optional description and links to where the user can get more information about the measurements. @@ -39,7 +39,7 @@ mapped to the output. - field1 (type, unit) - field2 (float, percent) -+ measurement2 +- measurement2 - tags: - tag3 - fields: @@ -49,29 +49,30 @@ mapped to the output. 
- field6 (float) - field7 (boolean) -### Sample Queries +## Sample Queries This section can contain some useful InfluxDB queries that can be used to get started with the plugin or to generate dashboards. For each query listed, describe at a high level what data is returned. Get the max, mean, and min for the measurement in the last hour: -``` + +```sql SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag ``` -### Troubleshooting +## Troubleshooting This optional section can provide basic troubleshooting steps that a user can perform. -### Example Output +## Example This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get this information. -``` +```shell measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455 measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455 ``` diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 4e3d7245422d2..e682ef4abbe0c 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -5,7 +5,7 @@ their output in any one of the accepted [Input Data Formats](https://github.com/ This plugin can be used to poll for custom metrics from any source. -### Configuration: +## Configuration ```toml [[inputs.exec]] @@ -32,15 +32,17 @@ This plugin can be used to poll for custom metrics from any source. Glob patterns in the `command` option are matched on every run, so adding new scripts that match the pattern will cause them to be picked up immediately. -### Example: +## Example This script produces static values, since no timestamp is specified the values are at the current time. + ```sh #!/bin/sh echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i' ``` It can be paired with the following configuration and will be run at the `interval` of the agent. + ```toml [[inputs.exec]] commands = ["sh /tmp/test.sh"] @@ -48,18 +50,19 @@ It can be paired with the following configuration and will be run at the `interv data_format = "influx" ``` -### Common Issues: +## Common Issues -#### My script works when I run it by hand, but not when Telegraf is running as a service. +### My script works when I run it by hand, but not when Telegraf is running as a service This may be related to the Telegraf service running as a different user. The official packages run Telegraf as the `telegraf` user and group on Linux systems. -#### With a PowerShell on Windows, the output of the script appears to be truncated. 
+### With a PowerShell on Windows, the output of the script appears to be truncated You may need to set a variable in your script to increase the number of columns available for output: -``` + +```shell $host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) ``` diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d0647476c77ae..22465318bbe71 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -13,10 +13,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const validJSON = ` @@ -94,7 +94,7 @@ func TestExec(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(e.Gather) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") + require.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), @@ -123,7 +123,7 @@ func TestExecMalformed(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestCommandError(t *testing.T) { @@ -140,7 +140,7 @@ func TestCommandError(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestExecCommandWithGlob(t *testing.T) { @@ -263,14 +263,14 @@ func TestRemoveCarriageReturns(t *testing.T) { for _, test := range crTests { b := bytes.NewBuffer(test.input) out := removeWindowsCarriageReturns(*b) - assert.True(t, bytes.Equal(test.output, out.Bytes())) + require.True(t, bytes.Equal(test.output, out.Bytes())) } } else { // Test that the buffer is returned unaltered for _, test := range crTests { b := bytes.NewBuffer(test.input) out := removeWindowsCarriageReturns(*b) - assert.True(t, bytes.Equal(test.input, out.Bytes())) + require.True(t, bytes.Equal(test.input, out.Bytes())) } } } diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index aa37e7cd7696a..c5299713cece8 100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -1,7 +1,7 @@ # Execd Input Plugin -The `execd` plugin runs an external program as a long-running daemon. -The programs must output metrics in any one of the accepted +The `execd` plugin runs an external program as a long-running daemon. +The programs must output metrics in any one of the accepted [Input Data Formats][] on the process's STDOUT, and is expected to stay running. If you'd instead like the process to collect metrics and then exit, check out the [inputs.exec][] plugin. @@ -13,7 +13,7 @@ new line to the process's STDIN. STDERR from the process will be relayed to Telegraf as errors in the logs. -### Configuration: +## Configuration ```toml [[inputs.execd]] @@ -41,9 +41,9 @@ STDERR from the process will be relayed to Telegraf as errors in the logs. 
data_format = "influx" ``` -### Example +## Example -##### Daemon written in bash using STDIN signaling +### Daemon written in bash using STDIN signaling ```bash #!/bin/bash @@ -62,7 +62,7 @@ done signal = "STDIN" ``` -##### Go daemon using SIGHUP +### Go daemon using SIGHUP ```go package main @@ -96,7 +96,7 @@ func main() { signal = "SIGHUP" ``` -##### Ruby daemon running standalone +### Ruby daemon running standalone ```ruby #!/usr/bin/env ruby diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 8d7faa2268878..c1a3d0ea24d84 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -15,10 +15,7 @@ func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt ch signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2) go func() { - select { - case <-ctx.Done(): - // context done. stop to signals to avoid pushing messages to a closed channel - signal.Stop(collectMetricsPrompt) - } + <-ctx.Done() + signal.Stop(collectMetricsPrompt) }() } diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index 396928ff44036..1059bc2b7f2db 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -121,10 +121,10 @@ func TestLoadConfig(t *testing.T) { }) c := "./testdata/plugin.conf" - inputs, err := LoadConfig(&c) + loadedInputs, err := LoadConfig(&c) require.NoError(t, err) - inp := inputs[0].(*serviceInput) + inp := loadedInputs[0].(*serviceInput) require.Equal(t, "awesome name", inp.ServiceName) require.Equal(t, "xxxxxxxxxx", inp.SecretToken) diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md index 1762bbaf209cb..221f9d5b44c68 100644 --- a/plugins/inputs/fail2ban/README.md +++ b/plugins/inputs/fail2ban/README.md @@ -9,7 +9,7 @@ Acquiring the required permissions can be done using several methods: - [Use sudo](#using-sudo) run fail2ban-client. - Run telegraf as root. (not recommended) -### Configuration +## Configuration ```toml # Read metrics from fail2ban. @@ -18,7 +18,7 @@ Acquiring the required permissions can be done using several methods: use_sudo = false ``` -### Using sudo +## Using sudo Make sure to set `use_sudo = true` in your configuration file. @@ -26,20 +26,21 @@ You will also need to update your sudoers file. It is recommended to modify a file in the `/etc/sudoers.d` directory using `visudo`: ```bash -$ sudo visudo -f /etc/sudoers.d/telegraf +sudo visudo -f /etc/sudoers.d/telegraf ``` Add the following lines to the file, these commands allow the `telegraf` user to call `fail2ban-client` without needing to provide a password and disables logging of the call in the auth.log. Consult `man 8 visudo` and `man 5 sudoers` for details. 
-``` + +```text Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN Defaults!FAIL2BAN !logfile, !syslog, !pam_session ``` -### Metrics +## Metrics - fail2ban - tags: @@ -50,7 +51,7 @@ Defaults!FAIL2BAN !logfile, !syslog, !pam_session ### Example Output -``` +```shell # fail2ban-client status sshd Status for the jail: sshd |- Filter @@ -63,6 +64,6 @@ Status for the jail: sshd `- Banned IP list: 192.168.0.1 192.168.0.2 ``` -``` +```shell fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000 ``` diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md index 54c20310224b3..d02af0d5b8f74 100644 --- a/plugins/inputs/fibaro/README.md +++ b/plugins/inputs/fibaro/README.md @@ -3,7 +3,7 @@ The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values of hooked devices. Those values could be true (1) or false (0) for switches, percentage for dimmers, temperature, etc. -### Configuration: +## Configuration ```toml # Read devices value(s) from a Fibaro controller @@ -20,7 +20,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers # timeout = "5s" ``` -### Metrics: +## Metrics - fibaro - tags: @@ -36,10 +36,9 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers - value (float) - value2 (float, when available from device) +## Example Output -### Example Output: - -``` +```shell fibaro,deviceId=9,host=vm1,name=Fenêtre\ haute,room=Cuisine,section=Cuisine,type=com.fibaro.FGRM222 energy=2.04,power=0.7,value=99,value2=99 1529996807000000000 fibaro,deviceId=10,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1529996807000000000 fibaro,deviceId=13,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 energy=4.33,power=0.7,value=99,value2=99 1529996807000000000 diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index 8ec406da7be3d..91ed7a8e1bc56 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -6,7 +6,7 @@ the selected [input data format][]. **Note:** If you wish to parse only newly appended lines use the [tail][] input plugin instead. -### Configuration: +## Configuration ```toml [[inputs.file]] @@ -20,10 +20,10 @@ plugin instead. ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. Cautious when file name variation is high, this can increase the cardinality - ## significantly. Read more about cardinality here: + ## to disable. Cautious when file name variation is high, this can increase the cardinality + ## significantly. Read more about cardinality here: ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" ``` diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index 81fc75908e798..9c0d4d79dd3b9 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -2,7 +2,7 @@ Reports the number and total size of files in specified directories. -### Configuration: +## Configuration ```toml [[inputs.filecount]] @@ -42,7 +42,7 @@ Reports the number and total size of files in specified directories. 
mtime = "0s" ``` -### Metrics +## Metrics - filecount - tags: @@ -51,9 +51,9 @@ Reports the number and total size of files in specified directories. - count (integer) - size_bytes (integer) -### Example Output: +## Example Output -``` +```shell filecount,directory=/var/cache/apt count=7i,size_bytes=7438336i 1530034445000000000 filecount,directory=/tmp count=17i,size_bytes=28934786i 1530034445000000000 ``` diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 840cafb53c06a..c8670471a9870 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -2,7 +2,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. -### Configuration: +## Configuration ```toml # Read stats about given file(s) @@ -16,22 +16,22 @@ The filestat plugin gathers metrics about file existence, size, and other stats. md5 = false ``` -### Measurements & Fields: +## Measurements & Fields - filestat - - exists (int, 0 | 1) - - size_bytes (int, bytes) - - modification_time (int, unix time nanoseconds) - - md5 (optional, string) + - exists (int, 0 | 1) + - size_bytes (int, bytes) + - modification_time (int, unix time nanoseconds) + - md5 (optional, string) -### Tags: +## Tags - All measurements have the following tags: - - file (the path the to file, as specified in the config) + - file (the path the to file, as specified in the config) -### Example Output: +### Example -``` +```shell $ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test * Plugin: filestat, Collection 1 > filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351 diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md index 7e1f351fa0b7f..9b118c0814a78 100644 --- a/plugins/inputs/fireboard/README.md +++ b/plugins/inputs/fireboard/README.md @@ -4,7 +4,7 @@ The fireboard plugin gathers the real time temperature data from fireboard thermometers. In order to use this input plugin, you'll need to sign up to use the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). -### Configuration +## Configuration ```toml [[inputs.fireboard]] @@ -16,23 +16,23 @@ the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). # http_timeout = 4 ``` -#### auth_token +### auth_token In lieu of requiring a username and password, this plugin requires an authentication token that you can generate using the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html#Authentication). -#### url +### url While there should be no reason to override the URL, the option is available in case Fireboard changes their site, etc. -#### http_timeout +### http_timeout If you need to increase the HTTP timeout, you can do so here. You can set this value in seconds. The default value is four (4) seconds. -### Metrics +## Metrics The Fireboard REST API docs have good examples of the data that is available, currently this input only returns the real time temperatures. Temperature @@ -47,12 +47,12 @@ values are included if they are less than a minute old. - fields: - temperature (float, unit) -### Example Output +## Example This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get this information. 
-``` +```shell fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 ``` diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md index 3fabbddb75012..a7947ab2a7397 100644 --- a/plugins/inputs/fluentd/README.md +++ b/plugins/inputs/fluentd/README.md @@ -7,7 +7,8 @@ You might need to adjust your fluentd configuration, in order to reduce series c According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. example configuration with `@id` parameter for http plugin: -``` + +```text @type http @id http @@ -15,7 +16,7 @@ example configuration with `@id` parameter for http plugin: ``` -### Configuration: +## Configuration ```toml # Read metrics exposed by fluentd in_monitor plugin @@ -29,30 +30,30 @@ example configuration with `@id` parameter for http plugin: ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) exclude = [ - "monitor_agent", - "dummy", + "monitor_agent", + "dummy", ] ``` -### Measurements & Fields: +## Measurements & Fields Fields may vary depending on the plugin type - fluentd - - retry_count (float, unit) - - buffer_queue_length (float, unit) - - buffer_total_queued_size (float, unit) + - retry_count (float, unit) + - buffer_queue_length (float, unit) + - buffer_total_queued_size (float, unit) -### Tags: +## Tags - All measurements have the following tags: - - plugin_id (unique plugin id) - - plugin_type (type of the plugin e.g. s3) + - plugin_id (unique plugin id) + - plugin_type (type of the plugin e.g. s3) - plugin_category (plugin category e.g. output) -### Example Output: +## Example Output -``` +```shell $ telegraf --config fluentd.conf --input-filter fluentd --test * Plugin: inputs.fluentd, Collection 1 > fluentd,host=T440s,plugin_id=object:9f748c,plugin_category=input,plugin_type=dummy buffer_total_queued_size=0,buffer_queue_length=0,retry_count=0 1492006105000000000 diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index a920a48f54e1d..ed47cdfc4766c 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -5,14 +5,14 @@ Gather repository information from [GitHub][] hosted repositories. **Note:** Telegraf also contains the [webhook][] input which can be used as an alternative method for collecting repository information. -### Configuration +## Configuration ```toml [[inputs.github]] ## List of repositories to monitor repositories = [ - "influxdata/telegraf", - "influxdata/influxdb" + "influxdata/telegraf", + "influxdata/influxdb" ] ## Github API access token. Unauthenticated requests are limited to 60 per hour. @@ -25,11 +25,11 @@ alternative method for collecting repository information. # http_timeout = "5s" ## List of additional fields to query. - ## NOTE: Getting those fields might involve issuing additional API-calls, so please - ## make sure you do not exceed the rate-limit of GitHub. - ## - ## Available fields are: - ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. 
+ ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) # additional_fields = [] ``` @@ -52,7 +52,7 @@ alternative method for collecting repository information. When the [internal][] input is enabled: -+ internal_github +- internal_github - tags: - access_token - An obfuscated reference to the configured access token or "Unauthenticated" - fields: @@ -72,7 +72,7 @@ In the following we list the available options with the required API-calls and t ### Example Output -``` +```shell github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000 ``` diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index aa940f76d4e14..e7bbee0ea71dd 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -2,11 +2,11 @@ This plugin consumes telemetry data based on the [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. This input plugin is vendor-agnostic and is supported on any platform that supports the gNMI spec. -For Cisco devices: -It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. +For Cisco devices: +It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. -### Configuration +## Configuration ```toml [[inputs.gnmi]] @@ -66,8 +66,9 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64- # heartbeat_interval = "60s" ``` -### Example Output -``` +## Example Output + +```shell ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 ``` diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 6a835f1d60a4f..8f07147c6b5eb 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -4,15 +4,14 @@ The Graylog plugin can collect data from remote Graylog service URLs. Plugin currently support two type of end points:- -- multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) -- namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) +- multiple (e.g. `http://[graylog-server-ip]:12900/system/metrics/multiple`) +- namespace (e.g. 
`http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}`) End Point can be a mix of one multiple end point and several namespaces end points - Note: if namespace end point specified metrics array will be ignored for that call. -### Configuration: +## Configuration ```toml # Read flattened metrics from one or more GrayLog HTTP endpoints @@ -52,4 +51,4 @@ Note: if namespace end point specified metrics array will be ignored for that ca # insecure_skip_verify = false ``` -Please refer to GrayLog metrics api browser for full metric end points http://host:12900/api-browser +Please refer to GrayLog metrics api browser for full metric end points `http://host:12900/api-browser` diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index 5739969e3df01..108d3bc28dad6 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -6,9 +6,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -172,8 +172,8 @@ func TestHttpJson500(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -183,8 +183,8 @@ func TestHttpJsonBadJson(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response objectgT @@ -194,6 +194,6 @@ func TestHttpJsonEmptyResponse(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 86fbb986b696a..4ce0070d91820 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -5,7 +5,7 @@ The [HAProxy](http://www.haproxy.org/) input plugin gathers using the [stats socket](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9.3) or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9) of a HAProxy server. -### Configuration: +## Configuration ```toml # Read metrics of HAProxy, via socket or HTTP stats page @@ -40,7 +40,7 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management. # insecure_skip_verify = false ``` -#### HAProxy Configuration +### HAProxy Configuration The following information may be useful when getting started, but please consult the HAProxy documentation for complete and up to date instructions. @@ -51,8 +51,7 @@ settings. To enable the unix socket begin by reading about the [`stats socket`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#3.1-stats%20socket) option. - -#### servers +### servers Server addresses must explicitly start with 'http' if you wish to use HAProxy status page. Otherwise, addresses will be assumed to be an UNIX socket and @@ -65,14 +64,14 @@ To use HTTP Basic Auth add the username and password in the userinfo section of the URL: `http://user:password@1.2.3.4/haproxy?stats`. The credentials are sent via the `Authorization` header and not using the request URL. 
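For example, a minimal sketch of such a configuration, reusing the placeholder credentials and address from the example URL above:

```toml
# Poll the HTTP stats page with Basic Auth; user:password is a placeholder and
# must match the credentials configured for the stats page in HAProxy.
[[inputs.haproxy]]
  servers = ["http://user:password@1.2.3.4/haproxy?stats"]
```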
- -#### keep_field_names +### keep_field_names By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names. The following renames are made: + - `pxname` -> `proxy` - `svname` -> `sv` - `act` -> `active_servers` @@ -86,7 +85,7 @@ The following renames are made: - `hrsp_5xx` -> `http_response.5xx` - `hrsp_other` -> `http_response.other` -### Metrics: +## Metrics For more details about collected metrics reference the [HAProxy CSV format documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). @@ -110,7 +109,8 @@ documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). - `lastsess` (int) - **all other stats** (int) -### Example Output: -``` +## Example Output + +```shell haproxy,server=/run/haproxy/admin.sock,proxy=public,sv=FRONTEND,type=frontend http_response.other=0i,req_rate_max=1i,comp_byp=0i,status="OPEN",rate_lim=0i,dses=0i,req_rate=0i,comp_rsp=0i,bout=9287i,comp_in=0i,mode="http",smax=1i,slim=2000i,http_response.1xx=0i,conn_rate=0i,dreq=0i,ereq=0i,iid=2i,rate_max=1i,http_response.2xx=1i,comp_out=0i,intercepted=1i,stot=2i,pid=1i,http_response.5xx=1i,http_response.3xx=0i,http_response.4xx=0i,conn_rate_max=1i,conn_tot=2i,dcon=0i,bin=294i,rate=0i,sid=0i,req_tot=2i,scur=0i,dresp=0i 1513293519000000000 ``` diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index d2d3e4f13ec89..71801a4eb1447 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -4,7 +4,7 @@ This plugin reads data from hddtemp daemon. Hddtemp should be installed and its daemon running. -### Configuration +## Configuration ```toml [[inputs.hddtemp]] @@ -19,7 +19,7 @@ Hddtemp should be installed and its daemon running. # devices = ["sda", "*"] ``` -### Metrics +## Metrics - hddtemp - tags: @@ -31,10 +31,9 @@ Hddtemp should be installed and its daemon running. - fields: - temperature +## Example output -### Example output - -``` +```shell hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000 hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000 diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 769022049d17a..44be91bb28bf9 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,7 +3,6 @@ package hddtemp import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" @@ -44,7 +43,7 @@ func TestFetch(t *testing.T) { err := hddTemp.Gather(acc) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) var tests = []struct { fields map[string]interface{} diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 95591b9f0ad22..11385806dd8ea 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -2,8 +2,7 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The endpoint should have metrics formatted in one of the supported [input data formats](../../../docs/DATA_FORMATS_INPUT.md). 
Each data format has its own unique set of configuration options which can be added to the input configuration. - -### Configuration: +## Configuration ```toml # Read formatted metrics from one or more HTTP endpoints @@ -73,7 +72,7 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ``` -### Metrics: +## Metrics The metrics collected by this input plugin will depend on the configured `data_format` and the payload returned by the HTTP endpoint(s). @@ -83,6 +82,6 @@ The default values below are added if the input format does not specify a value: - tags: - url -### Optional Cookie Authentication Settings: +## Optional Cookie Authentication Settings The optional Cookie Authentication Settings will retrieve a cookie from the given authorization endpoint, and use it in subsequent API requests. This is useful for services that do not provide OAuth or Basic Auth authentication, e.g. the [Tesla Powerwall API](https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network), which uses a Cookie Auth Body to retrieve an authorization cookie. The Cookie Auth Renewal interval will renew the authorization by retrieving a new cookie at the given interval. diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index a87ec3f833890..9eebb3cd9a2aa 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,15 +1,15 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in -[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] -or [`influxdb_v2_listener`][influxdb_v2_listener] instead. +HTTP. Metrics may be sent in any supported [data format][data_format]. For metrics in +[InfluxDB Line Protocol][line_protocol] it's recommended to use the [`influxdb_listener`][influxdb_listener] +or [`influxdb_v2_listener`][influxdb_v2_listener] instead. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener] or [`influxdb_v2_listener`][influxdb_v2_listener]. -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -69,24 +69,27 @@ This is a sample configuration for the plugin. data_format = "influx" ``` -### Metrics: +## Metrics Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`. 
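For example, a minimal sketch (assuming the `service_address`, `paths`, `data_source`, and `data_format` option names from the sample configuration above, and picking the JSON parser purely for illustration) that would accept the JSON payload sent by the second curl command in the Troubleshooting section below:

```toml
[[inputs.http_listener_v2]]
  ## Listen on the address and path used by the curl examples below.
  service_address = ":8080"
  paths = ["/telegraf"]

  ## Read the payload from the request body and parse it as JSON.
  data_source = "body"
  data_format = "json"
```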
-### Troubleshooting: +## Troubleshooting -**Send Line Protocol** -``` +Send Line Protocol: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -**Send JSON** -``` +Send JSON: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' ``` -**Send query params** -``` +Send query params: + +```shell curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' ``` diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index d2a2e5f35214e..85dbf89f14765 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -4,6 +4,7 @@ import ( "compress/gzip" "crypto/subtle" "crypto/tls" + "errors" "io" "net" "net/http" @@ -50,12 +51,15 @@ type HTTPListenerV2 struct { BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` HTTPHeaderTags map[string]string `toml:"http_header_tags"` + tlsint.ServerConfig + tlsConf *tls.Config TimeFunc Log telegraf.Logger - wg sync.WaitGroup + wg sync.WaitGroup + close chan struct{} listener net.Listener @@ -154,44 +158,34 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { h.acc = acc - tlsConf, err := h.ServerConfig.TLSConfig() - if err != nil { - return err - } - - server := &http.Server{ - Addr: h.ServiceAddress, - Handler: h, - ReadTimeout: time.Duration(h.ReadTimeout), - WriteTimeout: time.Duration(h.WriteTimeout), - TLSConfig: tlsConf, - } - - var listener net.Listener - if tlsConf != nil { - listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) - } else { - listener, err = net.Listen("tcp", h.ServiceAddress) - } - if err != nil { - return err - } - h.listener = listener - h.Port = listener.Addr().(*net.TCPAddr).Port + server := h.createHTTPServer() h.wg.Add(1) go func() { defer h.wg.Done() if err := server.Serve(h.listener); err != nil { - h.Log.Errorf("Serve failed: %v", err) + if !errors.Is(err, net.ErrClosed) { + h.Log.Errorf("Serve failed: %v", err) + } + close(h.close) } }() - h.Log.Infof("Listening on %s", listener.Addr().String()) + h.Log.Infof("Listening on %s", h.listener.Addr().String()) return nil } +func (h *HTTPListenerV2) createHTTPServer() *http.Server { + return &http.Server{ + Addr: h.ServiceAddress, + Handler: h, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), + TLSConfig: h.tlsConf, + } +} + // Stop cleans up all resources func (h *HTTPListenerV2) Stop() { if h.listener != nil { @@ -202,6 +196,28 @@ func (h *HTTPListenerV2) Stop() { h.wg.Wait() } +func (h *HTTPListenerV2) Init() error { + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + var listener net.Listener + if tlsConf != nil { + listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) + } else { + listener, err = net.Listen("tcp", h.ServiceAddress) + } + if err != nil { + return err + } + h.tlsConf = tlsConf + h.listener = listener + h.Port = listener.Addr().(*net.TCPAddr).Port + + return nil +} + func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { handler := h.serveWrite @@ -213,6 +229,13 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { + select { + case <-h.close: + res.WriteHeader(http.StatusGone) + return + default: + } 
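+	// Note: h.close is closed by the Serve goroutine when it exits, so the
+	// select above turns any write arriving after that point into a 410 Gone.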
+ // Check that the content length is not too large for us to handle. if req.ContentLength > int64(h.MaxBodySize) { if err := tooLarge(res); err != nil { @@ -393,6 +416,7 @@ func init() { Paths: []string{"/telegraf"}, Methods: []string{"POST", "PUT"}, DataSource: body, + close: make(chan struct{}), } }) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index bf320d6f05174..ddbb5be64ed52 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -56,6 +56,7 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { TimeFunc: time.Now, MaxBodySize: config.Size(70000), DataSource: "body", + close: make(chan struct{}), } return listener } @@ -78,6 +79,7 @@ func newTestHTTPSListenerV2() *HTTPListenerV2 { Parser: parser, ServerConfig: *pki.TLSServerConfig(), TimeFunc: time.Now, + close: make(chan struct{}), } return listener @@ -117,10 +119,10 @@ func TestInvalidListenerConfig(t *testing.T) { TimeFunc: time.Now, MaxBodySize: config.Size(70000), DataSource: "body", + close: make(chan struct{}), } - acc := &testutil.Accumulator{} - require.Error(t, listener.Start(acc)) + require.Error(t, listener.Init()) // Stop is called when any ServiceInput fails to start; it must succeed regardless of state listener.Stop() @@ -131,6 +133,7 @@ func TestWriteHTTPSNoClientAuth(t *testing.T) { listener.TLSAllowedCACerts = nil acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -155,6 +158,7 @@ func TestWriteHTTPSWithClientAuth(t *testing.T) { listener := newTestHTTPSListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -169,6 +173,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { listener := newTestHTTPAuthListener() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -187,6 +192,7 @@ func TestWriteHTTP(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -237,6 +243,7 @@ func TestWriteHTTPWithPathTag(t *testing.T) { listener.PathTag = true acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -260,6 +267,7 @@ func TestWriteHTTPWithMultiplePaths(t *testing.T) { listener.PathTag = true acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -292,6 +300,7 @@ func TestWriteHTTPNoNewline(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -319,9 +328,11 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { Parser: parser, MaxBodySize: config.Size(len(hugeMetric)), TimeFunc: time.Now, + close: make(chan struct{}), } acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -342,9 +353,11 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { Parser: parser, MaxBodySize: config.Size(4096), TimeFunc: time.Now, + close: make(chan struct{}), } acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, 
listener.Start(acc)) defer listener.Stop() @@ -359,6 +372,8 @@ func TestWriteHTTPGzippedData(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -391,6 +406,7 @@ func TestWriteHTTPSnappyData(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -429,6 +445,7 @@ func TestWriteHTTPHighTraffic(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -464,6 +481,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -478,6 +496,7 @@ func TestWriteHTTPInvalid(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -492,6 +511,7 @@ func TestWriteHTTPEmpty(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -507,6 +527,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -545,6 +566,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -576,6 +598,7 @@ func TestWriteHTTPQueryParams(t *testing.T) { listener.Parser = parser acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -597,6 +620,7 @@ func TestWriteHTTPFormData(t *testing.T) { listener.Parser = parser acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 81b512e80743f..bd800457faf8a 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -2,7 +2,7 @@ This input plugin checks HTTP/HTTPS connections. -### Configuration: +## Configuration ```toml # HTTP/HTTPS request given an address a method and a timeout @@ -79,7 +79,7 @@ This input plugin checks HTTP/HTTPS connections. # interface = "eth0" ``` -### Metrics: +## Metrics - http_response - tags: @@ -96,7 +96,7 @@ This input plugin checks HTTP/HTTPS connections. 
- result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) -#### `result` / `result_code` +### `result` / `result_code` Upon finishing polling the target server, the plugin registers the result of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding with that tag value. @@ -112,9 +112,8 @@ This tag is used to expose network and plugin errors. HTTP errors are considered |dns_error | 5 |There was a DNS error while attempting to connect to the host| |response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| +## Example Output -### Example Output: - -``` +```shell http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 799f664d1e7b0..f0da6294aa263 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -28,7 +28,7 @@ const ( // HTTPResponse struct type HTTPResponse struct { - Address string // deprecated in 1.12 + Address string `toml:"address" deprecated:"1.12.0;use 'urls' instead"` URLs []string `toml:"urls"` HTTPProxy string `toml:"http_proxy"` Body string diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 5d109d0a35439..0d537f5358433 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -16,12 +16,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Receives a list with fields that are expected to be absent @@ -168,8 +168,8 @@ func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[stri func TestHeaders(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cHeader := r.Header.Get("Content-Type") - assert.Equal(t, "Hello", r.Host) - assert.Equal(t, "application/json", cHeader) + require.Equal(t, "Hello", r.Host) + require.Equal(t, "application/json", cHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() @@ -1100,7 +1100,7 @@ func TestRedirect(t *testing.T) { func TestBasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { aHeader := r.Header.Get("Authorization") - assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) + require.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() @@ -1277,7 +1277,7 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { func TestSNI(t *testing.T) { ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) + require.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) w.WriteHeader(http.StatusOK) })) defer ts.Close() diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 
19fe014457734..8782e71e3eb44 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,10 +1,10 @@ # HTTP JSON Input Plugin -The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. +## DEPRECATED in Telegraf v1.6: Use [HTTP input plugin][] as replacement -Deprecated (1.6): use the [http](../http) input. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. -### Configuration: +## Configuration ```toml [[inputs.httpjson]] @@ -54,28 +54,28 @@ Deprecated (1.6): use the [http](../http) input. # apiVersion = "v1" ``` -### Measurements & Fields: +## Measurements & Fields - httpjson - - response_time (float): Response time in seconds + - response_time (float): Response time in seconds Additional fields are dependant on the response of the remote service being polled. -### Tags: +## Tags - All measurements have the following tags: - - server: HTTP origin as defined in configuration as `servers`. + - server: HTTP origin as defined in configuration as `servers`. Any top level keys listed under `tag_keys` in the configuration are added as tags. Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects. - -### Examples Output: +## Examples Output This plugin understands responses containing a single JSON object, or a JSON Array of Objects. **Object Output:** Given the following response body: + ```json { "a": 0.5, @@ -87,6 +87,7 @@ Given the following response body: "service": "service01" } ``` + The following metric is produced: `httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` @@ -133,3 +134,5 @@ If the service returns an array of objects, one metric is be created for each ob `httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` `httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` + +[HTTP input plugin]: /plugins/inputs/http diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index b203238a94037..c522ebe9978d2 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -8,9 +8,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -212,7 +212,7 @@ func TestHttpJson200(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(service.Gather) require.NoError(t, err) - assert.Equal(t, 12, acc.NFields()) + require.Equal(t, 12, acc.NFields()) // Set responsetime for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 @@ -231,7 +231,7 @@ func TestHttpJson200(t *testing.T) { func TestHttpJsonGET_URL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -304,7 +304,7 @@ func TestHttpJsonGET(t *testing.T) { } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - 
assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) _, err := fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -378,8 +378,8 @@ func TestHttpJsonPOST(t *testing.T) { } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) - assert.NoError(t, err) - assert.Equal(t, "api_key=mykey", string(body)) + require.NoError(t, err) + require.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) _, err = fmt.Fprintln(w, validJSON2) require.NoError(t, err) @@ -453,8 +453,8 @@ func TestHttpJson500(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to HTTP 405 @@ -465,8 +465,8 @@ func TestHttpJsonBadMethod(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -476,8 +476,8 @@ func TestHttpJsonBadJson(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response object @@ -486,7 +486,7 @@ func TestHttpJsonEmptyResponse(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.NoError(t, err) + require.NoError(t, err) } // Test that the proper values are ignored or collected @@ -502,7 +502,7 @@ func TestHttpJson200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 4, acc.NFields()) + require.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv, "role": "master", "build": "123"} fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} @@ -540,22 +540,22 @@ func TestHttpJsonArray200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 8, acc.NFields()) - assert.Equal(t, uint64(4), acc.NMetrics()) + require.Equal(t, 8, acc.NFields()) + require.Equal(t, uint64(4), acc.NMetrics()) for _, m := range acc.Metrics { if m.Tags["role"] == "master" { - assert.Equal(t, "123", m.Tags["build"]) - assert.Equal(t, float64(15), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "123", m.Tags["build"]) + require.Equal(t, float64(15), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else if m.Tags["role"] == "slave" { - assert.Equal(t, "456", m.Tags["build"]) - assert.Equal(t, float64(17), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "456", m.Tags["build"]) + require.Equal(t, float64(17), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else { - assert.FailNow(t, "unknown metric") + require.FailNow(t, "unknown metric") } } } diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md index fb36d36f3730f..c6ecadb0c704c 100644 --- 
a/plugins/inputs/icinga2/README.md +++ b/plugins/inputs/icinga2/README.md @@ -6,7 +6,7 @@ The icinga2 plugin uses the icinga2 remote API to gather status on running services and hosts. You can read Icinga2's documentation for their remote API [here](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api) -### Configuration: +## Configuration ```toml # Description @@ -32,24 +32,24 @@ services and hosts. You can read Icinga2's documentation for their remote API # insecure_skip_verify = true ``` -### Measurements & Fields: +## Measurements & Fields - All measurements have the following fields: - - name (string) - - state_code (int) + - name (string) + - state_code (int) -### Tags: +## Tags - All measurements have the following tags: - - check_command - The short name of the check command - - display_name - The name of the service or host - - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services - - source - The icinga2 host - - port - The icinga2 port - - scheme - The icinga2 protocol (http/https) - - server - The server the check_command is running for + - check_command - The short name of the check command + - display_name - The name of the service or host + - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services + - source - The icinga2 host + - port - The icinga2 port + - scheme - The icinga2 protocol (http/https) + - server - The server the check_command is running for -### Sample Queries: +## Sample Queries ```sql SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status @@ -58,9 +58,9 @@ SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // SELECT * FROM "icinga2_services" WHERE state_code = 3 AND time > now() - 24h // Service with UNKNOWN status ``` -### Example Output: +## Example Output -``` +```text $ ./telegraf -config telegraf.conf -input-filter icinga2 -test icinga2_hosts,display_name=router-fr.eqx.fr,check_command=hostalive-custom,host=test-vm,source=localhost,port=5665,scheme=https,state=ok name="router-fr.eqx.fr",state=0 1492021603000000000 ``` diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md index bc5b03543c375..28eed67c7b376 100644 --- a/plugins/inputs/infiniband/README.md +++ b/plugins/inputs/infiniband/README.md @@ -6,14 +6,14 @@ system. These are the counters that can be found in **Supported Platforms**: Linux -### Configuration +## Configuration ```toml [[inputs.infiniband]] # no configuration ``` -### Metrics +## Metrics Actual metrics depend on the InfiniBand devices, the plugin uses a simple mapping from counter -> counter value. @@ -49,10 +49,8 @@ mapping from counter -> counter value. 
- unicast_xmit_packets (integer) - VL15_dropped (integer) +## Example Output - -### Example Output - -``` +```shell infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 ``` diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 9a2db484601fd..3d4ac5a8d40de 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -1,13 +1,13 @@ # InfluxDB Input Plugin -The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our -[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) -for detailed information about `influxdb` metrics. +The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our +[documentation](https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/) +for detailed information about `influxdb` metrics. This plugin can also gather metrics from endpoints that expose InfluxDB-formatted endpoints. See below for more information. -### Configuration: +## Configuration ```toml # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints @@ -37,62 +37,230 @@ InfluxDB-formatted endpoints. See below for more information. timeout = "5s" ``` -### Measurements & Fields +## Measurements & Fields **Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: -- influxdb - - n_shards: The total number of shards in the specified database. -- influxdb_ae _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. -- influxdb_cluster _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. -- influxdb_cq: The metrics related to continuous queries (CQs). -- influxdb_database: The database metrics are being collected from. -- influxdb_hh _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. -- influxdb_hh_database _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. -- influxdb_hh_processor _(Enterprise Only)_ : Statistics stored for a single queue (shard). -- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. -- influxdb_measurement: The measurement that metrics are collected from. -- influxdb_memstats: Statistics about the memory allocator in the specified database. - - heap_inuse: The number of bytes in in-use spans. - - heap_released: The number of bytes of physical memory returned to the OS. - - mspan_inuse: The number of bytes in in-use mspans. - - total_alloc: The cumulative bytes allocated for heap objects. - - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. 
- - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.) - - frees: The cumulative number of freed (live) heap objects. - - heap_idle: The number of bytes of idle heap objects. - - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds. - - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. - - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. - - mcache_sys: The bytes of memory obtained from the OS for mcache structures. - - next_gc: The target heap size of the next garbage collection cycle. - - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle. - - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys. - - alloc: The currently allocated number of bytes of heap objects. - - stack_inuse: The number of bytes in in-use stacks. - - stack_sys: The total number of bytes of memory obtained from the stack in use. - - buck_hash_sys: The bytes of memory in profiling bucket hash tables. - - gc_sys: The bytes of memory in garbage collection metadata. - - num_gc: The number of completed garbage collection cycles. - - heap_alloc: The size, in bytes, of all heap objects. - - heap_objects: The number of allocated heap objects. - - mspan_sys: The bytes of memory obtained from the OS for mspan. - - mcache_inuse: The bytes of allocated mcache structures. - - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch). -- influxdb_queryExecutor: Query Executor metrics of the InfluxDB engine. -- influxdb_rpc _(Enterprise Only)_ : Statistics are related to the use of RPC calls within InfluxDB Enterprise clusters. -- influxdb_runtime: The shard metrics are collected from. -- influxdb_shard: The shard metrics are collected from. -- influxdb_subscriber: The InfluxDB subscription that metrics are collected from. -- influxdb_tsm1_cache: The TSM cache that metrics are collected from. -- influxdb_tsm1_engine: The TSM storage engine that metrics are collected from. -- influxdb_tsm1_filestore: The TSM file store that metrics are collected from. -- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from. -- influxdb_write: The total writes to the specified database. - -### Example Output: +- **influxdb_ae** _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. + - **bytesRx**: Number of bytes received by the data node. + - **errors**: Total number of anti-entropy jobs that have resulted in errors. + - **jobs**: Total number of jobs executed by the data node. + - **jobsActive**: Number of active (currently executing) jobs. +- **influxdb_cluster** _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. + - **copyShardReq**: Number of internal requests made to copy a shard from one data node to another. + - **createIteratorReq**: Number of read requests from other data nodes in the cluster. + - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression. + - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node. 
+ - **iteratorCostReq**: Number of internal requests for iterator cost. + - **removeShardReq**: Number of internal requests to delete a shard from this data node. Exclusively incremented by use of the influxd-ctl remove shard command. + - **writeShardFail**: Total number of internal write requests from a remote node that failed. + - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success. + - **writeShardReq**: Number of internal write requests from a remote data node, regardless of success. +- **influxdb_cq**: Metrics related to continuous queries (CQs). + - **queryFail**: Total number of continuous queries that executed but failed. + - **queryOk**: Total number of continuous queries that executed successfully. +- **influxdb_database**: Database metrics are collected from. + - **numMeasurements**: Current number of measurements in the specified database. + - **numSeries**: Current series cardinality of the specified database. +- **influxdb_hh** _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. + - **writeShardReq**: Number of initial write requests handled by the hinted handoff engine for a remote node. + - **writeShardReqPoints**: Number of write requests for each point in the initial request to the hinted handoff engine for a remote node. +- **influxdb_hh_database** _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard). + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. 
+ - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_httpd**: Metrics related to the InfluxDB HTTP server. + - **authFail**: Number of HTTP requests that were aborted due to authentication being required, but not supplied or incorrect. + - **clientError**: Number of HTTP responses due to client errors, with a 4XX HTTP status code. + - **fluxQueryReq**: Number of Flux query requests served. + - **fluxQueryReqDurationNs**: Duration (wall-time), in nanoseconds, spent executing Flux query requests. + - **pingReq**: Number of times InfluxDB HTTP server served the /ping HTTP endpoint. + - **pointsWrittenDropped**: Number of points dropped by the storage engine. + - **pointsWrittenFail**: Number of points accepted by the HTTP /write endpoint, but unable to be persisted. + - **pointsWrittenOK**: Number of points successfully accepted and persisted by the HTTP /write endpoint. + - **promReadReq**: Number of read requests to the Prometheus /read endpoint. + - **promWriteReq**: Number of write requests to the Prometheus /write endpoint. + - **queryReq**: Number of query requests. + - **queryReqDurationNs**: Total query request duration, in nanosecond (ns). + - **queryRespBytes**: Total number of bytes returned in query responses. + - **recoveredPanics**: Total number of panics recovered by the HTTP handler. + - **req**: Total number of HTTP requests served. + - **reqActive**: Number of currently active requests. + - **reqDurationNs**: Duration (wall time), in nanoseconds, spent inside HTTP requests. + - **serverError**: Number of HTTP responses due to server errors. + - **statusReq**: Number of status requests served using the HTTP /status endpoint. + - **valuesWrittenOK**: Number of values (fields) successfully accepted and persisted by the HTTP /write endpoint. + - **writeReq**: Number of write requests served using the HTTP /write endpoint. + - **writeReqActive**: Number of currently active write requests. + - **writeReqBytes**: Total number of bytes of line protocol data received by write requests, using the HTTP /write endpoint. + - **writeReqDurationNs**: Duration, in nanoseconds, of write requests served using the /write HTTP endpoint. +- **influxdb_memstats**: Statistics about the memory allocator in the specified database. + - **Alloc**: Number of bytes allocated to heap objects. + - **BuckHashSys**: Number of bytes of memory in profiling bucket hash tables. + - **Frees**: Cumulative count of heap objects freed. + - **GCCPUFraction**: fraction of InfluxDB's available CPU time used by the garbage collector (GC) since InfluxDB started. + - **GCSys**: Number of bytes of memory in garbage collection metadata. + - **HeapAlloc**: Number of bytes of allocated heap objects. + - **HeapIdle**: Number of bytes in idle (unused) spans. + - **HeapInuse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. + - **LastGC**: Time the last garbage collection finished. 
+ - **Lookups**: Number of pointer lookups performed by the runtime. + - **MCacheInuse**: Number of bytes of allocated mcache structures. + - **MCacheSys**: Number of bytes of memory obtained from the OS for mcache structures. + - **MSpanInuse**: Number of bytes of allocated mspan structures. + - **MSpanSys**: Number of bytes of memory obtained from the OS for mspan structures. + - **Mallocs**: Cumulative count of heap objects allocated. + - **NextGC**: Target heap size of the next GC cycle. + - **NumForcedGC**: Number of GC cycles that were forced by the application calling the GC function. + - **NumGC**: Number of completed GC cycles. + - **OtherSys**: Number of bytes of memory in miscellaneous off-heap runtime allocations. + - **PauseTotalNs**: Cumulative nanoseconds in GC stop-the-world pauses since the program started. + - **StackInuse**: Number of bytes in stack spans. + - **StackSys**: Number of bytes of stack memory obtained from the OS. + - **Sys**: Total bytes of memory obtained from the OS. + - **TotalAlloc**: Cumulative bytes allocated for heap objects. +- **influxdb_queryExecutor**: Metrics related to usage of the Query Executor of the InfluxDB engine. + - **queriesActive**: Number of active queries currently being handled. + - **queriesExecuted**: Number of queries executed (started). + - **queriesFinished**: Number of queries that have finished executing. + - **queryDurationNs**: Total duration, in nanoseconds, of executed queries. + - **recoveredPanics**: Number of panics recovered by the Query Executor. +- **influxdb_rpc** _(Enterprise Only)_ : Statistics related to the use of RPC calls within InfluxDB Enterprise clusters. + - **idleStreams**: Number of idle multiplexed streams across all live TCP connections. + - **liveConnections**: Current number of live TCP connections to other nodes. + - **liveStreams**: Current number of live multiplexed streams across all live TCP connections. + - **rpcCalls**: Total number of RPC calls made to remote nodes. + - **rpcFailures**: Total number of RPC failures, which are RPCs that did not recover. + - **rpcReadBytes**: Total number of RPC bytes read. + - **rpcRetries**: Total number of RPC calls that retried at least once. + - **rpcWriteBytes**: Total number of RPC bytes written. + - **singleUse**: Total number of single-use connections opened using Dial. + - **singleUseOpen**: Number of single-use connections currently open. + - **totalConnections**: Total number of TCP connections that have been established. + - **totalStreams**: Total number of streams established. +- **influxdb_runtime**: Subset of memstat record statistics for the Go memory allocator. + - **Alloc**: Currently allocated number of bytes of heap objects. + - **Frees**: Cumulative number of freed (live) heap objects. + - **HeapAlloc**: Size, in bytes, of all heap objects. + - **HeapIdle**: Number of bytes of idle heap objects. + - **HeapInUse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. + - **Lookups**: Number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. + - **Mallocs**: Total number of heap objects allocated. The total number of live objects is Frees. + - **NumGC**: Number of completed GC (garbage collection) cycles. 
+ - **NumGoroutine**: Total number of Go routines. + - **PauseTotalNs**: Total duration, in nanoseconds, of total GC (garbage collection) pauses. + - **Sys**: Total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. + - **TotalAlloc**: Total number of bytes allocated for heap objects. This statistic does not decrease when objects are freed. +- **influxdb_shard**: Metrics related to InfluxDB shards. + - **diskBytes**: Size, in bytes, of the shard, including the size of the data directory and the WAL directory. + - **fieldsCreate**: Number of fields created. + - **indexType**: Type of index inmem or tsi1. + - **n_shards**: Total number of shards in the specified database. + - **seriesCreate**: Number of series created. + - **writeBytes**: Number of bytes written to the shard. + - **writePointsDropped**: Number of requests to write points t dropped from a write. + - **writePointsErr**: Number of requests to write points that failed to be written due to errors. + - **writePointsOk**: Number of points written successfully. + - **writeReq**: Total number of write requests. + - **writeReqErr**: Total number of write requests that failed due to errors. + - **writeReqOk**: Total number of successful write requests. +- **influxdb_subscriber**: InfluxDB subscription metrics. + - **createFailures**: Number of subscriptions that failed to be created. + - **pointsWritten**: Total number of points that were successfully written to subscribers. + - **writeFailures**: Total number of batches that failed to be written to subscribers. +- **influxdb_tsm1_cache**: TSM cache metrics. + - **cacheAgeMs**: Duration, in milliseconds, since the cache was last snapshotted at sample time. + - **cachedBytes**: Total number of bytes that have been written into snapshots. + - **diskBytes**: Size, in bytes, of on-disk snapshots. + - **memBytes**: Size, in bytes, of in-memory cache. + - **snapshotCount**: Current level (number) of active snapshots. + - **WALCompactionTimeMs**: Duration, in milliseconds, that the commit lock is held while compacting snapshots. + - **writeDropped**: Total number of writes dropped due to timeouts. + - **writeErr**: Total number of writes that failed. + - **writeOk**: Total number of successful writes. +- **influxdb_tsm1_engine**: TSM storage engine metrics. + - **cacheCompactionDuration** Duration (wall time), in nanoseconds, spent in cache compactions. + - **cacheCompactionErr** Number of cache compactions that have failed due to errors. + - **cacheCompactions** Total number of cache compactions that have ever run. + - **cacheCompactionsActive** Number of cache compactions that are currently running. + - **tsmFullCompactionDuration** Duration (wall time), in nanoseconds, spent in full compactions. + - **tsmFullCompactionErr** Total number of TSM full compactions that have failed due to errors. + - **tsmFullCompactionQueue** Current number of pending TMS Full compactions. + - **tsmFullCompactions** Total number of TSM full compactions that have ever run. + - **tsmFullCompactionsActive** Number of TSM full compactions currently running. + - **tsmLevel1CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 1 compactions. + - **tsmLevel1CompactionErr** Total number of TSM level 1 compactions that have failed due to errors. + - **tsmLevel1CompactionQueue** Current number of pending TSM level 1 compactions. 
+ - **tsmLevel1Compactions** Total number of TSM level 1 compactions that have ever run. + - **tsmLevel1CompactionsActive** Number of TSM level 1 compactions that are currently running. + - **tsmLevel2CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 2 compactions. + - **tsmLevel2CompactionErr** Number of TSM level 2 compactions that have failed due to errors. + - **tsmLevel2CompactionQueue** Current number of pending TSM level 2 compactions. + - **tsmLevel2Compactions** Total number of TSM level 2 compactions that have ever run. + - **tsmLevel2CompactionsActive** Number of TSM level 2 compactions that are currently running. + - **tsmLevel3CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 3 compactions. + - **tsmLevel3CompactionErr** Number of TSM level 3 compactions that have failed due to errors. + - **tsmLevel3CompactionQueue** Current number of pending TSM level 3 compactions. + - **tsmLevel3Compactions** Total number of TSM level 3 compactions that have ever run. + - **tsmLevel3CompactionsActive** Number of TSM level 3 compactions that are currently running. + - **tsmOptimizeCompactionDuration** Duration (wall time), in nanoseconds, spent during TSM optimize compactions. + - **tsmOptimizeCompactionErr** Total number of TSM optimize compactions that have failed due to errors. + - **tsmOptimizeCompactionQueue** Current number of pending TSM optimize compactions. + - **tsmOptimizeCompactions** Total number of TSM optimize compactions that have ever run. + - **tsmOptimizeCompactionsActive** Number of TSM optimize compactions that are currently running. +- **influxdb_tsm1_filestore**: The TSM file store metrics. + - **diskBytes**: Size, in bytes, of disk usage by the TSM file store. + - **numFiles**: Total number of files in the TSM file store. +- **influxdb_tsm1_wal**: The TSM Write Ahead Log (WAL) metrics. + - **currentSegmentDiskBytes**: Current size, in bytes, of the segment disk. + - **oldSegmentDiskBytes**: Size, in bytes, of the segment disk. + - **writeErr**: Number of writes that failed due to errors. + - **writeOK**: Number of writes that succeeded. +- **influxdb_write**: Metrics related to InfluxDB writes. + - **pointReq**: Total number of points requested to be written. + - **pointReqHH** _(Enterprise only)_: Total number of points received for write by this node and then enqueued into hinted handoff for the destination node. + - **pointReqLocal** _(Enterprise only)_: Total number of point requests that have been attempted to be written into a shard on the same (local) node. + - **pointReqRemote** _(Enterprise only)_: Total number of points received for write by this node but needed to be forwarded into a shard on a remote node. + - **pointsWrittenOK**: Number of points written to the HTTP /write endpoint and persisted successfully. + - **req**: Total number of batches requested to be written. + - **subWriteDrop**: Total number of batches that failed to be sent to the subscription dispatcher. + - **subWriteOk**: Total number of batches successfully sent to the subscription dispatcher. + - **valuesWrittenOK**: Number of values (fields) written to the HTTP /write endpoint and persisted successfully. + - **writeDrop**: Total number of write requests for points that have been dropped due to timestamps not matching any existing retention policies. + - **writeError**: Total number of batches of points that were not successfully written, due to a failure to write to a local or remote shard. 
+ - **writeOk**: Total number of batches of points written at the requested consistency level. + - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node, but did not meet the requested consistency level. + - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration. -``` +## Example Output + +```sh telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test * Plugin: influxdb, Collection 1 > influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636 @@ -124,7 +292,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test > influxdb_shard,host=tyrion n_shards=4i 1463590500247354636 ``` -### InfluxDB-formatted endpoints +## InfluxDB-formatted endpoints The influxdb plugin can collect InfluxDB-formatted data from JSON endpoints. Whether associated with an Influx database or not. diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index 0912c36087b75..19cc1069ae658 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -18,7 +18,7 @@ receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. -### Configuration: +## Configuration ```toml [[inputs.influxdb_listener]] @@ -64,14 +64,15 @@ submits data to InfluxDB determines the destination database. # basic_password = "barfoo" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body. -### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md index 71fa6c19bee3a..11c95c6968d17 100644 --- a/plugins/inputs/influxdb_v2_listener/README.md +++ b/plugins/inputs/influxdb_v2_listener/README.md @@ -11,7 +11,7 @@ defer to the output plugins configuration. Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration ```toml [[inputs.influxdb_v2_listener]] @@ -42,14 +42,15 @@ Telegraf minimum version: Telegraf 1.16.0 # token = "some-long-shared-secret-token" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body. -### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8186/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` diff --git a/plugins/inputs/intel_pmu/README.md b/plugins/inputs/intel_pmu/README.md new file mode 100644 index 0000000000000..92a07d14e68f6 --- /dev/null +++ b/plugins/inputs/intel_pmu/README.md @@ -0,0 +1,210 @@ +# Intel Performance Monitoring Unit Plugin + +This input plugin exposes Intel PMU (Performance Monitoring Unit) metrics available through [Linux Perf](https://perf.wiki.kernel.org/index.php/Main_Page) subsystem. + +PMU metrics gives insight into performance and health of IA processor's internal components, +including core and uncore units. With the number of cores increasing and processor topology getting more complex +the insight into those metrics is vital to assure the best CPU performance and utilization. 
+ +Performance counters are CPU hardware registers that count hardware events such as instructions executed, cache-misses suffered, or branches mispredicted. +They form a basis for profiling applications to trace dynamic control flow and identify hotspots. + +## Configuration + +```toml +# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +[[inputs.intel_pmu]] + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events sections. + [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. + ## Could vary depending on type of event, use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events sections. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" +``` + +### Modifiers + +Perf modifiers adjust event-specific perf attribute to fulfill particular requirements. +Details about perf attribute structure could be found in [perf_event_open](https://man7.org/linux/man-pages/man2/perf_event_open.2.html) syscall manual. 
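+
+As an illustration (reusing the sample entry from the configuration section above; the mask value is only that sample, not a recommendation), a qualified `events` element combines an event name with colon-separated qualifiers, whose meaning is given in the table below:
+
+```toml
+[[inputs.intel_pmu.core_events]]
+  ## "CPU_CLK_UNHALTED.THREAD_ANY" is the event name as defined in the event_definitions JSON files,
+  ## ":config1=0x4043200000000" fills perf_event_attr.config1 for this event,
+  ## and the trailing "k" modifier excludes user-space counting.
+  events = ["CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
+  cores = ["0"]
+```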
+ +General schema of configuration's `events` list element: + +```regexp +EVENT_NAME(:(config|config1|config2)=(0x[0-9a-f]{1-16})(p|k|u|h|H|I|G|D))* +``` + +where: + +| Modifier | Underlying attribute | Description | +|----------|---------------------------------|-----------------------------| +| config | perf_event_attr.config | type-specific configuration | +| config1 | perf_event_attr.config1 | extension of config | +| config2 | perf_event_attr.config2 | extension of config1 | +| p | perf_event_attr.precise_ip | skid constraint | +| k | perf_event_attr.exclude_user | don't count user | +| u | perf_event_attr.exclude_kernel | don't count kernel | +| h / H | perf_event_attr.exclude_guest | don't count in guest | +| I | perf_event_attr.exclude_idle | don't count when idle | +| G | perf_event_attr.exclude_hv | don't count hypervisor | +| D | perf_event_attr.pinned | must always be on PMU | + +## Requirements + +The plugin is using [iaevents](https://github.com/intel/iaevents) library which is a golang package that makes accessing the Linux kernel's perf interface easier. + +Intel PMU plugin, is only intended for use on **linux 64-bit** systems. + +Event definition JSON files for specific architectures can be found at [01.org](https://download.01.org/perfmon/). +A script to download the event definitions that are appropriate for your system (event_download.py) is available at [pmu-tools](https://github.com/andikleen/pmu-tools). +Please keep these files in a safe place on your system. + +## Measuring + +Plugin allows measuring both core and uncore events. During plugin initialization the event names provided by user are compared +with event definitions included in JSON files and translated to perf attributes. Next, those events are activated to start counting. +During every telegraf interval, the plugin reads proper measurement for each previously activated event. + +Each single core event may be counted severally on every available CPU's core. In contrast, uncore events could be placed in +many PMUs within specified CPU package. The plugin allows choosing core ids (core events) or socket ids (uncore events) on which the counting should be executed. +Uncore events are separately activated on all socket's PMUs, and can be exposed as separate +measurement or to be summed up as one measurement. + +Obtained measurements are stored as three values: **Raw**, **Enabled** and **Running**. Raw is a total count of event. Enabled and running are total time the event was enabled and running. +Normally these are the same. If more events are started than available counter slots on the PMU, then multiplexing +occurs and events only run part of the time. Therefore, the plugin provides a 4-th value called **scaled** which is calculated using following formula: +`raw * enabled / running`. + +Events are measured for all running processes. + +### Core event groups + +Perf allows assembling events as a group. A perf event group is scheduled onto the CPU as a unit: it will be put onto the CPU only if all of the events in the group can be put onto the CPU. +This means that the values of the member events can be meaningfully compared — added, divided (to get ratios), and so on — with each other, +since they have counted events for the same set of executed instructions [(source)](https://man7.org/linux/man-pages/man2/perf_event_open.2.html). + +> **NOTE:** +> Be aware that the plugin will throw an error when trying to create core event group of size that exceeds available core PMU counters. 
+> The error message from perf syscall will be shown as "invalid argument". If you want to check how many PMUs are supported by your Intel CPU, you can use the [cpuid](https://linux.die.net/man/1/cpuid) command. + +### Note about file descriptors + +The plugin opens a number of file descriptors dependent on number of monitored CPUs and number of monitored +counters. It can easily exceed the default per process limit of allowed file descriptors. Depending on +configuration, it might be required to increase the limit of opened file descriptors allowed. +This can be done for example by using `ulimit -n command`. + +## Metrics + +On each Telegraf interval, Intel PMU plugin transmits following data: + +### Metric Fields + +| Field | Type | Description | +|---------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | uint64 | time counter, contains time the associated perf event was enabled | +| running | uint64 | time counter, contains time the event was actually counted | +| raw | uint64 | value counter, contains event count value during the time the event was actually counted | +| scaled | uint64 | value counter, contains approximated value of counter if the event was continuously counted, using scaled = raw * (enabled / running) formula | + +### Metric Tags - common + +| Tag | Description | +|-------|------------------------------| +| host | hostname as read by Telegraf | +| event | name of the event | + +### Metric Tags - core events + +| Tag | Description | +|------------|----------------------------------------------------------------------------------------------------| +| cpu | CPU id as identified by linux OS (either logical cpu id when HT on or physical cpu id when HT off) | +| events_tag | (optional) tag as defined in "intel_pmu.core_events" configuration element | + +### Metric Tags - uncore events + +| Tag | Description | +|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| socket | socket number as identified by linux OS (physical_package_id) | +| unit_type | type of event-capable PMU that the event was counted for, provides category of PMU that the event was counted for, e.g. cbox for uncore_cbox_1, r2pcie for uncore_r2pcie etc. | +| unit | name of event-capable PMU that the event was counted for, as listed in /sys/bus/event_source/devices/ e.g. uncore_cbox_1, uncore_imc_1 etc. 
Present for non-aggregated uncore events only | +| events_tag| (optional) tag as defined in "intel_pmu.uncore_events" configuration element | + +## Example outputs + +Event group: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871237051i,running=2871237051i,raw=1171711i,scaled=1171711i 1621254096000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871240713i,running=2871240713i,raw=72340716i,scaled=72340716i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871118275i,running=2871118275i,raw=1646752i,scaled=1646752i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz raw=108802421i,scaled=108802421i,enabled=2871120107i,running=2871120107i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871143950i,running=2871143950i,raw=1316834i,scaled=1316834i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871074681i,running=2871074681i,raw=68728436i,scaled=68728436i 1621254096000000000 +``` + +Uncore event not aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_0,unit_type=cbox enabled=2870630747i,running=2870630747i,raw=183996i,scaled=183996i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_1,unit_type=cbox enabled=2870608194i,running=2870608194i,raw=185703i,scaled=185703i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_2,unit_type=cbox enabled=2870600211i,running=2870600211i,raw=187331i,scaled=187331i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_3,unit_type=cbox enabled=2870593914i,running=2870593914i,raw=184228i,scaled=184228i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_4,unit_type=cbox scaled=195355i,enabled=2870558952i,running=2870558952i,raw=195355i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_5,unit_type=cbox enabled=2870554131i,running=2870554131i,raw=197756i,scaled=197756i 1621254096000000000 +``` + +Uncore event aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit_type=cbox enabled=13199712335i,running=13199712335i,raw=467485i,scaled=467485i 1621254412000000000 +``` + +Time multiplexing: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,host=xyz raw=2947727i,scaled=4428970i,enabled=2201071844i,running=1464935978i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,host=xyz running=1465155618i,raw=302553190i,scaled=454511623i,enabled=2201035323i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK,host=xyz enabled=2200994057i,running=1466812391i,raw=3177535i,scaled=4767982i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK_ANY,host=xyz enabled=2200963921i,running=1470523496i,raw=3359272i,scaled=5027894i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES_ANY,host=xyz enabled=2200933946i,running=1470322480i,raw=23631950i,scaled=35374798i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES,host=xyz 
raw=18767833i,scaled=28169827i,enabled=2200888514i,running=1466317384i 1621254412000000000 +``` diff --git a/plugins/inputs/intel_pmu/activators.go b/plugins/inputs/intel_pmu/activators.go new file mode 100644 index 0000000000000..1750c72789c00 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators.go @@ -0,0 +1,205 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + + ia "github.com/intel/iaevents" +) + +type placementMaker interface { + makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) + makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) +} + +type iaPlacementMaker struct{} + +func (iaPlacementMaker) makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + var err error + var corePlacements []ia.PlacementProvider + + switch len(cores) { + case 0: + return nil, errors.New("no cores provided") + case 1: + corePlacements, err = ia.NewCorePlacements(factory, cores[0]) + if err != nil { + return nil, err + } + default: + corePlacements, err = ia.NewCorePlacements(factory, cores[0], cores[1:]...) + if err != nil { + return nil, err + } + } + return corePlacements, nil +} + +func (iaPlacementMaker) makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + return ia.NewUncoreAllPlacements(factory, socket) +} + +type eventsActivator interface { + activateEvent(ia.Activator, ia.PlacementProvider, ia.Options) (*ia.ActiveEvent, error) + activateGroup(ia.PlacementProvider, []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) + activateMulti(ia.MultiActivator, []ia.PlacementProvider, ia.Options) (*ia.ActiveMultiEvent, error) +} + +type iaEventsActivator struct{} + +func (iaEventsActivator) activateEvent(a ia.Activator, p ia.PlacementProvider, o ia.Options) (*ia.ActiveEvent, error) { + return a.Activate(p, ia.NewEventTargetProcess(-1, 0), o) +} + +func (iaEventsActivator) activateGroup(p ia.PlacementProvider, e []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) { + return ia.ActivateGroup(p, ia.NewEventTargetProcess(-1, 0), e) +} + +func (iaEventsActivator) activateMulti(a ia.MultiActivator, p []ia.PlacementProvider, o ia.Options) (*ia.ActiveMultiEvent, error) { + return a.ActivateMulti(p, ia.NewEventTargetProcess(-1, 0), o) +} + +type entitiesActivator interface { + activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesActivator struct { + placementMaker placementMaker + perfActivator eventsActivator +} + +func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, coreEventsEntity := range coreEntities { + err := ea.activateCoreEvents(coreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err) + } + } + for _, uncoreEventsEntity := range uncoreEntities { + err := ea.activateUncoreEvents(uncoreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error { + if entity == nil { + return fmt.Errorf("core events entity is nil") + } + if ea.placementMaker == nil { + return fmt.Errorf("placement maker is nil") + } + if entity.PerfGroup { + err := ea.activateCoreEventsGroup(entity) + if err != nil { + 
return fmt.Errorf("failed to activate core events group: %v", err) + } + } else { + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core parsed event is nil") + } + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event) + if err != nil { + return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err) + } + activeEvent, err := ea.activateEventForPlacements(event, placements) + if err != nil { + return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err) + } + entity.activeEvents = append(entity.activeEvents, activeEvent...) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error { + if entity == nil { + return fmt.Errorf("uncore events entity is nil") + } + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("events activator or placement maker is nil") + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("uncore parsed event is nil") + } + perfEvent := event.custom.Event + if perfEvent == nil { + return fmt.Errorf("perf event of `%s` event is nil", event.name) + } + options := event.custom.Options + + for _, socket := range entity.parsedSockets { + placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent) + if err != nil { + return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err) + } + activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options) + if err != nil { + return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err) + } + events := activeMultiEvent.Events() + entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket}) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error { + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("missing perf activator or placement maker") + } + if entity == nil || len(entity.parsedEvents) < 1 { + return fmt.Errorf("missing parsed events") + } + + var events []ia.CustomizableEvent + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core event is nil") + } + events = append(events, event.custom) + } + leader := entity.parsedEvents[0].custom + + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event) + if err != nil { + return fmt.Errorf("failed to make core placements: %v", err) + } + + for _, plc := range placements { + activeGroup, err := ea.perfActivator.activateGroup(plc, events) + if err != nil { + return err + } + entity.activeEvents = append(entity.activeEvents, activeGroup.Events()...) 
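+		// the group is activated once per core placement; active events from every placement are collected into entity.activeEvents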
+ } + return nil +} + +func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) { + if event == nil { + return nil, fmt.Errorf("core event is nil") + } + if ea.perfActivator == nil { + return nil, fmt.Errorf("missing perf activator") + } + var activeEvents []*ia.ActiveEvent + for _, placement := range placements { + perfEvent := event.custom.Event + options := event.custom.Options + + activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options) + if err != nil { + return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err) + } + activeEvents = append(activeEvents, activeEvent) + } + return activeEvents, nil +} diff --git a/plugins/inputs/intel_pmu/activators_test.go b/plugins/inputs/intel_pmu/activators_test.go new file mode 100644 index 0000000000000..28f05710d3e69 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators_test.go @@ -0,0 +1,432 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type mockPlacementFactory struct { + err bool +} + +func (m *mockPlacementFactory) NewPlacements(_ string, cpu int, cpus ...int) ([]ia.PlacementProvider, error) { + if m.err { + return nil, errors.New("mock error") + } + placements := []ia.PlacementProvider{ + &ia.Placement{CPU: cpu, PMUType: 4}, + } + for _, cpu := range cpus { + placements = append(placements, &ia.Placement{CPU: cpu, PMUType: 4}) + } + return placements, nil +} + +func TestActivateEntities(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{} + + // more core test cases in TestActivateCoreEvents + t.Run("failed to activate core events", func(t *testing.T) { + tag := "TAG" + mEntities := []*CoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(mEntities, nil) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag)) + }) + + // more uncore test cases in TestActivateUncoreEvents + t.Run("failed to activate uncore events", func(t *testing.T) { + tag := "TAG" + mEntities := []*UncoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(nil, mEntities) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag)) + }) + + t.Run("nothing to do", func(t *testing.T) { + err := mEntitiesActivator.activateEntities(nil, nil) + require.NoError(t, err) + }) +} + +func TestActivateUncoreEvents(t *testing.T) { + mActivator := &mockEventsActivator{} + mMaker := &mockPlacementMaker{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateUncoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore events entity is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore parsed event is nil") + }) + + t.Run("perf event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: 
mActivator} + name := "event name" + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name)) + }) + + t.Run("placement maker and perf activator is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: nil} + err := mEntitiesActivator.activateUncoreEvents(&UncoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "events activator or placement maker is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(nil, errMock).Once() + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event `%s`", eventName)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(placements, nil).Once() + mActivator.On("activateMulti", parsedEvents[0].custom.Event, placements, parsedEvents[0].custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4", Uncore: true}}}, + } + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}, &ia.Placement{}} + + var expectedEvents []multiEvent + for _, event := range parsedEvents { + for _, socket := range mEntity.parsedSockets { + mMaker.On("makeUncorePlacements", event.custom.Event, socket).Return(placements, nil).Once() + newActiveMultiEvent := &ia.ActiveMultiEvent{} + expectedEvents = append(expectedEvents, multiEvent{newActiveMultiEvent.Events(), event.custom.Event, 
socket}) + mActivator.On("activateMulti", event.custom.Event, placements, event.custom.Options).Return(newActiveMultiEvent, nil).Once() + } + } + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.NoError(t, err) + require.Equal(t, expectedEvents, mEntity.activeMultiEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEvents(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "core events entity is nil") + }) + + t.Run("placement maker is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(&CoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "placement maker is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core parsed event is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + mMaker.On("makeCorePlacements", mEntity.parsedCores, parsedEvents[0].custom.Event).Return(nil, errMock).Once() + err := mEntitiesActivator.activateCoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + event := parsedEvents[0] + plc := placements[0] + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("failed to activate core events group", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: nil} + mEntity := &CoreEventEntity{PerfGroup: true, parsedEvents: nil} + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + 
require.Contains(t, err.Error(), "failed to activate core events group") + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4"}}}, + } + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}, &ia.Placement{CPU: 2}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + var activeEvents []*ia.ActiveEvent + for _, event := range parsedEvents { + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + for _, plc := range placements { + newActiveEvent := &ia.ActiveEvent{PerfEvent: event.custom.Event} + activeEvents = append(activeEvents, newActiveEvent) + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(newActiveEvent, nil).Once() + } + } + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.NoError(t, err) + require.Equal(t, activeEvents, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEventsGroup(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + eActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + errMock := errors.New("mock error") + + leader := &ia.PerfEvent{Name: "mock event 1"} + perfEvent2 := &ia.PerfEvent{Name: "mock event 2"} + + parsedEvents := []*eventWithQuals{{custom: ia.CustomizableEvent{Event: leader}}, {custom: ia.CustomizableEvent{Event: perfEvent2}}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}} + + // cannot populate this struct due to unexported events field + activeGroup := &ia.ActiveEventGroup{} + + mEntity := &CoreEventEntity{ + EventsTag: "mock group", + PerfGroup: true, + parsedEvents: parsedEvents, + parsedCores: nil, + } + + var events []ia.CustomizableEvent + for _, event := range parsedEvents { + events = append(events, event.custom) + } + + t.Run("missing perf activator and placement maker", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator or placement maker") + }) + + t.Run("missing parsed events", func(t *testing.T) { + mActivator := &iaEntitiesActivator{placementMaker: &mockPlacementMaker{}, perfActivator: &mockEventsActivator{}} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing parsed events") + }) + + t.Run("nil in parsed event", func(t *testing.T) { + mEntity := &CoreEventEntity{EventsTag: "Nice tag", PerfGroup: true, parsedEvents: []*eventWithQuals{nil, nil}} + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + }) + + t.Run("failed to make core placements", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(nil, errMock).Once() + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, 
err.Error(), "failed to make core placements") + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + mActivator.On("activateGroup", placements[0], events).Return(nil, errMock).Once() + + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), errMock.Error()) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + var allActive []*ia.ActiveEvent + t.Run("successfully activated group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + for _, plc := range placements { + mActivator.On("activateGroup", plc, events).Return(activeGroup, nil).Once() + allActive = append(allActive, activeGroup.Events()...) + } + + err := eActivator.activateCoreEventsGroup(mEntity) + require.NoError(t, err) + require.Equal(t, allActive, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestMakeCorePlacements(t *testing.T) { + tests := []struct { + name string + cores []int + perfEvent ia.PlacementFactory + result []ia.PlacementProvider + errMsg string + }{ + {"no cores", nil, &ia.PerfEvent{}, nil, "no cores provided"}, + {"one core placement", []int{1}, &mockPlacementFactory{}, []ia.PlacementProvider{&ia.Placement{CPU: 1, PMUType: 4}}, ""}, + {"multiple core placement", []int{1, 2, 4}, &mockPlacementFactory{}, []ia.PlacementProvider{ + &ia.Placement{CPU: 1, PMUType: 4}, + &ia.Placement{CPU: 2, PMUType: 4}, + &ia.Placement{CPU: 4, PMUType: 4}}, + ""}, + {"placement factory error", []int{1}, &mockPlacementFactory{true}, nil, "mock error"}, + {"placement factory error 2", []int{1, 2, 3}, &mockPlacementFactory{true}, nil, "mock error"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + maker := &iaPlacementMaker{} + providers, err := maker.makeCorePlacements(test.cores, test.perfEvent) + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Nil(t, providers) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + require.Equal(t, test.result, providers) + }) + } +} + +func TestActivateEventForPlacement(t *testing.T) { + placement1 := &ia.Placement{CPU: 0} + placement2 := &ia.Placement{CPU: 1} + placement3 := &ia.Placement{CPU: 2} + + mPlacements := []ia.PlacementProvider{placement1, placement2, placement3} + + mPerfEvent := &ia.PerfEvent{Name: "mock1"} + mOptions := &ia.PerfEventOptions{} + mEvent := &eventWithQuals{name: mPerfEvent.Name, custom: ia.CustomizableEvent{Event: mPerfEvent, Options: mOptions}} + + mPerfActivator := &mockEventsActivator{} + mActivator := &iaEntitiesActivator{perfActivator: mPerfActivator} + + t.Run("event is nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(nil, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + require.Nil(t, activeEvents) + }) + + t.Run("perf activator is nil", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator") + require.Nil(t, activeEvents) + }) + + t.Run("placements are nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(mEvent, nil) + require.NoError(t, err) + require.Nil(t, activeEvents) + 
}) + + t.Run("activation error", func(t *testing.T) { + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err")) + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name)) + require.Nil(t, activeEvents) + mPerfActivator.AssertExpectations(t) + }) + + t.Run("successfully activated", func(t *testing.T) { + mActiveEvent := &ia.ActiveEvent{} + mActiveEvent2 := &ia.ActiveEvent{} + mActiveEvent3 := &ia.ActiveEvent{} + + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(mActiveEvent, nil). + On("activateEvent", mPerfEvent, placement2, mOptions).Once().Return(mActiveEvent2, nil). + On("activateEvent", mPerfEvent, placement3, mOptions).Once().Return(mActiveEvent3, nil) + + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.NoError(t, err) + require.Len(t, activeEvents, len(mPlacements)) + require.Contains(t, activeEvents, mActiveEvent) + require.Contains(t, activeEvents, mActiveEvent2) + mPerfActivator.AssertExpectations(t) + }) +} diff --git a/plugins/inputs/intel_pmu/config.go b/plugins/inputs/intel_pmu/config.go new file mode 100644 index 0000000000000..c788744e9549b --- /dev/null +++ b/plugins/inputs/intel_pmu/config.go @@ -0,0 +1,239 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +// Maximum size of core IDs or socket IDs (8192). Based on maximum value of CPUs that linux kernel supports. +const maxIDsSize = 1 << 13 + +type entitiesParser interface { + parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) +} + +type configParser struct { + log telegraf.Logger + sys sysInfoProvider +} + +func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) { + if len(coreEntities) == 0 && len(uncoreEntities) == 0 { + return fmt.Errorf("neither core nor uncore entities configured") + } + + for _, coreEntity := range coreEntities { + if coreEntity == nil { + return fmt.Errorf("core entity is nil") + } + if coreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all core events from provided files will be configured") + } + coreEntity.allEvents = true + } else { + events := cp.parseEvents(coreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of core events was provided") + } + coreEntity.parsedEvents = events + } + + coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores) + if err != nil { + return fmt.Errorf("error during cores parsing: %v", err) + } + } + + for _, uncoreEntity := range uncoreEntities { + if uncoreEntity == nil { + return fmt.Errorf("uncore entity is nil") + } + if uncoreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all uncore events from provided files will be configured") + } + uncoreEntity.allEvents = true + } else { + events := cp.parseEvents(uncoreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of uncore events was provided") + } + uncoreEntity.parsedEvents = events + } + + uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets) + if err != nil { + return fmt.Errorf("error during sockets parsing: %v", err) + } + } + return nil +} + +func (cp *configParser) parseEvents(events []string) []*eventWithQuals { + if len(events) == 0 { 
+ return nil + } + + events, duplications := removeDuplicateStrings(events) + for _, duplication := range duplications { + if cp.log != nil { + cp.log.Warnf("duplicated event `%s` will be removed", duplication) + } + } + return parseEventsWithQualifiers(events) +} + +func (cp *configParser) parseCores(cores []string) ([]int, error) { + if cores == nil { + if cp.log != nil { + cp.log.Debug("all possible cores will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + cores, err := cp.sys.allCPUs() + if err != nil { + return nil, fmt.Errorf("cannot obtain all cpus: %v", err) + } + return cores, nil + } + if len(cores) == 0 { + return nil, fmt.Errorf("an empty list of cores was provided") + } + + result, err := cp.parseIntRanges(cores) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseSockets(sockets []string) ([]int, error) { + if sockets == nil { + if cp.log != nil { + cp.log.Debug("all possible sockets will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + sockets, err := cp.sys.allSockets() + if err != nil { + return nil, fmt.Errorf("cannot obtain all sockets: %v", err) + } + return sockets, nil + } + if len(sockets) == 0 { + return nil, fmt.Errorf("an empty list of sockets was provided") + } + + result, err := cp.parseIntRanges(sockets) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseIntRanges(ranges []string) ([]int, error) { + var ids []int + var duplicatedIDs []int + var err error + ids, err = parseIDs(ranges) + if err != nil { + return nil, err + } + ids, duplicatedIDs = removeDuplicateValues(ids) + for _, duplication := range duplicatedIDs { + if cp.log != nil { + cp.log.Warnf("duplicated id number `%d` will be removed", duplication) + } + } + return ids, nil +} + +func parseEventsWithQualifiers(events []string) []*eventWithQuals { + var result []*eventWithQuals + + for _, event := range events { + newEventWithQualifiers := &eventWithQuals{} + + split := strings.Split(event, ":") + newEventWithQualifiers.name = split[0] + + if len(split) > 1 { + newEventWithQualifiers.qualifiers = split[1:] + } + result = append(result, newEventWithQualifiers) + } + return result +} + +func parseIDs(allIDsStrings []string) ([]int, error) { + var result []int + for _, idsString := range allIDsStrings { + ids := strings.Split(idsString, ",") + + for _, id := range ids { + id := strings.TrimSpace(id) + // a-b support + var start, end uint + n, err := fmt.Sscanf(id, "%d-%d", &start, &end) + if err == nil && n == 2 { + if start >= end { + return nil, fmt.Errorf("`%d` is equal or greater than `%d`", start, end) + } + for ; start <= end; start++ { + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, int(start)) + } + continue + } + // Single value + num, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("wrong format for id number `%s`: %v", id, err) + } + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, num) + } + } + return result, nil +} + +func removeDuplicateValues(intSlice []int) (result []int, duplicates []int) { + keys := make(map[int]bool) + + for _, entry := range intSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates = 
append(duplicates, entry) + } + } + return result, duplicates +} + +func removeDuplicateStrings(strSlice []string) (result []string, duplicates []string) { + keys := make(map[string]bool) + + for _, entry := range strSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates = append(duplicates, entry) + } + } + return result, duplicates +} diff --git a/plugins/inputs/intel_pmu/config_test.go b/plugins/inputs/intel_pmu/config_test.go new file mode 100644 index 0000000000000..5a0f288e3b443 --- /dev/null +++ b/plugins/inputs/intel_pmu/config_test.go @@ -0,0 +1,230 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestConfigParser_parseEntities(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + e := ia.CustomizableEvent{} + + t.Run("no entities", func(t *testing.T) { + err := mConfigParser.parseEntities(nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "neither core nor uncore entities configured") + }) + + // more specific parsing cases in TestConfigParser_parseIntRanges and TestConfigParser_parseEvents + coreTests := []struct { + name string + + coreEntity *CoreEventEntity + parsedCoreEvents []*eventWithQuals + parsedCores []int + coreAll bool + + uncoreEntity *UncoreEventEntity + parsedUncoreEvents []*eventWithQuals + parsedSockets []int + uncoreAll bool + + failMsg string + }{ + {"no events provided", + &CoreEventEntity{Events: nil, Cores: []string{"1"}}, nil, []int{1}, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0"}}, nil, []int{0}, true, + ""}, + {"uncore entity is nil", + &CoreEventEntity{Events: []string{"EVENT"}, Cores: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + nil, nil, nil, false, + "uncore entity is nil"}, + {"core entity is nil", + nil, nil, nil, false, + &UncoreEventEntity{Events: []string{"EVENT"}, Sockets: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + "core entity is nil"}, + {"error parsing sockets", + &CoreEventEntity{Events: nil, Cores: []string{"1,2"}}, nil, []int{1, 2}, true, + &UncoreEventEntity{Events: []string{"E"}, Sockets: []string{"wrong sockets"}}, []*eventWithQuals{{"E", nil, e}}, nil, false, + "error during sockets parsing"}, + {"error parsing cores", + &CoreEventEntity{Events: nil, Cores: []string{"wrong cpus"}}, nil, nil, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0,1"}}, nil, []int{0, 1}, true, + "error during cores parsing"}, + {"valid settings", + &CoreEventEntity{Events: []string{"E1", "E2:config=123"}, Cores: []string{"1-5"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", []string{"config=123"}, e}}, []int{1, 2, 3, 4, 5}, false, + &UncoreEventEntity{Events: []string{"E1", "E2", "E3"}, Sockets: []string{"0,2-6"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", nil, e}, {"E3", nil, e}}, []int{0, 2, 3, 4, 5, 6}, false, + ""}, + } + + for _, test := range coreTests { + t.Run(test.name, func(t *testing.T) { + coreEntities := []*CoreEventEntity{test.coreEntity} + uncoreEntities := []*UncoreEventEntity{test.uncoreEntity} + + err := mConfigParser.parseEntities(coreEntities, uncoreEntities) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return 
+ } + require.NoError(t, err) + require.Equal(t, test.coreAll, test.coreEntity.allEvents) + require.Equal(t, test.parsedCores, test.coreEntity.parsedCores) + require.Equal(t, test.parsedCoreEvents, test.coreEntity.parsedEvents) + + require.Equal(t, test.uncoreAll, test.uncoreEntity.allEvents) + require.Equal(t, test.parsedSockets, test.uncoreEntity.parsedSockets) + require.Equal(t, test.parsedUncoreEvents, test.uncoreEntity.parsedEvents) + }) + } +} + +func TestConfigParser_parseCores(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no cores provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all cpus info", func(t *testing.T) { + mSysInfo.On("allCPUs").Return(nil, errors.New("all cpus error")).Once() + result, err := mConfigParser.parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all cpus") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all cpus gathering succeeded", func(t *testing.T) { + allCPUs := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} + + mSysInfo.On("allCPUs").Return(allCPUs, nil).Once() + result, err := mConfigParser.parseCores(nil) + require.NoError(t, err) + require.Equal(t, allCPUs, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseSockets(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no sockets provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all sockets info", func(t *testing.T) { + mSysInfo.On("allSockets").Return(nil, errors.New("all sockets error")).Once() + result, err := mConfigParser.parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all sockets") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all cpus gathering succeeded", func(t *testing.T) { + allSockets := []int{0, 1, 2, 3, 4} + + mSysInfo.On("allSockets").Return(allSockets, nil).Once() + result, err := mConfigParser.parseSockets(nil) + require.NoError(t, err) + require.Equal(t, allSockets, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseEvents(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + e := ia.CustomizableEvent{} + + tests := []struct { + name string + input []string + result []*eventWithQuals + }{ + {"no events", nil, nil}, + {"single string", []string{"mock string"}, []*eventWithQuals{{"mock string", nil, e}}}, + {"two events", []string{"EVENT.FIRST", "EVENT.SECOND"}, []*eventWithQuals{{"EVENT.FIRST", nil, e}, {"EVENT.SECOND", nil, e}}}, + {"event with configs", []string{"EVENT.SECOND:config1=0x404300k:config2=0x404300k"}, + []*eventWithQuals{{"EVENT.SECOND", []string{"config1=0x404300k", "config2=0x404300k"}, e}}}, + {"two events with modifiers", []string{"EVENT.FIRST:config1=0x200300:config2=0x231100:u:H", "EVENT.SECOND:K:p"}, + []*eventWithQuals{{"EVENT.FIRST", []string{"config1=0x200300", "config2=0x231100", "u", "H"}, e}, 
{"EVENT.SECOND", []string{"K", "p"}, e}}}, + {"duplicates", []string{"EVENT1", "EVENT1", "EVENT2"}, []*eventWithQuals{{"EVENT1", nil, e}, {"EVENT2", nil, e}}}, + {"duplicates with different configs", []string{"EVENT1:config1", "EVENT1:config2"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}, {"EVENT1", []string{"config2"}, e}}}, + {"duplicates with the same modifiers", []string{"EVENT1:config1", "EVENT1:config1"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := mConfigParser.parseEvents(test.input) + require.Equal(t, test.result, result) + }) + } +} + +func TestConfigParser_parseIntRanges(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + tests := []struct { + name string + input []string + result []int + failMsg string + }{ + {"coma separated", []string{"0,1,2,3,4"}, []int{0, 1, 2, 3, 4}, ""}, + {"range", []string{"0-10"}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, ""}, + {"mixed", []string{"0-3", "4", "12-16"}, []int{0, 1, 2, 3, 4, 12, 13, 14, 15, 16}, ""}, + {"min and max values", []string{"-2147483648", "2147483647"}, []int{math.MinInt32, math.MaxInt32}, ""}, + {"should remove duplicates", []string{"1-5", "2-6"}, []int{1, 2, 3, 4, 5, 6}, ""}, + {"wrong format", []string{"1,2,3%$S,-100"}, nil, "wrong format for id"}, + {"start is greater than end", []string{"10-3"}, nil, "`10` is equal or greater than `3"}, + {"too big value", []string{"18446744073709551615"}, nil, "wrong format for id"}, + {"too much numbers", []string{fmt.Sprintf("0-%d", maxIDsSize)}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + {"too much numbers mixed", []string{fmt.Sprintf("1-%d", maxIDsSize), "0"}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, err := mConfigParser.parseIntRanges(test.input) + require.Equal(t, test.result, result) + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return + } + require.NoError(t, err) + }) + } +} diff --git a/plugins/inputs/intel_pmu/intel_pmu.go b/plugins/inputs/intel_pmu/intel_pmu.go new file mode 100644 index 0000000000000..99818c4a7bfdb --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu.go @@ -0,0 +1,477 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "io/ioutil" + "math" + "math/big" + "os" + "strconv" + "strings" + "syscall" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + ia "github.com/intel/iaevents" +) + +// Linux availability: https://www.kernel.org/doc/Documentation/sysctl/fs.txt +const fileMaxPath = "/proc/sys/fs/file-max" + +type fileInfoProvider interface { + readFile(string) ([]byte, error) + lstat(string) (os.FileInfo, error) + fileLimit() (uint64, error) +} + +type fileHelper struct{} + +func (fileHelper) readFile(path string) ([]byte, error) { + return ioutil.ReadFile(path) +} + +func (fileHelper) lstat(path string) (os.FileInfo, error) { + return os.Lstat(path) +} + +func (fileHelper) fileLimit() (uint64, error) { + var rLimit syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) + return rLimit.Cur, err +} + +type sysInfoProvider interface { + allCPUs() ([]int, error) + allSockets() ([]int, error) +} + +type iaSysInfo struct{} + +func (iaSysInfo) allCPUs() ([]int, error) { + return ia.AllCPUs() +} + +func 
(iaSysInfo) allSockets() ([]int, error) { + return ia.AllSockets() +} + +// IntelPMU is the plugin type. +type IntelPMU struct { + EventListPaths []string `toml:"event_definitions"` + CoreEntities []*CoreEventEntity `toml:"core_events"` + UncoreEntities []*UncoreEventEntity `toml:"uncore_events"` + + Log telegraf.Logger `toml:"-"` + + fileInfo fileInfoProvider + entitiesReader entitiesValuesReader +} + +// CoreEventEntity represents config section for core events. +type CoreEventEntity struct { + Events []string `toml:"events"` + Cores []string `toml:"cores"` + EventsTag string `toml:"events_tag"` + PerfGroup bool `toml:"perf_group"` + + parsedEvents []*eventWithQuals + parsedCores []int + allEvents bool + + activeEvents []*ia.ActiveEvent +} + +// UncoreEventEntity represents config section for uncore events. +type UncoreEventEntity struct { + Events []string `toml:"events"` + Sockets []string `toml:"sockets"` + Aggregate bool `toml:"aggregate_uncore_units"` + EventsTag string `toml:"events_tag"` + + parsedEvents []*eventWithQuals + parsedSockets []int + allEvents bool + + activeMultiEvents []multiEvent +} + +type multiEvent struct { + activeEvents []*ia.ActiveEvent + perfEvent *ia.PerfEvent + socket int +} + +type eventWithQuals struct { + name string + qualifiers []string + + custom ia.CustomizableEvent +} + +func (i *IntelPMU) Description() string { + return "Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem" +} + +func (i *IntelPMU) SampleConfig() string { + return ` + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events sections. + [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. + ## Could vary depending on type of event, use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events sections. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. 
+ ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" +` +} + +// Start is required for IntelPMU to implement the telegraf.ServiceInput interface. +// Necessary initialization and config checking are done in Init. +func (IntelPMU) Start(_ telegraf.Accumulator) error { + return nil +} + +func (i *IntelPMU) Init() error { + err := checkFiles(i.EventListPaths, i.fileInfo) + if err != nil { + return fmt.Errorf("error during event definitions paths validation: %v", err) + } + + reader, err := newReader(i.EventListPaths) + if err != nil { + return err + } + transformer := ia.NewPerfTransformer() + resolver := &iaEntitiesResolver{reader: reader, transformer: transformer, log: i.Log} + parser := &configParser{log: i.Log, sys: &iaSysInfo{}} + activator := &iaEntitiesActivator{perfActivator: &iaEventsActivator{}, placementMaker: &iaPlacementMaker{}} + + i.entitiesReader = &iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: &realClock{}} + + return i.initialization(parser, resolver, activator) +} + +func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error { + if parser == nil || resolver == nil || activator == nil { + return fmt.Errorf("entities parser and/or resolver and/or activator is nil") + } + + err := parser.parseEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during parsing configuration sections: %v", err) + } + + err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events resolving: %v", err) + } + + err = i.checkFileDescriptors() + if err != nil { + return fmt.Errorf("error during file descriptors checking: %v", err) + } + + err = activator.activateEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events activation: %v", err) + } + return nil +} + +func (i *IntelPMU) checkFileDescriptors() error { + coreFd, err := estimateCoresFd(i.CoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err) + } + uncoreFd, err := estimateUncoreFd(i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate number of uncore events file descriptors: %v", err) + } + if coreFd > math.MaxUint64-uncoreFd { + return fmt.Errorf("requested number of file descriptors exceeds uint64") + } + allFd := coreFd + uncoreFd + + // maximum file descriptors enforced on a kernel level + maxFd, err := readMaxFD(i.fileInfo) + if err != nil { + i.Log.Warnf("cannot obtain number of available file descriptors: %v", err) + } else if allFd > maxFd { + return fmt.Errorf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the 
maximum number", allFd, maxFd) + } + + // soft limit for current process + limit, err := i.fileInfo.fileLimit() + if err != nil { + i.Log.Warnf("cannot obtain limit value of open files: %v", err) + } else if allFd > limit { + return fmt.Errorf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", allFd, limit) + } + + return nil +} + +func (i *IntelPMU) Gather(acc telegraf.Accumulator) error { + if i.entitiesReader == nil { + return fmt.Errorf("entities reader is nil") + } + coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to read entities events values: %v", err) + } + + for id, m := range coreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + } + coreMetrics[id].scaled = scaled.Uint64() + } + for id, m := range uncoreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + } + uncoreMetrics[id].scaled = scaled.Uint64() + } + + publishCoreMeasurements(coreMetrics, acc) + publishUncoreMeasurements(uncoreMetrics, acc) + + return nil +} + +func (i *IntelPMU) Stop() { + for _, entity := range i.CoreEntities { + if entity == nil { + continue + } + for _, event := range entity.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err) + } + } + } + for _, entity := range i.UncoreEntities { + if entity == nil { + continue + } + for _, multi := range entity.activeMultiEvents { + for _, event := range multi.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err) + } + } + } + } +} + +func newReader(files []string) (*ia.JSONFilesReader, error) { + reader := ia.NewFilesReader() + for _, file := range files { + err := reader.AddFiles(file) + if err != nil { + return nil, fmt.Errorf("failed to add files to reader: %v", err) + } + } + return reader, nil +} + +func estimateCoresFd(entities []*CoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + events := uint64(len(entity.parsedEvents)) + cores := uint64(len(entity.parsedCores)) + number, err = multiplyAndAdd(events, cores, number) + if err != nil { + return 0, err + } + } + return number, nil +} + +func estimateUncoreFd(entities []*UncoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + for _, e := range entity.parsedEvents { + if e.custom.Event == nil { + continue + } + pmus := uint64(len(e.custom.Event.PMUTypes)) + sockets := uint64(len(entity.parsedSockets)) + number, err = multiplyAndAdd(pmus, sockets, number) + if err != nil { + return 0, err + } + } + } + return number, nil +} + +func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error) { + bigA := new(big.Int).SetUint64(factorA) + bigB := new(big.Int).SetUint64(factorB) + activeEvents := new(big.Int).Mul(bigA, bigB) + if !activeEvents.IsUint64() { + return 0, fmt.Errorf("value `%s` cannot be represented as uint64", activeEvents.String()) + } + if sum > 
math.MaxUint64-activeEvents.Uint64() { + return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) + } + sum += activeEvents.Uint64() + return sum, nil +} + +func readMaxFD(reader fileInfoProvider) (uint64, error) { + if reader == nil { + return 0, fmt.Errorf("file reader is nil") + } + buf, err := reader.readFile(fileMaxPath) + if err != nil { + return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err) + } + max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err) + } + return max, nil +} + +func checkFiles(paths []string, fileInfo fileInfoProvider) error { + // No event definition JSON locations present + if len(paths) == 0 { + return fmt.Errorf("no paths were given") + } + if fileInfo == nil { + return fmt.Errorf("file info provider is nil") + } + // Wrong files + for _, path := range paths { + lInfo, err := fileInfo.lstat(path) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("file `%s` doesn't exist", path) + } + return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) + } + mode := lInfo.Mode() + if mode&os.ModeSymlink != 0 { + return fmt.Errorf("file %s is a symlink", path) + } + if !mode.IsRegular() { + return fmt.Errorf("file `%s` doesn't point to a reagular file", path) + } + } + return nil +} + +func publishCoreMeasurements(metrics []coreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + tags["cpu"] = strconv.Itoa(m.cpu) + + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func publishUncoreMeasurements(metrics []uncoreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + + tags["socket"] = strconv.Itoa(m.socket) + tags["unit_type"] = m.unitType + if !m.agg { + tags["unit"] = m.unit + } + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func init() { + inputs.Add("intel_pmu", func() telegraf.Input { + pmu := IntelPMU{ + fileInfo: &fileHelper{}, + } + return &pmu + }) +} diff --git a/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go new file mode 100644 index 0000000000000..64c7f5bbf1ce1 --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go @@ -0,0 +1,4 @@ +//go:build !linux || !amd64 +// +build !linux !amd64 + +package intel_pmu diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go new file mode 100644 index 0000000000000..e096c4c021d0e --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -0,0 +1,555 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestInitialization(t *testing.T) { 
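+ // mocks shared by the initialization sub-tests below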
+ mError := errors.New("mock error") + mParser := &mockEntitiesParser{} + mResolver := &mockEntitiesResolver{} + mActivator := &mockEntitiesActivator{} + mFileInfo := &mockFileInfoProvider{} + + file := "path/to/file" + paths := []string{file} + + t.Run("missing parser, resolver or activator", func(t *testing.T) { + err := (&IntelPMU{}).initialization(mParser, nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, mResolver, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, nil, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + }) + + t.Run("parse entities error", func(t *testing.T) { + mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during parsing configuration sections") + mParser.AssertExpectations(t) + }) + + t.Run("resolver error", func(t *testing.T) { + mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events resolving") + mParser.AssertExpectations(t) + }) + + t.Run("exceeded file descriptors", func(t *testing.T) { + limit := []byte("10") + uncoreEntities := []*UncoreEventEntity{{parsedEvents: makeEvents(10, 21), parsedSockets: makeIDs(5)}} + estimation := 1050 + + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo, UncoreEntities: uncoreEntities} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(limit, nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", estimation, 10)) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + }) + + t.Run("failed to activate entities", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). 
+ On("fileLimit").Return(uint64(0), mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events activation") + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("everything all right", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). + On("fileLimit").Return(uint64(0), mError).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestGather(t *testing.T) { + mEntitiesValuesReader := &mockEntitiesValuesReader{} + mAcc := &testutil.Accumulator{} + + mIntelPMU := &IntelPMU{entitiesReader: mEntitiesValuesReader} + + type fieldWithTags struct { + fields map[string]interface{} + tags map[string]string + } + + t.Run("entities reader is nil", func(t *testing.T) { + err := (&IntelPMU{entitiesReader: nil}).Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), "entities reader is nil") + }) + + t.Run("error while reading entities", func(t *testing.T) { + errMock := fmt.Errorf("houston we have a problem") + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). 
+ Return(nil, nil, errMock).Once() + + err := mIntelPMU.Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read entities events values: %v", errMock)) + mEntitiesValuesReader.AssertExpectations(t) + }) + + tests := []struct { + name string + coreMetrics []coreMetric + uncoreMetrics []uncoreMetric + results []fieldWithTags + errMSg string + }{ + { + name: "successful readings", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: 100, Enabled: 200, Running: 200}, + name: "CORE_EVENT_1", + tag: "DOGES", + cpu: 1, + }, + { + values: ia.CounterValue{Raw: 2100, Enabled: 400, Running: 200}, + name: "CORE_EVENT_2", + cpu: 0, + }, + }, + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: 2134562, Enabled: 1000000, Running: 1000000}, + name: "UNCORE_EVENT_1", + tag: "SHIBA", + unitType: "cbox", + unit: "cbox_1", + socket: 3, + agg: false, + }, + { + values: ia.CounterValue{Raw: 2134562, Enabled: 3222222, Running: 2100000}, + name: "UNCORE_EVENT_2", + unitType: "cbox", + socket: 0, + agg: true, + }, + }, + results: []fieldWithTags{ + { + fields: map[string]interface{}{ + "raw": uint64(100), + "enabled": uint64(200), + "running": uint64(200), + "scaled": uint64(100), + }, + tags: map[string]string{ + "event": "CORE_EVENT_1", + "cpu": "1", + "events_tag": "DOGES", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2100), + "enabled": uint64(400), + "running": uint64(200), + "scaled": uint64(4200), + }, + tags: map[string]string{ + "event": "CORE_EVENT_2", + "cpu": "0", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(1000000), + "running": uint64(1000000), + "scaled": uint64(2134562), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_1", + "events_tag": "SHIBA", + "socket": "3", + "unit_type": "cbox", + "unit": "cbox_1", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(3222222), + "running": uint64(2100000), + "scaled": uint64(3275253), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_2", + "socket": "0", + "unit_type": "cbox", + }, + }, + }, + }, + { + name: "core scaled value greater then max uint64", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: exceeds uint64", + }, + { + name: "uncore scaled value greater then max uint64", + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG_UNCORE", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). 
+ Return(test.coreMetrics, test.uncoreMetrics, nil).Once() + + err := mIntelPMU.Gather(mAcc) + + mEntitiesValuesReader.AssertExpectations(t) + if len(test.errMSg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMSg) + return + } + require.NoError(t, err) + for _, result := range test.results { + mAcc.AssertContainsTaggedFields(t, "pmu_metric", result.fields, result.tags) + } + }) + } +} + +func TestCheckFileDescriptors(t *testing.T) { + tests := []struct { + name string + uncores []*UncoreEventEntity + cores []*CoreEventEntity + estimation uint64 + maxFD []byte + fileLimit uint64 + errMsg string + }{ + {"exceed maximum file descriptors number", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}}, + []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}}, + 12020, []byte("11000"), 8000, fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", 12020, 11000), + }, + {"exceed soft file limit", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 800, fmt.Sprintf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", 11000, 800), + }, + {"no exceeds", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, + []*CoreEventEntity{{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 13000, "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mIntelPMU := IntelPMU{ + CoreEntities: test.cores, + UncoreEntities: test.uncores, + fileInfo: mFileInfo, + Log: testutil.Logger{}, + } + mFileInfo.On("readFile", fileMaxPath).Return(test.maxFD, nil). 
+ On("fileLimit").Return(test.fileLimit, nil).Once() + + err := mIntelPMU.checkFileDescriptors() + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) + } +} + +func TestEstimateUncoreFd(t *testing.T) { + tests := []struct { + name string + entities []*UncoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"nil perf event", []*UncoreEventEntity{{parsedEvents: []*eventWithQuals{{"", nil, ia.CustomizableEvent{}}}, parsedSockets: makeIDs(0)}}, 0}, + {"one uncore entity", []*UncoreEventEntity{{parsedEvents: makeEvents(10, 10), parsedSockets: makeIDs(20)}}, 2000}, + {"nil entity", []*UncoreEventEntity{nil, {parsedEvents: makeEvents(1, 8), parsedSockets: makeIDs(1)}}, 8}, + {"many core entities", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}, + }, 11305}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{UncoreEntities: test.entities} + result, err := estimateUncoreFd(mIntelPMU.UncoreEntities) + require.Equal(t, test.result, result) + require.NoError(t, err) + }) + } +} + +func TestEstimateCoresFd(t *testing.T) { + tests := []struct { + name string + entities []*CoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"one core entity", []*CoreEventEntity{{parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"nil entity", []*CoreEventEntity{nil, {parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"many core entities", []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}, + }, 715}, + {"1024 events", []*CoreEventEntity{{parsedEvents: makeEvents(1024, 1), parsedCores: makeIDs(12)}}, 12288}, + {"big number", []*CoreEventEntity{{parsedEvents: makeEvents(1048576, 1), parsedCores: makeIDs(1024)}}, 1073741824}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{CoreEntities: test.entities} + result, err := estimateCoresFd(mIntelPMU.CoreEntities) + require.NoError(t, err) + require.Equal(t, test.result, result) + }) + } +} + +func makeEvents(number int, pmusNumber int) []*eventWithQuals { + a := make([]*eventWithQuals, number) + for i := range a { + b := make([]ia.NamedPMUType, pmusNumber) + for j := range b { + b[j] = ia.NamedPMUType{} + } + a[i] = &eventWithQuals{fmt.Sprintf("EVENT.%d", i), nil, + ia.CustomizableEvent{Event: &ia.PerfEvent{PMUTypes: b}}, + } + } + return a +} + +func makeIDs(number int) []int { + a := make([]int, number) + for i := range a { + a[i] = i + } + return a +} + +func TestReadMaxFD(t *testing.T) { + mFileReader := &mockFileInfoProvider{} + + t.Run("reader is nil", func(t *testing.T) { + result, err := readMaxFD(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file reader is nil") + require.Zero(t, result) + }) + + openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath) + parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath) + + tests := []struct { + name string + err error + content []byte + maxFD uint64 + failMsg string + }{ + {"read file error", fmt.Errorf("mock error"), nil, 0, openErrorMsg}, + {"file content parse error", 
nil, []byte("wrong format"), 0, parseErrorMsg}, + {"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg}, + {"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg}, + {"reading succeeded", nil, []byte("12343122"), 12343122, ""}, + {"min value reading", nil, []byte("0"), 0, ""}, + {"max uint 64 reading", nil, []byte("18446744073709551615"), math.MaxUint64, ""}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileReader.On("readFile", fileMaxPath).Return(test.content, test.err).Once() + result, err := readMaxFD(mFileReader) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + } else { + require.NoError(t, err) + } + require.Equal(t, test.maxFD, result) + mFileReader.AssertExpectations(t) + }) + } +} + +func TestAddFiles(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mError := errors.New("mock error") + + t.Run("no paths", func(t *testing.T) { + err := checkFiles([]string{}, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), "no paths were given") + }) + + t.Run("no file info provider", func(t *testing.T) { + err := checkFiles([]string{"path/1, path/2"}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file info provider is nil") + }) + + t.Run("stat error", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, mError).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file does not exist", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, os.ErrNotExist).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file is symlink", func(t *testing.T) { + file := "path/to/symlink" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeSymlink} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file doesn't point to a regular file", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeDir} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a reagular file", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("checking succeeded", func(t *testing.T) { + paths := []string{"path/to/file1", "path/to/file2", "path/to/file3"} + fileInfo := fakeFileInfo{} + + for _, file := range paths { + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + } + + err := checkFiles(paths, mFileInfo) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) +} + +type fakeFileInfo struct { + fileMode os.FileMode +} + +func (f fakeFileInfo) Name() string { return "" } +func (f fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } +func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (f fakeFileInfo) IsDir() bool { return false } +func (f 
fakeFileInfo) Sys() interface{} { return nil } diff --git a/plugins/inputs/intel_pmu/mocks.go b/plugins/inputs/intel_pmu/mocks.go new file mode 100644 index 0000000000000..82799b26f2b04 --- /dev/null +++ b/plugins/inputs/intel_pmu/mocks.go @@ -0,0 +1,407 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "os" + + "github.com/intel/iaevents" + "github.com/stretchr/testify/mock" +) + +// mockValuesReader is an autogenerated mock type for the valuesReader type +type mockValuesReader struct { + mock.Mock +} + +// readValue provides a mock function with given fields: event +func (_m *mockValuesReader) readValue(event *iaevents.ActiveEvent) (iaevents.CounterValue, error) { + ret := _m.Called(event) + + var r0 iaevents.CounterValue + if rf, ok := ret.Get(0).(func(*iaevents.ActiveEvent) iaevents.CounterValue); ok { + r0 = rf(event) + } else { + r0 = ret.Get(0).(iaevents.CounterValue) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*iaevents.ActiveEvent) error); ok { + r1 = rf(event) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockEntitiesValuesReader is an autogenerated mock type for the entitiesValuesReader type +type mockEntitiesValuesReader struct { + mock.Mock +} + +// readEntities provides a mock function with given fields: _a0, _a1 +func (_m *mockEntitiesValuesReader) readEntities(_a0 []*CoreEventEntity, _a1 []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + ret := _m.Called(_a0, _a1) + + var r0 []coreMetric + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) []coreMetric); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coreMetric) + } + } + + var r1 []uncoreMetric + if rf, ok := ret.Get(1).(func([]*CoreEventEntity, []*UncoreEventEntity) []uncoreMetric); ok { + r1 = rf(_a0, _a1) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]uncoreMetric) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r2 = rf(_a0, _a1) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockEntitiesActivator is an autogenerated mock type for the entitiesActivator type +type mockEntitiesActivator struct { + mock.Mock +} + +// activateEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesParser is an autogenerated mock type for the entitiesParser type +type mockEntitiesParser struct { + mock.Mock +} + +// parseEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesResolver is an autogenerated mock type for the entitiesResolver type +type mockEntitiesResolver struct { + mock.Mock +} + +// resolveEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m 
*mockEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEventsActivator is an autogenerated mock type for the eventsActivator type +type mockEventsActivator struct { + mock.Mock +} + +// activateEvent provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateEvent(_a0 iaevents.Activator, _a1 iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveEvent + if rf, ok := ret.Get(0).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateGroup provides a mock function with given fields: _a0, _a1 +func (_m *mockEventsActivator) activateGroup(_a0 iaevents.PlacementProvider, _a1 []iaevents.CustomizableEvent) (*iaevents.ActiveEventGroup, error) { + ret := _m.Called(_a0, _a1) + + var r0 *iaevents.ActiveEventGroup + if rf, ok := ret.Get(0).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) *iaevents.ActiveEventGroup); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEventGroup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateMulti provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateMulti(_a0 iaevents.MultiActivator, _a1 []iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveMultiEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveMultiEvent + if rf, ok := ret.Get(0).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveMultiEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveMultiEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockFileInfoProvider is an autogenerated mock type for the fileInfoProvider type +type mockFileInfoProvider struct { + mock.Mock +} + +// fileLimit provides a mock function with given fields: +func (_m *mockFileInfoProvider) fileLimit() (uint64, error) { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) readFile(_a0 string) ([]byte, error) { + ret := _m.Called(_a0) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(_a0) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// lstat provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) lstat(_a0 string) (os.FileInfo, error) { + ret := _m.Called(_a0) + + var r0 os.FileInfo + if rf, ok := ret.Get(0).(func(string) os.FileInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(os.FileInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockPlacementMaker is an autogenerated mock type for the placementMaker type +type mockPlacementMaker struct { + mock.Mock +} + +// makeCorePlacements provides a mock function with given fields: cores, perfEvent +func (_m *mockPlacementMaker) makeCorePlacements(cores []int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(cores, factory) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func([]int, iaevents.PlacementFactory) []iaevents.PlacementProvider); ok { + r0 = rf(cores, factory) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]int, iaevents.PlacementFactory) error); ok { + r1 = rf(cores, factory) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// makeUncorePlacements provides a mock function with given fields: factory, socket +func (_m *mockPlacementMaker) makeUncorePlacements(socket int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(factory, socket) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func(iaevents.PlacementFactory, int) []iaevents.PlacementProvider); ok { + r0 = rf(factory, socket) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementFactory, int) error); ok { + r1 = rf(factory, socket) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockSysInfoProvider is an autogenerated mock type for the sysInfoProvider type +type mockSysInfoProvider struct { + mock.Mock +} + +// allCPUs provides a mock function with given fields: +func (_m *mockSysInfoProvider) allCPUs() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// allSockets provides a mock function with given fields: +func (_m *mockSysInfoProvider) allSockets() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockTransformer is an autogenerated mock type for the Transformer type +type MockTransformer struct { + mock.Mock +} + +// Transform provides a mock function with given fields: reader, matcher +func (_m *MockTransformer) Transform(reader iaevents.Reader, matcher iaevents.Matcher) ([]*iaevents.PerfEvent, error) { + ret := _m.Called(reader, matcher) + + var r0 
[]*iaevents.PerfEvent + if rf, ok := ret.Get(0).(func(iaevents.Reader, iaevents.Matcher) []*iaevents.PerfEvent); ok { + r0 = rf(reader, matcher) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*iaevents.PerfEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Reader, iaevents.Matcher) error); ok { + r1 = rf(reader, matcher) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/intel_pmu/reader.go b/plugins/inputs/intel_pmu/reader.go new file mode 100644 index 0000000000000..2df72a96618df --- /dev/null +++ b/plugins/inputs/intel_pmu/reader.go @@ -0,0 +1,249 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "time" + + ia "github.com/intel/iaevents" + "golang.org/x/sync/errgroup" +) + +type coreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + tag string + cpu int + + time time.Time +} + +type uncoreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + unitType string + unit string + tag string + socket int + + agg bool + + time time.Time +} + +type valuesReader interface { + readValue(event *ia.ActiveEvent) (ia.CounterValue, error) +} + +type iaValuesReader struct{} + +func (iaValuesReader) readValue(event *ia.ActiveEvent) (ia.CounterValue, error) { + return event.ReadValue() +} + +type entitiesValuesReader interface { + readEntities([]*CoreEventEntity, []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) +} + +type iaEntitiesValuesReader struct { + eventReader valuesReader + timer clock +} + +type clock interface { + now() time.Time +} + +type realClock struct{} + +func (realClock) now() time.Time { + return time.Now() +} + +func (ie *iaEntitiesValuesReader) readEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + var coreMetrics []coreMetric + var uncoreMetrics []uncoreMetric + + for _, entity := range coreEntities { + newMetrics, err := ie.readCoreEvents(entity) + if err != nil { + return nil, nil, err + } + coreMetrics = append(coreMetrics, newMetrics...) + } + for _, entity := range uncoreEntities { + newMetrics, err := ie.readUncoreEvents(entity) + if err != nil { + return nil, nil, err + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
+ } + return coreMetrics, uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + metrics := make([]coreMetric, len(entity.activeEvents)) + errGroup := errgroup.Group{} + + for i, event := range entity.activeEvents { + id := i + actualEvent := event + + if event == nil || event.PerfEvent == nil { + return nil, fmt.Errorf("active event or corresponding perf event is nil") + } + + errGroup.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err) + } + cpu, _ := actualEvent.PMUPlacement() + newMetric := coreMetric{ + values: values, + tag: entity.EventsTag, + cpu: cpu, + name: actualEvent.PerfEvent.Name, + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + } + err := errGroup.Wait() + if err != nil { + return nil, err + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) { + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + var uncoreMetrics []uncoreMetric + + for _, event := range entity.activeMultiEvents { + if entity.Aggregate { + newMetric, err := ie.readMultiEventAgg(event) + if err != nil { + return nil, err + } + newMetric.tag = entity.EventsTag + uncoreMetrics = append(uncoreMetrics, newMetric) + } else { + newMetrics, err := ie.readMultiEventSeparately(event) + if err != nil { + return nil, err + } + for i := range newMetrics { + newMetrics[i].tag = entity.EventsTag + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
+ } + } + return uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return nil, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + metrics := make([]uncoreMetric, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + newMetric := uncoreMetric{ + values: values, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: actualEvent.PMUName(), + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + err := group.Wait() + if err != nil { + return nil, err + } + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return uncoreMetric{}, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return uncoreMetric{}, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + values := make([]ia.CounterValue, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + value, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + values[id] = value + return nil + }) + } + err := group.Wait() + if err != nil { + return uncoreMetric{}, err + } + + bRaw, bEnabled, bRunning := ia.AggregateValues(values) + if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() { + return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeding", perfEvent) + } + aggValues := ia.CounterValue{ + Raw: bRaw.Uint64(), + Enabled: bEnabled.Uint64(), + Running: bRunning.Uint64(), + } + newMetric := uncoreMetric{ + values: aggValues, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + time: ie.timer.now(), + } + return newMetric, nil +} diff --git a/plugins/inputs/intel_pmu/reader_test.go b/plugins/inputs/intel_pmu/reader_test.go new file mode 100644 index 0000000000000..409393383056f --- /dev/null +++ b/plugins/inputs/intel_pmu/reader_test.go @@ -0,0 +1,522 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "math" + "testing" + "time" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type moonClock struct{} + +func (moonClock) now() time.Time { + return time.Date(1969, 7, 20, 20, 17, 0, 0, time.UTC) +} + +type eventWithValues struct { + activeEvent *ia.ActiveEvent + values ia.CounterValue +} + +func TestReadCoreEvents(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{timer: 
moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("nil events", func(t *testing.T) { + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, nil) + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), "active event or corresponding perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}} + + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, event) + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + entity := &CoreEventEntity{} + var expected []coreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event2"}}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event3"}}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + entity.activeEvents = append(entity.activeEvents, tc.activeEvent) + cpu, _ := tc.activeEvent.PMUPlacement() + newMetric := coreMetric{ + values: tc.values, + tag: entity.EventsTag, + cpu: cpu, + name: tc.activeEvent.PerfEvent.Name, + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventSeparately(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("multi event is 
nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{&iaValuesReader{}, moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "no active events or perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + perfEvent := &ia.PerfEvent{Name: "event"} + + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + multi := multiEvent{perfEvent: perfEvent} + var expected []uncoreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + multi.activeEvents = append(multi.activeEvents, tc.activeEvent) + newMetric := uncoreMetric{ + values: tc.values, + socket: multi.socket, + unitType: multi.perfEvent.PMUName, + name: multi.perfEvent.Name, + unit: tc.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventAgg(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + errMock := fmt.Errorf("mock error") + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + + tests := []struct { + name string + multi multiEvent + events []eventWithValues + result ia.CounterValue + readFail bool + errMsg string + }{ + { + name: "no events", + multi: multiEvent{perfEvent: perfEvent}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "no perf event", + multi: multiEvent{perfEvent: nil, activeEvents: []*ia.ActiveEvent{{}, {}}}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "successful reading and aggregation", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, 
ia.CounterValue{Raw: 5123, Enabled: 1231242, Running: 41123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4500, Enabled: 1823423, Running: 182343}}, + }, + result: ia.CounterValue{Raw: 9623, Enabled: 3054665, Running: 223466}, + errMsg: "", + }, + { + name: "to big numbers", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: math.MaxUint64, Enabled: 0, Running: 0}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}}, + }, + result: ia.CounterValue{}, + errMsg: fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeding", perfEvent), + }, + { + name: "reading fail", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 0, Enabled: 0, Running: 0}}, + }, + readFail: true, + result: ia.CounterValue{}, + errMsg: "failed to read uncore event", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for _, eventWithValue := range test.events { + test.multi.activeEvents = append(test.multi.activeEvents, eventWithValue.activeEvent) + if test.readFail { + mReader.On("readValue", eventWithValue.activeEvent).Return(ia.CounterValue{}, errMock).Once() + continue + } + mReader.On("readValue", eventWithValue.activeEvent).Return(eventWithValue.values, nil).Once() + } + metric, err := mEntitiesReader.readMultiEventAgg(test.multi) + mReader.AssertExpectations(t) + + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + expected := uncoreMetric{ + values: test.result, + socket: test.multi.socket, + unitType: test.multi.perfEvent.PMUName, + name: test.multi.perfEvent.Name, + time: mTimer.now(), + } + require.NoError(t, err) + require.Equal(t, expected, metric) + }) + } +} + +func TestReadUncoreEvents(t *testing.T) { + errMock := fmt.Errorf("mock error") + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("read aggregated entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + newMetric := uncoreMetric{ + values: ia.CounterValue{Raw: 6008, Enabled: 0, Running: 0}, + socket: multi.socket, + unitType: 
perfEvent.PMUName, + name: perfEvent.Name, + time: mTimer.now(), + } + newMetric2 := uncoreMetric{ + values: ia.CounterValue{Raw: 125008, Enabled: 0, Running: 0}, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + time: mTimer.now(), + } + expected := []uncoreMetric{newMetric, newMetric2} + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entityAgg) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + }) + + t.Run("read distributed entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent, socket: 2} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2, socket: 1} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + var expected []uncoreMetric + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + entity := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + 
mReader.AssertExpectations(t) + }) + }) +} + +func TestReadEntities(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("read entities", func(t *testing.T) { + values := ia.CounterValue{} + socket := 0 + + corePerfEvent := &ia.PerfEvent{Name: "core event 1", PMUName: "cpu"} + activeCoreEvent := []*ia.ActiveEvent{{PerfEvent: corePerfEvent}} + coreMetric1 := coreMetric{values: values, name: corePerfEvent.Name, time: mTimer.now()} + + corePerfEvent2 := &ia.PerfEvent{Name: "core event 2", PMUName: "cpu"} + activeCoreEvent2 := []*ia.ActiveEvent{{PerfEvent: corePerfEvent2}} + coreMetric2 := coreMetric{values: values, name: corePerfEvent2.Name, time: mTimer.now()} + + uncorePerfEvent := &ia.PerfEvent{Name: "uncore event 1", PMUName: "cbox"} + activeUncoreEvent := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent}} + uncoreMetric1 := uncoreMetric{ + values: values, + name: uncorePerfEvent.Name, + unitType: uncorePerfEvent.PMUName, + socket: socket, + time: mTimer.now(), + } + + uncorePerfEvent2 := &ia.PerfEvent{Name: "uncore event 2", PMUName: "rig"} + activeUncoreEvent2 := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent2}} + uncoreMetric2 := uncoreMetric{ + values: values, + name: uncorePerfEvent2.Name, + unitType: uncorePerfEvent2.PMUName, + socket: socket, + time: mTimer.now(), + } + + coreEntities := []*CoreEventEntity{{activeEvents: activeCoreEvent}, {activeEvents: activeCoreEvent2}} + + uncoreEntities := []*UncoreEventEntity{ + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent, perfEvent: uncorePerfEvent, socket: socket}}}, + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent2, perfEvent: uncorePerfEvent2, socket: socket}}}, + } + + expectedCoreMetrics := []coreMetric{coreMetric1, coreMetric2} + expectedUncoreMetrics := []uncoreMetric{uncoreMetric1, uncoreMetric2} + + mReader.On("readValue", activeCoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeCoreEvent2[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent2[0]).Return(values, nil).Once() + + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, uncoreEntities) + + require.NoError(t, err) + require.Equal(t, expectedCoreMetrics, coreMetrics) + require.NotNil(t, expectedUncoreMetrics, uncoreMetrics) + mReader.AssertExpectations(t) + }) + + t.Run("core entity reading failed", func(t *testing.T) { + coreEntities := []*CoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) + + t.Run("uncore entity reading failed", func(t *testing.T) { + uncoreEntities := []*UncoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(nil, uncoreEntities) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) +} diff --git a/plugins/inputs/intel_pmu/resolver.go b/plugins/inputs/intel_pmu/resolver.go new file mode 100644 index 0000000000000..8457f48ca14db --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver.go @@ -0,0 +1,150 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + ia 
"github.com/intel/iaevents" +) + +type entitiesResolver interface { + resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesResolver struct { + reader ia.Reader + transformer ia.Transformer + log telegraf.Logger +} + +func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, entity := range coreEntities { + if entity == nil { + return fmt.Errorf("core entity is nil") + } + if entity.allEvents { + newEvents, _, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed core event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err) + } + if customEvent.Event.Uncore { + return fmt.Errorf("uncore event `%s` found in core entity", event.name) + } + event.custom = customEvent + } + } + for _, entity := range uncoreEntities { + if entity == nil { + return fmt.Errorf("uncore entity is nil") + } + if entity.allEvents { + _, newEvents, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed uncore event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err) + } + if !customEvent.Event.Uncore { + return fmt.Errorf("core event `%s` found in uncore entity", event.name) + } + event.custom = customEvent + } + } + return nil +} + +func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, uncoreEvents []*eventWithQuals, err error) { + if e.transformer == nil { + return nil, nil, errors.New("transformer is nil") + } + + perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher()) + if err != nil { + re, ok := err.(*ia.TransformationError) + if !ok { + return nil, nil, err + } + if e.log != nil && re != nil { + var eventErrs []string + for _, eventErr := range re.Errors() { + if eventErr == nil { + continue + } + eventErrs = append(eventErrs, eventErr.Error()) + } + errorsStr := strings.Join(eventErrs, ",\n") + e.log.Warnf("Cannot resolve all of the events from provided files:\n%s.\nSome events may be omitted.", errorsStr) + } + } + + for _, perfEvent := range perfEvents { + newEvent := &eventWithQuals{ + name: perfEvent.Name, + custom: ia.CustomizableEvent{Event: perfEvent}, + } + // build options for event + newEvent.custom.Options, err = ia.NewOptions().Build() + if err != nil { + return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err) + } + if perfEvent.Uncore { + uncoreEvents = append(uncoreEvents, newEvent) + continue + } + coreEvents = append(coreEvents, newEvent) + } + return coreEvents, uncoreEvents, nil +} + +func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.CustomizableEvent, error) { + var custom ia.CustomizableEvent + if e.transformer == nil { + return custom, errors.New("events transformer is nil") + } + if name == "" { + return custom, errors.New("event name is empty") + } + matcher := ia.NewNameMatcher(name) + perfEvents, err := 
e.transformer.Transform(e.reader, matcher) + if err != nil { + return custom, fmt.Errorf("failed to transform perf events: %v", err) + } + if len(perfEvents) < 1 { + return custom, fmt.Errorf("failed to resolve unknown event `%s`", name) + } + // build options for event + options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + if err != nil { + return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err) + } + custom = ia.CustomizableEvent{ + Event: perfEvents[0], + Options: options, + } + return custom, nil +} diff --git a/plugins/inputs/intel_pmu/resolver_test.go b/plugins/inputs/intel_pmu/resolver_test.go new file mode 100644 index 0000000000000..176b6d133772c --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver_test.go @@ -0,0 +1,376 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestResolveEntities(t *testing.T) { + errMock := errors.New("mock error") + mLog := testutil.Logger{} + mTransformer := &MockTransformer{} + mResolver := &iaEntitiesResolver{transformer: mTransformer, log: mLog} + + type test struct { + perfEvent *ia.PerfEvent + options ia.Options + event *eventWithQuals + } + + t.Run("nil entities", func(t *testing.T) { + err := mResolver.resolveEntities([]*CoreEventEntity{nil}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "core entity is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{nil}) + + require.Error(t, err) + require.Contains(t, err.Error(), "uncore entity is nil") + }) + + t.Run("nil parsed events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed core event is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed uncore event is nil") + }) + + t.Run("fail to resolve core events", func(t *testing.T) { + name := "mock event 1" + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve uncore events", func(t *testing.T) { + name := "mock event 1" + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve all core and uncore events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{allEvents: true} + mUncoreEntity := &UncoreEventEntity{allEvents: true} + corePerfEvents := []*ia.PerfEvent{ + {Name: "core 
event1"}, + {Name: "core event2"}, + {Name: "core event3"}, + } + uncorePerfEvents := []*ia.PerfEvent{ + {Name: "uncore event1", Uncore: true}, + {Name: "uncore event2", Uncore: true}, + {Name: "uncore event3", Uncore: true}, + } + matcher := ia.NewNameMatcher() + + t.Run("fail to resolve all core events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all uncore events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all events with transformationError", func(t *testing.T) { + transformErr := &ia.TransformationError{} + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, transformErr).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, transformErr).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, nil).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, nil).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + t.Run("uncore event found in core entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "uncore event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("core event found in uncore entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ 
:= ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "core event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve core and uncore events", func(t *testing.T) { + var mCoreEvents []*eventWithQuals + var nUncoreEvents []*eventWithQuals + + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + emptyOptions, _ := ia.NewOptions().Build() + + coreTestCases := []test{ + {event: &eventWithQuals{name: "core1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "core1"}}, + {event: &eventWithQuals{name: "core2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core2"}}, + {event: &eventWithQuals{name: "core3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core3"}}, + } + uncoreTestCases := []test{ + {event: &eventWithQuals{name: "uncore1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "uncore1", Uncore: true}}, + {event: &eventWithQuals{name: "uncore2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore2", Uncore: true}}, + {event: &eventWithQuals{name: "uncore3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore3", Uncore: true}}, + } + + for _, test := range coreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + mCoreEvents = append(mCoreEvents, test.event) + } + + for _, test := range uncoreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + nUncoreEvents = append(nUncoreEvents, test.event) + } + + mCoreEntity := &CoreEventEntity{parsedEvents: mCoreEvents, allEvents: false} + mUncoreEntity := &UncoreEventEntity{parsedEvents: nUncoreEvents, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + + require.NoError(t, err) + for _, test := range append(coreTestCases, uncoreTestCases...) 
{ + require.Equal(t, test.perfEvent, test.event.custom.Event) + require.Equal(t, test.options, test.event.custom.Options) + } + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveAllEvents(t *testing.T) { + mTransformer := &MockTransformer{} + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("no events", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, _, err := mResolver.resolveAllEvents() + require.NoError(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully resolved events", func(t *testing.T) { + perfEvent1 := &ia.PerfEvent{Name: "mock1"} + perfEvent2 := &ia.PerfEvent{Name: "mock2"} + uncorePerfEvent1 := &ia.PerfEvent{Name: "mock3", Uncore: true} + uncorePerfEvent2 := &ia.PerfEvent{Name: "mock4", Uncore: true} + + options, _ := ia.NewOptions().Build() + perfEvents := []*ia.PerfEvent{perfEvent1, perfEvent2, uncorePerfEvent1, uncorePerfEvent2} + + expectedCore := []*eventWithQuals{ + {name: perfEvent1.Name, custom: ia.CustomizableEvent{Event: perfEvent1, Options: options}}, + {name: perfEvent2.Name, custom: ia.CustomizableEvent{Event: perfEvent2, Options: options}}, + } + + expectedUncore := []*eventWithQuals{ + {name: uncorePerfEvent1.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent1, Options: options}}, + {name: uncorePerfEvent2.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent2, Options: options}}, + } + + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(perfEvents, nil) + + coreEvents, uncoreEvents, err := mResolver.resolveAllEvents() + require.NoError(t, err) + require.Equal(t, expectedCore, coreEvents) + require.Equal(t, expectedUncore, uncoreEvents) + + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveEvent(t *testing.T) { + mTransformer := &MockTransformer{} + mEvent := "mock event" + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, err := mResolver.resolveEvent("event", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "events transformer is nil") + }) + + t.Run("event is empty", func(t *testing.T) { + _, err := mResolver.resolveEvent("", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event name is empty") + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to transform perf events") + mTransformer.AssertExpectations(t) + }) + + t.Run("no events transformed", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to 
resolve unknown event") + mTransformer.AssertExpectations(t) + }) + + t.Run("not valid qualifiers", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"wrong modifiers"} + + matcher := ia.NewNameMatcher(event) + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + _, err := mResolver.resolveEvent(event, qualifiers) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event)) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully transformed", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"config1=0x012h", "config2=0x034k"} + + matcher := ia.NewNameMatcher(event) + + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + + expectedOptions, _ := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + customEvent, err := mResolver.resolveEvent(event, qualifiers) + require.NoError(t, err) + require.Equal(t, mPerfEvent, customEvent.Event) + require.Equal(t, expectedOptions, customEvent.Options) + mTransformer.AssertExpectations(t) + }) +} diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md index 009c8cafc1cfb..4b0b88ab7fc32 100644 --- a/plugins/inputs/intel_powerstat/README.md +++ b/plugins/inputs/intel_powerstat/README.md @@ -1,11 +1,13 @@ # Intel PowerStat Input Plugin -This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. -Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. -Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems -to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics. +This input plugin monitors power statistics on Intel-based platforms and assumes presence of Linux based OS. + +Main use cases are power saving and workload migration. Telemetry frameworks allow users to monitor critical platform level metrics. +Key source of platform telemetry is power domain that is beneficial for MANO/Monitoring&Analytics systems +to take preventive/corrective actions based on platform busyness, CPU temperature, actual CPU utilization and power statistics. + +## Configuration -### Configuration: ```toml # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. 
[[inputs.intel_powerstat]] @@ -17,52 +19,65 @@ to take preventive/corrective actions based on platform busyness, CPU temperatur ## "cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles" # cpu_metrics = [] ``` -### Example: Configuration with no per-CPU telemetry + +## Example: Configuration with no per-CPU telemetry + This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: + ```toml [[inputs.intel_powerstat]] cpu_metrics = [] ``` -### Example: Configuration with no per-CPU telemetry - equivalent case +## Example: Configuration with no per-CPU telemetry - equivalent case + This configuration allows getting global metrics (processor package specific), no per-CPU metrics are collected: + ```toml [[inputs.intel_powerstat]] ``` -### Example: Configuration for CPU Temperature and Frequency only +## Example: Configuration for CPU Temperature and Frequency only + This configuration allows getting global metrics plus subset of per-CPU metrics (CPU Temperature and Current Frequency): + ```toml [[inputs.intel_powerstat]] cpu_metrics = ["cpu_frequency", "cpu_temperature"] ``` -### Example: Configuration with all available metrics +## Example: Configuration with all available metrics + This configuration allows getting global metrics and all per-CPU metrics: + ```toml [[inputs.intel_powerstat]] cpu_metrics = ["cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles"] ``` -### SW Dependencies: +## SW Dependencies + Plugin is based on Linux Kernel modules that expose specific metrics over `sysfs` or `devfs` interfaces. The following dependencies are expected by plugin: + - _intel-rapl_ module which exposes Intel Runtime Power Limiting metrics over `sysfs` (`/sys/devices/virtual/powercap/intel-rapl`), - _msr_ kernel module that provides access to processor model specific registers over `devfs` (`/dev/cpu/cpu%d/msr`), -- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`). +- _cpufreq_ kernel module - which exposes per-CPU Frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`). Minimum kernel version required is 3.13 to satisfy all requirements. Please make sure that kernel modules are loaded and running. You might have to manually enable them by using `modprobe`. Exact commands to be executed are: -``` + +```sh sudo modprobe cpufreq-stats sudo modprobe msr sudo modprobe intel_rapl ``` -**Telegraf with Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)** +**Telegraf with Intel PowerStat plugin enabled may require root access to read model specific registers (MSRs)** to retrieve data for calculation of most critical per-CPU specific metrics: + - `cpu_busy_frequency_mhz` - `cpu_temperature_celsius` - `cpu_c1_state_residency_percent` @@ -71,23 +86,25 @@ to retrieve data for calculation of most critical per-CPU specific metrics: To expose other Intel PowerStat metrics root access may or may not be required (depending on OS type or configuration). -### HW Dependencies: -Specific metrics require certain processor features to be present, otherwise Intel PowerStat plugin won't be able to -read them. When using Linux Kernel based OS, user can detect supported processor features reading `/proc/cpuinfo` file. 
+## HW Dependencies + +Specific metrics require certain processor features to be present, otherwise Intel PowerStat plugin won't be able to +read them. When using Linux Kernel based OS, user can detect supported processor features reading `/proc/cpuinfo` file. Plugin assumes crucial properties are the same for all CPU cores in the system. The following processor properties are examined in more detail in this section: processor _cpu family_, _model_ and _flags_. The following processor properties are required by the plugin: -- Processor _cpu family_ must be Intel (0x6) - since data used by the plugin assumes Intel specific + +- Processor _cpu family_ must be Intel (0x6) - since data used by the plugin assumes Intel specific model specific registers for all features - The following processor flags shall be present: - - "_msr_" shall be present for plugin to read platform data from processor model specific registers and collect - the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_, + - "_msr_" shall be present for plugin to read platform data from processor model specific registers and collect + the following metrics: _powerstat_core.cpu_temperature_, _powerstat_core.cpu_busy_frequency_, _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_, _powerstat_core._cpu_c6_state_residency_ - - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_, + - "_aperfmperf_" shall be present to collect the following metrics: _powerstat_core.cpu_busy_frequency_, _powerstat_core.cpu_busy_cycles_, _powerstat_core.cpu_c1_state_residency_ - - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_ -- Processor _Model number_ must be one of the following values for plugin to read _powerstat_core.cpu_c1_state_residency_ + - "_dts_" shall be present to collect _powerstat_core.cpu_temperature_ +- Processor _Model number_ must be one of the following values for plugin to read _powerstat_core.cpu_c1_state_residency_ and _powerstat_core.cpu_c6_state_residency_ metrics: | Model number | Processor name | @@ -95,12 +112,12 @@ and _powerstat_core.cpu_c6_state_residency_ metrics: | 0x37 | Intel Atom® Bay Trail | | 0x4D | Intel Atom® Avaton | | 0x5C | Intel Atom® Apollo Lake | -| 0x5F | Intel Atom® Denverton | +| 0x5F | Intel Atom® Denverton | | 0x7A | Intel Atom® Goldmont | | 0x4C | Intel Atom® Airmont | | 0x86 | Intel Atom® Jacobsville | -| 0x96 | Intel Atom® Elkhart Lake | -| 0x9C | Intel Atom® Jasper Lake | +| 0x96 | Intel Atom® Elkhart Lake | +| 0x9C | Intel Atom® Jasper Lake | | 0x1A | Intel Nehalem-EP | | 0x1E | Intel Nehalem | | 0x1F | Intel Nehalem-G | @@ -138,27 +155,32 @@ and _powerstat_core.cpu_c6_state_residency_ metrics: | 0x8F | Intel Sapphire Rapids X | | 0x8C | Intel TigerLake-L | | 0x8D | Intel TigerLake | - -### Metrics + +## Metrics + All metrics collected by Intel PowerStat plugin are collected in fixed intervals. Metrics that reports processor C-state residency or power are calculated over elapsed intervals. When starting to measure metrics, plugin skips first iteration of metrics if they are based on deltas with previous value. 
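As a minimal sketch of the delta-based calculation described above, the snippet below derives a residency percentage from two consecutive counter samples and shows why the first interval has to be skipped. The counter names, sample values, and exact formula are illustrative assumptions, not the plugin's internal code.

```go
// Illustrative only: a delta-based residency percentage computed over one
// collection interval. Field names and values are hypothetical.
package main

import "fmt"

type sample struct {
	c6Residency uint64 // residency counter, monotonically increasing
	tsc         uint64 // timestamp counter read at the same moment
}

// c6Percent returns the share of the elapsed interval spent in C6.
// With no previous sample there is nothing to diff against, which is why
// the first iteration of such delta-based metrics is skipped.
func c6Percent(prev, curr sample) float64 {
	tscDelta := curr.tsc - prev.tsc
	if tscDelta == 0 {
		return 0
	}
	return 100 * float64(curr.c6Residency-prev.c6Residency) / float64(tscDelta)
}

func main() {
	prev := sample{c6Residency: 1_000_000, tsc: 10_000_000}
	curr := sample{c6Residency: 1_400_000, tsc: 11_000_000}
	fmt.Printf("cpu_c6_state_residency_percent=%.2f\n", c6Percent(prev, curr)) // 40.00
}
```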
- + **The following measurements are supported by Intel PowerStat plugin:** + - powerstat_core - - The following Tags are returned by plugin with powerstat_core measurements: + - The following Tags are returned by plugin with powerstat_core measurements: + ```text | Tag | Description | |-----|-------------| | `package_id` | ID of platform package/socket | - | `core_id` | ID of physical processor core | + | `core_id` | ID of physical processor core | | `cpu_id` | ID of logical processor core | - Measurement powerstat_core metrics are collected per-CPU (cpu_id is the key) + Measurement powerstat_core metrics are collected per-CPU (cpu_id is the key) while core_id and package_id tags are additional topology information. + ``` - - Available metrics for powerstat_core measurement - + - Available metrics for powerstat_core measurement + + ```text | Metric name (field) | Description | Units | |-----|-------------|-----| | `cpu_frequency_mhz` | Current operational frequency of CPU Core | MHz | @@ -167,31 +189,33 @@ When starting to measure metrics, plugin skips first iteration of metrics if the | `cpu_c1_state_residency_percent` | Percentage of time that CPU Core spent in C1 Core residency state | % | | `cpu_c6_state_residency_percent` | Percentage of time that CPU Core spent in C6 Core residency state | % | | `cpu_busy_cycles_percent` | CPU Core Busy cycles as a ratio of Cycles spent in C0 state residency to all cycles executed by CPU Core | % | - - + ``` - powerstat_package - - The following Tags are returned by plugin with powerstat_package measurements: + - The following Tags are returned by plugin with powerstat_package measurements: + ```text | Tag | Description | |-----|-------------| | `package_id` | ID of platform package/socket | - Measurement powerstat_package metrics are collected per processor package - _package_id_ tag indicates which + Measurement powerstat_package metrics are collected per processor package -_package_id_ tag indicates which package metric refers to. + ``` - - Available metrics for powerstat_package measurement + - Available metrics for powerstat_package measurement + ```text | Metric name (field) | Description | Units | |-----|-------------|-----| - | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts | + | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts | | `current_power_consumption_watts` | Current power consumption of processor package | Watts | | `current_dram_power_consumption_watts` | Current power consumption of processor package DRAM subsystem | Watts | + ``` +### Example Output -### Example Output: - -``` +```shell powerstat_package,host=ubuntu,package_id=0 thermal_design_power_watts=160 1606494744000000000 powerstat_package,host=ubuntu,package_id=0 current_power_consumption_watts=35 1606494744000000000 powerstat_package,host=ubuntu,package_id=0 current_dram_power_consumption_watts=13.94 1606494744000000000 diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index cc98c13b6c0e0..5c49d08be502d 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -1,22 +1,26 @@ # Intel RDT Input Plugin -The `intel_rdt` plugin collects information provided by monitoring features of -the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor -and control the utilization of shared resources (ex: last level cache, memory bandwidth). 
-### About Intel RDT +The `intel_rdt` plugin collects information provided by monitoring features of +the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor +and control the utilization of shared resources (ex: last level cache, memory bandwidth). + +## About Intel RDT + Intel’s Resource Director Technology (RDT) framework consists of: -- Cache Monitoring Technology (CMT) + +- Cache Monitoring Technology (CMT) - Memory Bandwidth Monitoring (MBM) -- Cache Allocation Technology (CAT) -- Code and Data Prioritization (CDP) +- Cache Allocation Technology (CAT) +- Code and Data Prioritization (CDP) -As multithreaded and multicore platform architectures emerge, the last level cache and -memory bandwidth are key resources to manage for running workloads in single-threaded, -multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT -and CDP to manage these workloads across shared resources. +As multithreaded and multicore platform architectures emerge, the last level cache and +memory bandwidth are key resources to manage for running workloads in single-threaded, +multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT +and CDP to manage these workloads across shared resources. -### Prerequsities - PQoS Tool -To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a +## Prerequsities - PQoS Tool + +To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). Before using this plugin please be sure _pqos_ is properly installed and configured regarding that the plugin run _pqos_ to work with `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. @@ -24,7 +28,7 @@ Note: pqos tool needs root privileges to work properly. Metrics will be constantly reported from the following `pqos` commands within the given interval: -#### If telegraf does not run as the root user +### If telegraf does not run as the root user The `pqos` binary needs to run as root. If telegraf is running as a non-root user, you may enable sudo to allow `pqos` to run correctly. @@ -40,40 +44,46 @@ Alternately, you may enable sudo to allow `pqos` to run correctly, as follows: Add the following to your sudoers file (assumes telegraf runs as a user named `telegraf`): -``` +```sh telegraf ALL=(ALL) NOPASSWD:/usr/sbin/pqos -r --iface-os --mon-file-type=csv --mon-interval=* ``` If you wish to use sudo, you must also add `use_sudo = true` to the Telegraf configuration (see below). -#### In case of cores monitoring: -``` +### In case of cores monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] ``` + where `CORES` is equal to group of cores provided in config. User can provide many groups. -#### In case of process monitoring: -``` +### In case of process monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-pid=all:[PIDS]\;mbt:[PIDS] ``` + where `PIDS` is group of processes IDs which name are equal to provided process name in a config. User can provide many process names which lead to create many processes groups. In both cases `INTERVAL` is equal to sampling_interval from config. 
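For reference, a fully expanded invocation of the cores-monitoring command above might look as follows, assuming a single configured core group of `0-3` and the default `sampling_interval = "10"` (10 x 100 ms = 1 s); the core group and interval are example values only.

```sh
# Example expansion of the template above (assumed values: cores 0-3, interval 10)
pqos -r --iface-os --mon-file-type=csv --mon-interval=10 --mon-core=all:[0-3]\;mbt:[0-3]
```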
-Because PIDs association within system could change in every moment, Intel RDT plugin provides a +Because PIDs association within system could change in every moment, Intel RDT plugin provides a functionality to check on every interval if desired processes change their PIDs association. If some change is reported, plugin will restart _pqos_ tool with new arguments. If provided by user process name is not equal to any of available processes, will be omitted and plugin will constantly check for process availability. -### Useful links -Pqos installation process: https://github.com/intel/intel-cmt-cat/blob/master/INSTALL -Enabling OS interface: https://github.com/intel/intel-cmt-cat/wiki, https://github.com/intel/intel-cmt-cat/wiki/resctrl -More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html +## Useful links + +Pqos installation process: +Enabling OS interface: , +More about Intel RDT: + +## Configuration -### Configuration ```toml # Read Intel RDT metrics [[inputs.intel_rdt]] @@ -81,7 +91,7 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ## This value is propagated to pqos tool. Interval format is defined by pqos itself. ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # sampling_interval = "10" - + ## Optionally specify the path to pqos executable. ## If not provided, auto discovery will be performed. # pqos_path = "/usr/local/bin/pqos" @@ -105,7 +115,8 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t # use_sudo = false ``` -### Exposed metrics +## Exposed metrics + | Name | Full name | Description | |---------------|-----------------------------------------------|-------------| | MBL | Memory Bandwidth on Local NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the local NUMA memory channel | @@ -117,7 +128,8 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t *optional -### Troubleshooting +## Troubleshooting + Pointing to non-existing cores will lead to throwing an error by _pqos_ and the plugin will not work properly. Be sure to check provided core number exists within desired system. @@ -126,13 +138,16 @@ Do not use any other _pqos_ instance that is monitoring the same cores or PIDs w It is not possible to monitor same cores or PIDs on different groups. PIDs associated for the given process could be manually checked by `pidof` command. E.g: -``` + +```sh pidof PROCESS ``` + where `PROCESS` is process name. 
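A minimal sketch of the per-interval re-association check described above, using assumed helper names rather than the plugin's actual code:

```go
package main

import (
	"fmt"
	"reflect"
)

// pidsChanged reports whether the PIDs currently associated with the monitored
// process names differ from the previous interval. When it returns true, the
// plugin restarts pqos with arguments built from the new association.
// Sketch only; the real logic is in intel_rdt.go.
func pidsChanged(previous, current map[string]string) bool {
	return !reflect.DeepEqual(previous, current)
}

func main() {
	previous := map[string]string{"nginx": "101,102,"}
	current := map[string]string{"nginx": "101,250,"} // one worker was restarted
	fmt.Println(pidsChanged(previous, current))       // true -> restart pqos
}
```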
-### Example Output -``` +## Example Output + +```shell > rdt_metric,cores=12\,19,host=r2-compute-20,name=IPC,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC_Misses,process=top value=0 1598962030000000000 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC,process=top value=0 1598962030000000000 diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go index 486a13c98c535..d354bb855aacf 100644 --- a/plugins/inputs/intel_rdt/intel_rdt.go +++ b/plugins/inputs/intel_rdt/intel_rdt.go @@ -66,6 +66,12 @@ type processMeasurement struct { measurement string } +type splitCSVLine struct { + timeValue string + metricsValues []string + coreOrPIDsValues []string +} + // All gathering is done in the Start function func (r *IntelRDT) Gather(_ telegraf.Accumulator) error { return nil @@ -230,8 +236,8 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s } for _, availableProcess := range availableProcesses { if choice.Contains(availableProcess.Name, providedProcesses) { - PID := availableProcess.PID - mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", PID) + "," + pid := availableProcess.PID + mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", pid) + "," } } for key := range mapProcessPIDs { @@ -258,7 +264,7 @@ func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAss r.wg.Add(1) defer r.wg.Done() - cmd := exec.Command(r.PqosPath, append(args)...) + cmd := exec.Command(r.PqosPath, args...) if r.UseSudo { // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` @@ -327,13 +333,13 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati if len(r.Processes) != 0 { newMetric := processMeasurement{} - PIDs, err := findPIDsInMeasurement(out) + pids, err := findPIDsInMeasurement(out) if err != nil { r.errorChan <- err break } for processName, PIDsProcess := range processesPIDsAssociation { - if PIDs == PIDsProcess { + if pids == PIDsProcess { newMetric.name = processName newMetric.measurement = out } @@ -482,29 +488,29 @@ func validateAndParseCores(coreStr string) ([]int, error) { func findPIDsInMeasurement(measurements string) (string, error) { // to distinguish PIDs from Cores (PIDs should be in quotes) var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`) - PIDsMatch := insideQuoteRegex.FindStringSubmatch(measurements) - if len(PIDsMatch) < 2 { + pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements) + if len(pidsMatch) < 2 { return "", fmt.Errorf("cannot find PIDs in measurement line") } - PIDs := PIDsMatch[1] - return PIDs, nil + pids := pidsMatch[1] + return pids, nil } -func splitCSVLineIntoValues(line string) (timeValue string, metricsValues, coreOrPIDsValues []string, err error) { +func splitCSVLineIntoValues(line string) (splitCSVLine, error) { values, err := splitMeasurementLine(line) if err != nil { - return "", nil, nil, err + return splitCSVLine{}, err } - timeValue = values[0] + timeValue := values[0] // Because pqos csv format is broken when many cores are involved in PID or // group of PIDs, there is need to work around it. 
E.g.: // Time,PID,Core,IPC,LLC Misses,LLC[KB],MBL[MB/s],MBR[MB/s],MBT[MB/s] // 2020-08-12 13:34:36,"45417,29170,",37,44,0.00,0,0.0,0.0,0.0,0.0 - metricsValues = values[len(values)-numberOfMetrics:] - coreOrPIDsValues = values[1 : len(values)-numberOfMetrics] + metricsValues := values[len(values)-numberOfMetrics:] + coreOrPIDsValues := values[1 : len(values)-numberOfMetrics] - return timeValue, metricsValues, coreOrPIDsValues, nil + return splitCSVLine{timeValue, metricsValues, coreOrPIDsValues}, nil } func validateInterval(interval int32) error { @@ -523,7 +529,7 @@ func splitMeasurementLine(line string) ([]string, error) { } func parseTime(value string) (time.Time, error) { - timestamp, err := time.Parse(timestampFormat, value) + timestamp, err := time.ParseInLocation(timestampFormat, value, time.Local) if err != nil { return time.Time{}, err } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 1eecbc5018125..18dd2e93aa1c1 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -52,18 +52,18 @@ func TestSplitCSVLineIntoValues(t *testing.T) { expectedMetricsValue := []string{"0.00", "0", "0.0", "0.0", "0.0", "0.0"} expectedCoreOrPidsValue := []string{"\"45417", "29170\"", "37", "44"} - timeValue, metricsValue, coreOrPidsValue, err := splitCSVLineIntoValues(line) + splitCSV, err := splitCSVLineIntoValues(line) assert.Nil(t, err) - assert.Equal(t, expectedTimeValue, timeValue) - assert.Equal(t, expectedMetricsValue, metricsValue) - assert.Equal(t, expectedCoreOrPidsValue, coreOrPidsValue) + assert.Equal(t, expectedTimeValue, splitCSV.timeValue) + assert.Equal(t, expectedMetricsValue, splitCSV.metricsValues) + assert.Equal(t, expectedCoreOrPidsValue, splitCSV.coreOrPIDsValues) wrongLine := "2020-08-12 13:34:36,37,44,0.00,0,0.0" - timeValue, metricsValue, coreOrPidsValue, err = splitCSVLineIntoValues(wrongLine) + splitCSV, err = splitCSVLineIntoValues(wrongLine) assert.NotNil(t, err) - assert.Equal(t, "", timeValue) - assert.Nil(t, nil, metricsValue) - assert.Nil(t, nil, coreOrPidsValue) + assert.Equal(t, "", splitCSV.timeValue) + assert.Nil(t, nil, splitCSV.metricsValues) + assert.Nil(t, nil, splitCSV.coreOrPIDsValues) } func TestFindPIDsInMeasurement(t *testing.T) { @@ -107,7 +107,6 @@ func TestCreateArgsCores(t *testing.T) { assert.EqualValues(t, expected, result) cores = []string{"1,2,3", "4,5,6"} - expected = "--mon-core=" expectedPrefix := "--mon-core=" expectedSubstring := "all:[1,2,3];mbt:[1,2,3];" expectedSubstring2 := "all:[4,5,6];mbt:[4,5,6];" diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index a567e1aacb1fa..4fdb91dc7b128 100644 --- a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -5,12 +5,26 @@ package intel_rdt import ( "context" + "errors" "strings" "time" "github.com/influxdata/telegraf" ) +type parsedCoresMeasurement struct { + cores string + values []float64 + time time.Time +} + +type parsedProcessMeasurement struct { + process string + cores string + values []float64 + time time.Time +} + // Publisher for publish new RDT metrics to telegraf accumulator type Publisher struct { acc telegraf.Accumulator @@ -50,48 +64,48 @@ func (p *Publisher) publish(ctx context.Context) { } func (p *Publisher) publishCores(measurement string) { - coresString, values, timestamp, err := parseCoresMeasurement(measurement) + parsedCoresMeasurement, err := parseCoresMeasurement(measurement) if err != nil { 
p.errChan <- err } - p.addToAccumulatorCores(coresString, values, timestamp) + p.addToAccumulatorCores(parsedCoresMeasurement) } func (p *Publisher) publishProcess(measurement processMeasurement) { - process, coresString, values, timestamp, err := parseProcessesMeasurement(measurement) + parsedProcessMeasurement, err := parseProcessesMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorProcesses(process, coresString, values, timestamp) + p.addToAccumulatorProcesses(parsedProcessMeasurement) } -func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { +func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error) { var values []float64 - timeValue, metricsValues, cores, err := splitCSVLineIntoValues(measurements) + splitCSV, err := splitCSVLineIntoValues(measurements) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } // change string slice to one string and separate it by coma - coresString := strings.Join(cores, ",") + coresString := strings.Join(splitCSV.coreOrPIDsValues, ",") // trim unwanted quotes coresString = strings.Trim(coresString, "\"") - for _, metric := range metricsValues { + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } values = append(values, parsedValue) } - return coresString, values, timestamp, nil + return parsedCoresMeasurement{coresString, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -102,41 +116,47 @@ func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, tags := map[string]string{} fields := make(map[string]interface{}) - tags["cores"] = cores + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } } -func parseProcessesMeasurement(measurement processMeasurement) (string, string, []float64, time.Time, error) { - var values []float64 - timeValue, metricsValues, coreOrPidsValues, pids, err := parseProcessMeasurement(measurement.measurement) +func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMeasurement, error) { + splitCSV, err := splitCSVLineIntoValues(measurement.measurement) + if err != nil { + return parsedProcessMeasurement{}, err + } + pids, err := findPIDsInMeasurement(measurement.measurement) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err + } + lenOfPIDs := len(strings.Split(pids, ",")) + if lenOfPIDs > len(splitCSV.coreOrPIDsValues) { + return parsedProcessMeasurement{}, errors.New("detected more pids (quoted) than actual number of pids in csv line") } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } actualProcess := measurement.name - 
lenOfPids := len(strings.Split(pids, ",")) - cores := coreOrPidsValues[lenOfPids:] - coresString := strings.Trim(strings.Join(cores, ","), `"`) + cores := strings.Trim(strings.Join(splitCSV.coreOrPIDsValues[lenOfPIDs:], ","), `"`) - for _, metric := range metricsValues { + var values []float64 + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } values = append(values, parsedValue) } - return actualProcess, coresString, values, timestamp, nil + return parsedProcessMeasurement{actualProcess, cores, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -147,23 +167,11 @@ func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metr tags := map[string]string{} fields := make(map[string]interface{}) - tags["process"] = process - tags["cores"] = cores + tags["process"] = measurement.process + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) - } -} - -func parseProcessMeasurement(measurements string) (string, []string, []string, string, error) { - timeValue, metricsValues, coreOrPidsValues, err := splitCSVLineIntoValues(measurements) - if err != nil { - return "", nil, nil, "", err - } - pids, err := findPIDsInMeasurement(measurements) - if err != nil { - return "", nil, nil, "", err + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } - return timeValue, metricsValues, coreOrPidsValues, pids, nil } diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 7db71e9ac5afa..2529a2235a1b9 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -37,29 +37,29 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.Nil(t, err) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) t.Run("not valid measurement string", func(t *testing.T) { measurement := 
"not, valid, measurement" - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid values string", func(t *testing.T) { measurement := fmt.Sprintf("%s,%s,%s,%s,%f,%f,%f,%f", @@ -72,12 +72,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid timestamp format", func(t *testing.T) { invalidTimestamp := "2020-08-12-21 13:34:" @@ -91,12 +91,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) } @@ -119,44 +119,36 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) newMeasurement := processMeasurement{ name: processName, measurement: measurement, } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + result, err := parseProcessesMeasurement(newMeasurement) assert.Nil(t, err) - assert.Equal(t, processName, actualProcess) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, processName, result.process) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) - t.Run("not valid measurement string", func(t *testing.T) { - processName := "process_name" - measurement := "invalid,measurement,format" - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := 
parseProcessesMeasurement(newMeasurement) - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid timestamp format", func(t *testing.T) { - invalidTimestamp := "2020-20-20-31" - measurement := fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", + invalidTimestamp := "2020-20-20-31" + negativeTests := []struct { + name string + measurement string + }{{ + name: "not valid measurement string", + measurement: "invalid,measurement,format", + }, { + name: "not valid timestamp format", + measurement: fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", invalidTimestamp, pids, cores, @@ -165,44 +157,42 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["LLC"], metricsValues["MBL"], metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid values string", func(t *testing.T) { - measurement := fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", - timestamp, - pids, - cores, - "1##", - "da", - metricsValues["LLC"], - metricsValues["MBL"], - metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + metricsValues["MBT"]), + }, + { + name: "not valid values string", + measurement: fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", + timestamp, + pids, + cores, + "1##", + "da", + metricsValues["LLC"], + metricsValues["MBL"], + metricsValues["MBR"], + metricsValues["MBT"]), + }, + { + name: "not valid csv line with quotes", + measurement: "0000-08-02 0:00:00,,\",,,,,,,,,,,,,,,,,,,,,,,,\",,", + }, + } - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) + for _, test := range negativeTests { + t.Run(test.name, func(t *testing.T) { + newMeasurement := processMeasurement{ + name: processName, + measurement: test.measurement, + } + result, err := parseProcessesMeasurement(newMeasurement) + + assert.NotNil(t, err) + assert.Equal(t, "", result.process) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) + }) + } } func TestAddToAccumulatorCores(t *testing.T) { @@ -212,9 +202,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetrics { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -226,9 +216,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 
34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetricsShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -244,9 +234,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcesses { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -259,9 +249,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcessesShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md index 35e14c77d0fbb..6c2468cd76068 100644 --- a/plugins/inputs/internal/README.md +++ b/plugins/inputs/internal/README.md @@ -5,7 +5,7 @@ The `internal` plugin collects metrics about the telegraf agent itself. Note that some metrics are aggregates across all instances of one type of plugin. -### Configuration: +## Configuration ```toml # Collect statistics about itself @@ -14,71 +14,69 @@ plugin. # collect_memstats = true ``` -### Measurements & Fields: +## Measurements & Fields -memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats +memstats are taken from the Go runtime: - internal_memstats - - alloc_bytes - - frees - - heap_alloc_bytes - - heap_idle_bytes - - heap_in_use_bytes - - heap_objects_bytes - - heap_released_bytes - - heap_sys_bytes - - mallocs - - num_gc - - pointer_lookups - - sys_bytes - - total_alloc_bytes + - alloc_bytes + - frees + - heap_alloc_bytes + - heap_idle_bytes + - heap_in_use_bytes + - heap_objects_bytes + - heap_released_bytes + - heap_sys_bytes + - mallocs + - num_gc + - pointer_lookups + - sys_bytes + - total_alloc_bytes agent stats collect aggregate stats on all telegraf plugins. - internal_agent - - gather_errors - - metrics_dropped - - metrics_gathered - - metrics_written + - gather_errors + - metrics_dropped + - metrics_gathered + - metrics_written internal_gather stats collect aggregate stats on all input plugins that are of the same input type. They are tagged with `input=` `version=` and `go_version=`. - internal_gather - - gather_time_ns - - metrics_gathered + - gather_time_ns + - metrics_gathered internal_write stats collect aggregate stats on all output plugins that are of the same input type. They are tagged with `output=` and `version=`. 
- - internal_write - - buffer_limit - - buffer_size - - metrics_added - - metrics_written - - metrics_dropped - - metrics_filtered - - write_time_ns + - buffer_limit + - buffer_size + - metrics_added + - metrics_written + - metrics_dropped + - metrics_filtered + - write_time_ns internal_ are metrics which are defined on a per-plugin basis, and usually contain tags which differentiate each instance of a particular type of plugin and `version=`. - internal_ - - individual plugin-specific fields, such as requests counts. + - individual plugin-specific fields, such as requests counts. -### Tags: +## Tags All measurements for specific plugins are tagged with information relevant to each particular plugin and with `version=`. +## Example Output -### Example Output: - -``` +```shell internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md index f9a71446f4979..0d10cc7d22655 100644 --- a/plugins/inputs/internet_speed/README.md +++ b/plugins/inputs/internet_speed/README.md @@ -16,7 +16,6 @@ The `Internet Speed Monitor` collects data about the internet speed on the syste It collects latency, download speed and upload speed - | Name | filed name | type | Unit | | -------------- | ---------- | ------- | ---- | | Download Speed | download | float64 | Mbps | @@ -27,4 +26,4 @@ It collects latency, download speed and upload speed ```sh internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index 5da647f47793f..5bd586fa15f61 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -2,7 +2,8 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. -### Configuration +## Configuration + ```toml [[inputs.interrupts]] ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is @@ -18,7 +19,7 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p # irq = [ "NET_RX", "TASKLET" ] ``` -### Metrics +## Metrics There are two styles depending on the value of `cpu_as_tag`. 
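Since the plugin reads `/proc/interrupts` (and `/proc/softirqs`), a rough sketch of how one row breaks down into the IRQ id, per-CPU counts, and the type/device description may help; the plugin then tags or aggregates the per-CPU counts according to `cpu_as_tag`. This assumes the usual `/proc/interrupts` layout and is not the plugin's actual parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseInterruptLine splits one /proc/interrupts row into the IRQ id, the
// per-CPU counts, and the trailing type/device description. It assumes the
// common layout "IRQ: count0 count1 ... type device"; the plugin's real
// parser handles more variations.
func parseInterruptLine(line string, numCPU int) (irq string, counts []int64, desc string, err error) {
	fields := strings.Fields(line)
	if len(fields) < numCPU+1 {
		return "", nil, "", fmt.Errorf("short line: %q", line)
	}
	irq = strings.TrimSuffix(fields[0], ":")
	for _, f := range fields[1 : numCPU+1] {
		c, perr := strconv.ParseInt(f, 10, 64)
		if perr != nil {
			return "", nil, "", perr
		}
		counts = append(counts, c)
	}
	desc = strings.Join(fields[numCPU+1:], " ")
	return irq, counts, desc, nil
}

func main() {
	irq, counts, desc, _ := parseInterruptLine("  0:         23          0   IO-APIC   2-edge      timer", 2)
	fmt.Println(irq, counts, desc) // 0 [23 0] IO-APIC 2-edge timer
}
```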
@@ -64,10 +65,11 @@ With `cpu_as_tag = true`: - fields: - count (int, number of interrupts) -### Example Output +## Example Output With `cpu_as_tag = false`: -``` + +```shell interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,cpu=cpu0 count=23i 1489346531000000000 interrupts,irq=1,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000 interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,cpu=cpu1 count=1i 1489346531000000000 @@ -75,7 +77,8 @@ soft_interrupts,irq=NET_RX,cpu=cpu0 count=280879i 1489346531000000000 ``` With `cpu_as_tag = true`: -``` + +```shell interrupts,cpu=cpu6,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 interrupts,cpu=cpu7,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 soft_interrupts,cpu=cpu0,irq=HI count=246441i 1543539773000000000 diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 609409985cb35..b704188df68a8 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -5,26 +5,29 @@ Get bare metal metrics using the command line utility If no servers are specified, the plugin will query the local machine sensor stats via the following command: -``` +```sh ipmitool sdr ``` + or with the version 2 schema: -``` + +```sh ipmitool sdr elist ``` When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: -``` +```sh ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` Any of the following parameters will be added to the aformentioned query if they're configured: -``` + +```sh -y hex_key -L privilege ``` -### Configuration +## Configuration ```toml # Read metrics from the bare metal servers via IPMI @@ -72,9 +75,10 @@ Any of the following parameters will be added to the aformentioned query if they # cache_path = "" ``` -### Measurements +## Measurements Version 1 schema: + - ipmi_sensor: - tags: - name @@ -86,6 +90,7 @@ Version 1 schema: - value (float) Version 2 schema: + - ipmi_sensor: - tags: - name @@ -98,17 +103,19 @@ Version 2 schema: - fields: - value (float) -#### Permissions +### Permissions When gathering from the local system, Telegraf will need permission to the ipmi device node. When using udev you can create the device node giving `rw` permissions to the `telegraf` user by adding the following rule to `/etc/udev/rules.d/52-telegraf-ipmi.rules`: -``` +```sh KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ``` + Alternatively, it is possible to use sudo. 
You will need the following in your telegraf config: + ```toml [[inputs.ipmi_sensor]] use_sudo = true @@ -124,11 +131,13 @@ telegraf ALL=(root) NOPASSWD: IPMITOOL Defaults!IPMITOOL !logfile, !syslog, !pam_session ``` -### Example Output +## Example Output + +### Version 1 Schema -#### Version 1 Schema When retrieving stats from a remote server: -``` + +```shell ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000 ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000 ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 @@ -137,9 +146,9 @@ ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 15171255130 ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 ``` - When retrieving stats from the local machine (no server specified): -``` + +```shell ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000 ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000 ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 @@ -151,7 +160,8 @@ ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 #### Version 2 Schema When retrieving stats from the local machine (no server specified): -``` + +```shell ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000 ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000 ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000 diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index d26e739e96d43..801188130c960 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -151,7 +151,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { cmd := execCommand(name, dumpOpts...) 
out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } } opts = append(opts, "-S") @@ -170,7 +170,7 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) timestamp := time.Now() if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } if m.MetricVersion == 2 { return m.parseV2(acc, hostname, out, timestamp) @@ -315,6 +315,16 @@ func aToFloat(val string) (float64, error) { return f, nil } +func sanitizeIPMICmd(args []string) []string { + for i, v := range args { + if v == "-P" { + args[i+1] = "REDACTED" + } + } + + return args +} + func trim(s string) string { return strings.TrimSpace(s) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 4a2910101ab82..504a7467f5130 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -779,3 +779,51 @@ func Test_parseV2(t *testing.T) { }) } } + +func TestSanitizeIPMICmd(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "default args", + args: []string{ + "-H", "localhost", + "-U", "username", + "-P", "password", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-P", "REDACTED", + "-I", "lan", + }, + }, + { + name: "no password", + args: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + }, + { + name: "empty args", + args: []string{}, + expected: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sanitizedArgs []string = sanitizeIPMICmd(tt.args) + require.Equal(t, tt.expected, sanitizedArgs) + }) + } +} diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md index f4477254f117d..945ed43847dba 100644 --- a/plugins/inputs/ipset/README.md +++ b/plugins/inputs/ipset/README.md @@ -5,33 +5,37 @@ It uses the output of the command "ipset save". Ipsets created without the "counters" option are ignored. Results are tagged with: + - ipset name - ipset entry There are 3 ways to grant telegraf the right to run ipset: -* Run as root (strongly discouraged) -* Use sudo -* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. -### Using systemd capabilities +- Run as root (strongly discouraged) +- Use sudo +- Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. 
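To make the sudo option concrete, here is a minimal sketch of how the `ipset save` invocation could be wrapped when `use_sudo = true`. The helper name is hypothetical and this is not the plugin's actual code; see the ipset plugin source for the real invocation:

```go
package main

import (
	"fmt"
	"os/exec"
)

// ipsetSaveCmd returns the command used to dump set counters, optionally
// prefixed with sudo as allowed by the NOPASSWD rule shown in the sudo
// section. Illustrative sketch only.
func ipsetSaveCmd(useSudo bool) *exec.Cmd {
	args := []string{"ipset", "save"}
	if useSudo {
		args = append([]string{"sudo"}, args...)
	}
	return exec.Command(args[0], args[1:]...)
}

func main() {
	fmt.Println(ipsetSaveCmd(true).Args) // [sudo ipset save]
}
```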
+ +## Using systemd capabilities You may run `systemctl edit telegraf.service` and add the following: -``` +```text [Service] CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN ``` -### Using sudo +## Using sudo You will need the following in your telegraf config: + ```toml [[inputs.ipset]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -40,7 +44,7 @@ telegraf ALL=(root) NOPASSWD: IPSETSAVE Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Configuration +## Configuration ```toml [[inputs.ipset]] @@ -56,15 +60,15 @@ Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Example Output +## Example Output -``` +```sh $ sudo ipset save create myset hash:net family inet hashsize 1024 maxelem 65536 counters comment add myset 10.69.152.1 packets 8 bytes 672 comment "machine A" ``` -``` +```sh $ telegraf --config telegraf.conf --input-filter ipset --test --debug * Plugin: inputs.ipset, Collection 1 > ipset,rule=10.69.152.1,host=trashme,set=myset bytes_total=8i,packets_total=672i 1507615028000000000 diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index db730c88178ff..c6d14dd2d41fb 100644 --- a/plugins/inputs/iptables/README.md +++ b/plugins/inputs/iptables/README.md @@ -14,11 +14,11 @@ The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You ha * Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option. * Configure sudo to grant telegraf to run iptables. This is the most restrictive option, but require sudo setup. -### Using systemd capabilities +## Using systemd capabilities You may run `systemctl edit telegraf.service` and add the following: -``` +```shell [Service] CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN @@ -26,9 +26,10 @@ AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN Since telegraf will fork a process to run iptables, `AmbientCapabilities` is required to transmit the capabilities bounding set to the forked process. -### Using sudo +## Using sudo You will need the following in your telegraf config: + ```toml [[inputs.iptables]] use_sudo = true @@ -44,11 +45,11 @@ telegraf ALL=(root) NOPASSWD: IPTABLESSHOW Defaults!IPTABLESSHOW !logfile, !syslog, !pam_session ``` -### Using IPtables lock feature +## Using IPtables lock feature Defining multiple instances of this plugin in telegraf.conf can lead to concurrent IPtables access resulting in "ERROR in input [inputs.iptables]: exit status 4" messages in telegraf.log and missing metrics. Setting 'use_lock = true' in the plugin configuration will run IPtables with the '-w' switch, allowing a lock usage to prevent this error. -### Configuration: +## Configuration ```toml # use sudo to run iptables @@ -63,25 +64,24 @@ Defining multiple instances of this plugin in telegraf.conf can lead to concurre chains = [ "INPUT" ] ``` -### Measurements & Fields: - +## Measurements & Fields -- iptables - - pkts (integer, count) - - bytes (integer, bytes) +* iptables + * pkts (integer, count) + * bytes (integer, bytes) -### Tags: +## Tags -- All measurements have the following tags: - - table - - chain - - ruleid +* All measurements have the following tags: + * table + * chain + * ruleid The `ruleid` is the comment associated to the rule. 
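The `ruleid` tag therefore comes straight from the rule's comment. As a rough illustration, a counter line such as the ones in the example output below can be reduced to the plugin's fields and the `ruleid` tag like this; it assumes numeric counters (as printed by `iptables -nvxL`) and is not the plugin's actual parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseRule extracts the packet and byte counters plus the rule comment (used
// as the ruleid tag) from one iptables data line. Illustrative sketch only;
// counters with K/M suffixes are not handled here.
func parseRule(line string) (pkts, bytes uint64, ruleid string, err error) {
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return 0, 0, "", fmt.Errorf("unexpected line: %q", line)
	}
	if pkts, err = strconv.ParseUint(fields[0], 10, 64); err != nil {
		return 0, 0, "", err
	}
	if bytes, err = strconv.ParseUint(fields[1], 10, 64); err != nil {
		return 0, 0, "", err
	}
	// The comment, if present, sits between /* and */ at the end of the line.
	if start := strings.Index(line, "/*"); start != -1 {
		if end := strings.Index(line[start:], "*/"); end != -1 {
			ruleid = strings.TrimSpace(line[start+2 : start+end])
		}
	}
	return pkts, bytes, ruleid, nil
}

func main() {
	line := `42  2048 ACCEPT  tcp  --  *  *  192.168.0.0/24  0.0.0.0/0  tcp dpt:80 /* httpd */`
	fmt.Println(parseRule(line)) // 42 2048 httpd <nil>
}
```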
-### Example Output: +## Example Output -``` +```text $ iptables -nvL INPUT Chain INPUT (policy DROP 0 packets, 0 bytes) pkts bytes target prot opt in out source destination @@ -89,7 +89,7 @@ pkts bytes target prot opt in out source destination 42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */ ``` -``` +```shell $ ./telegraf --config telegraf.conf --input-filter iptables --test iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455 iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455 diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md index 75e5b51037085..2a44c9d15e47f 100644 --- a/plugins/inputs/ipvs/README.md +++ b/plugins/inputs/ipvs/README.md @@ -5,14 +5,14 @@ metrics about ipvs virtual and real servers. **Supported Platforms:** Linux -### Configuration +## Configuration ```toml [[inputs.ipvs]] # no configuration ``` -#### Permissions +### Permissions Assuming you installed the telegraf package via one of the published packages, the process will be running as the `telegraf` user. However, in order for this @@ -20,7 +20,7 @@ plugin to communicate over netlink sockets it needs the telegraf process to be running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure to ensure these permissions before running telegraf with this plugin included. -### Metrics +## Metrics Server will contain tags identifying how it was configured, using one of `address` + `port` + `protocol` *OR* `fwmark`. This is how one would normally @@ -66,17 +66,19 @@ configure a virtual server using `ipvsadm`. - pps_out - cps -### Example Output +## Example Output Virtual server is configured using `fwmark` and backed by 2 real servers: -``` + +```shell ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000 ``` Virtual server is configured using `proto+addr+port` and backed by 2 real servers: -``` + +```shell ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47 inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000 diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md index e12326031b9ef..5726af2cbd5a2 100644 --- a/plugins/inputs/jenkins/README.md +++ 
b/plugins/inputs/jenkins/README.md @@ -4,7 +4,7 @@ The jenkins plugin gathers information about the nodes and jobs running in a jen This plugin does not require a plugin on jenkins and it makes use of Jenkins API to retrieve all the information needed. -### Configuration: +## Configuration ```toml [[inputs.jenkins]] @@ -55,7 +55,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API # max_connections = 5 ``` -### Metrics: +## Metrics - jenkins - tags: @@ -65,7 +65,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - busy_executors - total_executors -+ jenkins_node +- jenkins_node - tags: - arch - disk_path @@ -96,23 +96,22 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - number - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) -### Sample Queries: +## Sample Queries -``` +```sql SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null) ``` -``` +```sql SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null) ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter jenkins --test jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000 jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 ``` - diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 96ee48701b464..3b152f8e096a4 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,8 +1,8 @@ # Jolokia Input Plugin -**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin. +## Deprecated in version 1.5: Please use the [jolokia2][] plugin -#### Configuration +## Configuration ```toml # Read JMX metrics through Jolokia @@ -61,13 +61,15 @@ attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" ``` -#### Description +## Description The Jolokia plugin collects JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. +See [official Jolokia website](https://jolokia.org/) for more information. -See: https://jolokia.org/ +## Measurements -# Measurements: Jolokia plugin produces one measure for each metric configured, adding Server's `jolokia_name`, `jolokia_host` and `jolokia_port` as tags. 
+ +[jolokia2]: /plugins/inputs/jolokia2 diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index e91e9a1087fda..084a84577fdc9 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validThreeLevelMultiValueJSON = ` @@ -143,8 +143,8 @@ func TestHttpJsonMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -167,8 +167,8 @@ func TestHttpJsonBulkResponse(t *testing.T) { var acc testutil.Accumulator err := jolokia.Gather(&acc) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -195,8 +195,8 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_java.lang:type=Memory_ObjectPendingFinalizationCount": 0.0, @@ -228,9 +228,9 @@ func TestHttp404(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected @@ -241,7 +241,7 @@ func TestHttpInvalidJson(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "error decoding JSON response") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "error decoding JSON response") } diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index a944949dbab7e..ae4b6a5015042 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -2,9 +2,9 @@ The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). -### Configuration: +## Configuration -#### Jolokia Agent Configuration +### Jolokia Agent Configuration The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints. @@ -34,7 +34,7 @@ Optionally, specify TLS options for communicating with agents: paths = ["Uptime"] ``` -#### Jolokia Proxy Configuration +### Jolokia Proxy Configuration The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint. 
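For readers unfamiliar with the JSON-over-HTTP protocol that both the agent and proxy plugins rely on, a bare-bones sketch of a single Jolokia "read" request follows. The URL and MBean values are placeholders, and the real plugins batch requests, apply TLS options, and map results onto metrics; this only demonstrates the wire format:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// readAttribute issues one Jolokia read request and returns the decoded
// "value" field of the response. Minimal protocol sketch only.
func readAttribute(agentURL, mbean, attribute string) (interface{}, error) {
	body, err := json.Marshal(map[string]string{
		"type":      "read",
		"mbean":     mbean,
		"attribute": attribute,
	})
	if err != nil {
		return nil, err
	}
	resp, err := http.Post(agentURL, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var result struct {
		Value  interface{} `json:"value"`
		Status int         `json:"status"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return nil, err
	}
	if result.Status != 200 {
		return nil, fmt.Errorf("jolokia returned status %d", result.Status)
	}
	return result.Value, nil
}

func main() {
	// Placeholder endpoint and MBean, matching the style of the config examples.
	value, err := readAttribute("http://localhost:8080/jolokia", "java.lang:type=Runtime", "Uptime")
	fmt.Println(value, err)
}
```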
@@ -79,7 +79,7 @@ Optionally, specify TLS options for communicating with proxies: paths = ["Uptime"] ``` -#### Jolokia Metric Configuration +### Jolokia Metric Configuration Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean. @@ -103,7 +103,7 @@ Use `paths` to refine which fields to collect. The preceeding `jvm_memory` `metric` declaration produces the following output: -``` +```text jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000 ``` @@ -119,7 +119,7 @@ Use `*` wildcards against `mbean` property-key values to create distinct series Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and `name` is used as a tag, the preceeding `jvm_garbage_collector` `metric` declaration produces two metrics. -``` +```shell jvm_garbage_collector,name=G1\ Old\ Generation CollectionCount=0,CollectionTime=0 1503762520000000000 jvm_garbage_collector,name=G1\ Young\ Generation CollectionTime=32,CollectionCount=2 1503762520000000000 ``` @@ -137,7 +137,7 @@ Use `tag_prefix` along with `tag_keys` to add detail to tag names. The preceeding `jvm_memory_pool` `metric` declaration produces six metrics, each with a distinct `pool_name` tag. -``` +```text jvm_memory_pool,pool_name=Compressed\ Class\ Space PeakUsage.max=1073741824,PeakUsage.committed=3145728,PeakUsage.init=0,Usage.committed=3145728,Usage.init=0,PeakUsage.used=3017976,Usage.max=1073741824,Usage.used=3017976 1503764025000000000 jvm_memory_pool,pool_name=Code\ Cache PeakUsage.init=2555904,PeakUsage.committed=6291456,Usage.committed=6291456,PeakUsage.used=6202752,PeakUsage.max=251658240,Usage.used=6210368,Usage.max=251658240,Usage.init=2555904 1503764025000000000 jvm_memory_pool,pool_name=G1\ Eden\ Space CollectionUsage.max=-1,PeakUsage.committed=56623104,PeakUsage.init=56623104,PeakUsage.used=53477376,Usage.max=-1,Usage.committed=49283072,Usage.used=19922944,CollectionUsage.committed=49283072,CollectionUsage.init=56623104,CollectionUsage.used=0,PeakUsage.max=-1,Usage.init=56623104 1503764025000000000 @@ -158,7 +158,7 @@ Use substitutions to create fields and field prefixes with MBean property-keys c The preceeding `kafka_topic` `metric` declaration produces a metric per Kafka topic. The `name` Mbean property-key is used as a field prefix to aid in gathering fields together into the single metric. 
-``` +```text kafka_topic,topic=my-topic BytesOutPerSec.MeanRate=0,FailedProduceRequestsPerSec.MeanRate=0,BytesOutPerSec.EventType="bytes",BytesRejectedPerSec.Count=0,FailedProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.EventType="requests",MessagesInPerSec.RateUnit="SECONDS",BytesInPerSec.EventType="bytes",BytesOutPerSec.RateUnit="SECONDS",BytesInPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.EventType="requests",TotalFetchRequestsPerSec.MeanRate=146.301533938701,BytesOutPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.MeanRate=0,BytesRejectedPerSec.FifteenMinuteRate=0,MessagesInPerSec.FiveMinuteRate=0,BytesInPerSec.Count=0,BytesRejectedPerSec.MeanRate=0,FailedFetchRequestsPerSec.MeanRate=0,FailedFetchRequestsPerSec.FiveMinuteRate=0,FailedFetchRequestsPerSec.FifteenMinuteRate=0,FailedProduceRequestsPerSec.Count=0,TotalFetchRequestsPerSec.FifteenMinuteRate=128.59314292334466,TotalFetchRequestsPerSec.OneMinuteRate=126.71551273850747,TotalFetchRequestsPerSec.Count=1353483,TotalProduceRequestsPerSec.FifteenMinuteRate=0,FailedFetchRequestsPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.Count=0,FailedProduceRequestsPerSec.FifteenMinuteRate=0,TotalFetchRequestsPerSec.FiveMinuteRate=130.8516148751592,TotalFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.RateUnit="SECONDS",BytesInPerSec.MeanRate=0,FailedFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.OneMinuteRate=0,BytesOutPerSec.Count=0,BytesOutPerSec.OneMinuteRate=0,MessagesInPerSec.FifteenMinuteRate=0,MessagesInPerSec.MeanRate=0,BytesInPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.OneMinuteRate=0,TotalProduceRequestsPerSec.EventType="requests",BytesRejectedPerSec.FiveMinuteRate=0,BytesRejectedPerSec.EventType="bytes",BytesOutPerSec.FiveMinuteRate=0,FailedProduceRequestsPerSec.FiveMinuteRate=0,MessagesInPerSec.Count=0,TotalProduceRequestsPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.OneMinuteRate=0,MessagesInPerSec.EventType="messages",MessagesInPerSec.OneMinuteRate=0,TotalFetchRequestsPerSec.EventType="requests",BytesInPerSec.RateUnit="SECONDS",BytesInPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.Count=0 1503767532000000000 ``` @@ -170,7 +170,7 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration | `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. | | `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. 
| -### Example Configurations: +## Example Configurations - [ActiveMQ](/plugins/inputs/jolokia2/examples/activemq.conf) - [BitBucket](/plugins/inputs/jolokia2/examples/bitbucket.conf) diff --git a/plugins/inputs/jolokia2/gatherer_test.go b/plugins/inputs/jolokia2/gatherer_test.go index 4ba4b586ad5f4..e01c603addaeb 100644 --- a/plugins/inputs/jolokia2/gatherer_test.go +++ b/plugins/inputs/jolokia2/gatherer_test.go @@ -3,7 +3,7 @@ package jolokia2 import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJolokia2_makeReadRequests(t *testing.T) { @@ -96,9 +96,9 @@ func TestJolokia2_makeReadRequests(t *testing.T) { for _, c := range cases { payload := makeReadRequests([]Metric{c.metric}) - assert.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) + require.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) for _, actual := range payload { - assert.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) + require.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) } } } diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index 01750bf002ff5..af22a27358b32 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" @@ -80,7 +80,7 @@ func TestJolokia2_ScalarValues(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "scalar_without_attribute", map[string]interface{}{ "value": 123.0, @@ -240,7 +240,7 @@ func TestJolokia2_ObjectValues(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "object_without_attribute", map[string]interface{}{ "biz": 123.0, @@ -328,7 +328,7 @@ func TestJolokia2_StatusCodes(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "ok", map[string]interface{}{ "value": 1.0, @@ -378,7 +378,7 @@ func TestJolokia2_TagRenaming(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_tag_prefix", map[string]interface{}{ "value": 123.0, @@ -471,7 +471,7 @@ func TestJolokia2_FieldRenaming(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_field_modifiers", map[string]interface{}{ "DEFAULT_PREFIX_hello_DEFAULT_SEPARATOR_world": 123.0, @@ -579,7 +579,7 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "mbean_name_and_object_keys", 
map[string]interface{}{ "value": 123.0, @@ -672,7 +672,7 @@ func TestJolokia2_MetricCompaction(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "compact_metric", map[string]interface{}{ "value": 123.0, @@ -733,7 +733,7 @@ func TestJolokia2_ProxyTargets(t *testing.T) { plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "hello", map[string]interface{}{ "value": 123.0, @@ -755,11 +755,11 @@ func TestFillFields(t *testing.T) { results := map[string]interface{}{} newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complexPoint, results) - assert.Equal(t, map[string]interface{}{}, results) + require.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalarPoint, results) - assert.Equal(t, map[string]interface{}{}, results) + require.Equal(t, map[string]interface{}{}, results) } func setupServer(resp string) *httptest.Server { diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index 1a28b55aeb8d9..895a4b5cf3de6 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -3,7 +3,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data from listed sensors using Junos Telemetry Interface. Refer to [openconfig.net](http://openconfig.net/) for more details about OpenConfig and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html). -### Configuration: +## Configuration ```toml # Subscribe and receive OpenConfig Telemetry data using JTI @@ -57,7 +57,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f str_as_tags = false ``` -### Tags: +## Tags - All measurements are tagged appropriately using the identifier information in incoming data diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go index 7ddeefacab635..1342758887932 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go @@ -1,182 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: authentication_service.proto +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: auth/authentication_service.proto -/* -Package authentication is a generated protocol buffer package. - -It is generated from these files: - authentication_service.proto - -It has these top-level messages: - LoginRequest - LoginReply -*/ package authentication -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The request message containing the user's name, password and client id type LoginRequest struct { - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (x *LoginRequest) Reset() { + *x = LoginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoginRequest) ProtoMessage() {} + +func (x *LoginRequest) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LoginRequest) Reset() { *m = LoginRequest{} } -func (m *LoginRequest) String() string { return proto.CompactTextString(m) } -func (*LoginRequest) ProtoMessage() {} -func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. +func (*LoginRequest) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{0} +} -func (m *LoginRequest) GetUserName() string { - if m != nil { - return m.UserName +func (x *LoginRequest) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *LoginRequest) GetPassword() string { - if m != nil { - return m.Password +func (x *LoginRequest) GetPassword() string { + if x != nil { + return x.Password } return "" } -func (m *LoginRequest) GetClientId() string { - if m != nil { - return m.ClientId +func (x *LoginRequest) GetClientId() string { + if x != nil { + return x.ClientId } return "" } +// // The response message containing the result of login attempt. 
// result value of true indicates success and false indicates // failure type LoginReply struct { - Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LoginReply) Reset() { *m = LoginReply{} } -func (m *LoginReply) String() string { return proto.CompactTextString(m) } -func (*LoginReply) ProtoMessage() {} -func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} -func (m *LoginReply) GetResult() bool { - if m != nil { - return m.Result +func (x *LoginReply) Reset() { + *x = LoginReply{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func init() { - proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest") - proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply") +func (x *LoginReply) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*LoginReply) ProtoMessage() {} -// Client API for Login service - -type LoginClient interface { - LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) -} - -type loginClient struct { - cc *grpc.ClientConn +func (x *LoginReply) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func NewLoginClient(cc *grpc.ClientConn) LoginClient { - return &loginClient{cc} +// Deprecated: Use LoginReply.ProtoReflect.Descriptor instead. +func (*LoginReply) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{1} } -func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { - out := new(LoginReply) - err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *LoginReply) GetResult() bool { + if x != nil { + return x.Result } - return out, nil + return false } -// Server API for Login service +var File_auth_authentication_service_proto protoreflect.FileDescriptor + +var file_auth_authentication_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x32, + 0x51, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x48, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x12, 0x5a, 0x10, 0x2e, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_auth_authentication_service_proto_rawDescOnce sync.Once + file_auth_authentication_service_proto_rawDescData = file_auth_authentication_service_proto_rawDesc +) -type LoginServer interface { - LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) +func file_auth_authentication_service_proto_rawDescGZIP() []byte { + file_auth_authentication_service_proto_rawDescOnce.Do(func() { + file_auth_authentication_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_authentication_service_proto_rawDescData) + }) + return file_auth_authentication_service_proto_rawDescData } -func RegisterLoginServer(s *grpc.Server, srv LoginServer) { - s.RegisterService(&_Login_serviceDesc, srv) +var file_auth_authentication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_auth_authentication_service_proto_goTypes = []interface{}{ + (*LoginRequest)(nil), // 0: authentication.LoginRequest + (*LoginReply)(nil), // 1: authentication.LoginReply +} +var file_auth_authentication_service_proto_depIdxs = []int32{ + 0, // 0: authentication.Login.LoginCheck:input_type -> authentication.LoginRequest + 1, // 1: authentication.Login.LoginCheck:output_type -> authentication.LoginReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // 
[0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoginRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoginServer).LoginCheck(ctx, in) +func init() { file_auth_authentication_service_proto_init() } +func file_auth_authentication_service_proto_init() { + if File_auth_authentication_service_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/authentication.Login/LoginCheck", + if !protoimpl.UnsafeEnabled { + file_auth_authentication_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_auth_authentication_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Login_serviceDesc = grpc.ServiceDesc{ - ServiceName: "authentication.Login", - HandlerType: (*LoginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoginCheck", - Handler: _Login_LoginCheck_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_auth_authentication_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "authentication_service.proto", -} - -func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9, - 0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, - 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2, - 0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2, - 0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, - 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97, - 0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, - 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b, - 0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8, - 0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39, - 0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21, - 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00, + GoTypes: file_auth_authentication_service_proto_goTypes, + DependencyIndexes: 
file_auth_authentication_service_proto_depIdxs, + MessageInfos: file_auth_authentication_service_proto_msgTypes, + }.Build() + File_auth_authentication_service_proto = out.File + file_auth_authentication_service_proto_rawDesc = nil + file_auth_authentication_service_proto_goTypes = nil + file_auth_authentication_service_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto index a41e13a09f7d9..f67b67a6c5730 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto @@ -25,6 +25,7 @@ syntax = "proto3"; package authentication; +option go_package = ".;authentication"; // The Login service definition. service Login { diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go new file mode 100644 index 0000000000000..bbbf200ec68be --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package authentication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoginClient is the client API for Login service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoginClient interface { + LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) +} + +type loginClient struct { + cc grpc.ClientConnInterface +} + +func NewLoginClient(cc grpc.ClientConnInterface) LoginClient { + return &loginClient{cc} +} + +func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { + out := new(LoginReply) + err := c.cc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LoginServer is the server API for Login service. +// All implementations must embed UnimplementedLoginServer +// for forward compatibility +type LoginServer interface { + LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) + mustEmbedUnimplementedLoginServer() +} + +// UnimplementedLoginServer must be embedded to have forward compatible implementations. +type UnimplementedLoginServer struct { +} + +func (UnimplementedLoginServer) LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoginCheck not implemented") +} +func (UnimplementedLoginServer) mustEmbedUnimplementedLoginServer() {} + +// UnsafeLoginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoginServer will +// result in compilation errors. 
+type UnsafeLoginServer interface { + mustEmbedUnimplementedLoginServer() +} + +func RegisterLoginServer(s grpc.ServiceRegistrar, srv LoginServer) { + s.RegisterService(&Login_ServiceDesc, srv) +} + +func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoginServer).LoginCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/authentication.Login/LoginCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Login_ServiceDesc is the grpc.ServiceDesc for Login service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Login_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "authentication.Login", + HandlerType: (*LoginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LoginCheck", + Handler: _Login_LoginCheck_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth/authentication_service.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/gen.go b/plugins/inputs/jti_openconfig_telemetry/gen.go new file mode 100644 index 0000000000000..0b97e3bea9e55 --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/gen.go @@ -0,0 +1,11 @@ +package jti_openconfig_telemetry + +// To run these commands, make sure that protoc-gen-go and protoc-gen-go-grpc are installed +// > go install google.golang.org/protobuf/cmd/protoc-gen-go +// > go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +// +// Generated files were last generated with: +// - protoc-gen-go: v1.27.1 +// - protoc-gen-go-grpc: v1.1.0 +//go:generate protoc --go_out=auth/ --go-grpc_out=auth/ auth/authentication_service.proto +//go:generate protoc --go_out=oc/ --go-grpc_out=oc/ oc/oc.proto diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index bc7c780458f99..19d16dccc501a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -1,54 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: oc.proto - -/* -Package telemetry is a generated protocol buffer package. - -It is generated from these files: - oc.proto - -It has these top-level messages: - SubscriptionRequest - SubscriptionInput - Collector - Path - SubscriptionAdditionalConfig - SubscriptionReply - SubscriptionResponse - OpenConfigData - KeyValue - Delete - Eom - CancelSubscriptionRequest - CancelSubscriptionReply - GetSubscriptionsRequest - GetSubscriptionsReply - GetOperationalStateRequest - GetOperationalStateReply - DataEncodingRequest - DataEncodingReply -*/ -package telemetry +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: oc/oc.proto -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package telemetry import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Result of the operation type ReturnCode int32 @@ -59,21 +29,46 @@ const ( ReturnCode_UNKNOWN_ERROR ReturnCode = 2 ) -var ReturnCode_name = map[int32]string{ - 0: "SUCCESS", - 1: "NO_SUBSCRIPTION_ENTRY", - 2: "UNKNOWN_ERROR", -} -var ReturnCode_value = map[string]int32{ - "SUCCESS": 0, - "NO_SUBSCRIPTION_ENTRY": 1, - "UNKNOWN_ERROR": 2, +// Enum value maps for ReturnCode. +var ( + ReturnCode_name = map[int32]string{ + 0: "SUCCESS", + 1: "NO_SUBSCRIPTION_ENTRY", + 2: "UNKNOWN_ERROR", + } + ReturnCode_value = map[string]int32{ + "SUCCESS": 0, + "NO_SUBSCRIPTION_ENTRY": 1, + "UNKNOWN_ERROR": 2, + } +) + +func (x ReturnCode) Enum() *ReturnCode { + p := new(ReturnCode) + *p = x + return p } func (x ReturnCode) String() string { - return proto.EnumName(ReturnCode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReturnCode) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[0].Descriptor() +} + +func (ReturnCode) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[0] +} + +func (x ReturnCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReturnCode.Descriptor instead. +func (ReturnCode) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} } -func (ReturnCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Verbosity Level type VerbosityLevel int32 @@ -84,21 +79,46 @@ const ( VerbosityLevel_BRIEF VerbosityLevel = 2 ) -var VerbosityLevel_name = map[int32]string{ - 0: "DETAIL", - 1: "TERSE", - 2: "BRIEF", -} -var VerbosityLevel_value = map[string]int32{ - "DETAIL": 0, - "TERSE": 1, - "BRIEF": 2, +// Enum value maps for VerbosityLevel. +var ( + VerbosityLevel_name = map[int32]string{ + 0: "DETAIL", + 1: "TERSE", + 2: "BRIEF", + } + VerbosityLevel_value = map[string]int32{ + "DETAIL": 0, + "TERSE": 1, + "BRIEF": 2, + } +) + +func (x VerbosityLevel) Enum() *VerbosityLevel { + p := new(VerbosityLevel) + *p = x + return p } func (x VerbosityLevel) String() string { - return proto.EnumName(VerbosityLevel_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VerbosityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[1].Descriptor() +} + +func (VerbosityLevel) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[1] +} + +func (x VerbosityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VerbosityLevel.Descriptor instead. 
+func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} } -func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Encoding Type Supported type EncodingType int32 @@ -110,126 +130,248 @@ const ( EncodingType_PROTO3 EncodingType = 3 ) -var EncodingType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "XML", - 2: "JSON_IETF", - 3: "PROTO3", -} -var EncodingType_value = map[string]int32{ - "UNDEFINED": 0, - "XML": 1, - "JSON_IETF": 2, - "PROTO3": 3, +// Enum value maps for EncodingType. +var ( + EncodingType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "XML", + 2: "JSON_IETF", + 3: "PROTO3", + } + EncodingType_value = map[string]int32{ + "UNDEFINED": 0, + "XML": 1, + "JSON_IETF": 2, + "PROTO3": 3, + } +) + +func (x EncodingType) Enum() *EncodingType { + p := new(EncodingType) + *p = x + return p } func (x EncodingType) String() string { - return proto.EnumName(EncodingType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncodingType) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[2].Descriptor() +} + +func (EncodingType) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[2] +} + +func (x EncodingType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncodingType.Descriptor instead. +func (EncodingType) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} } -func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // Message sent for a telemetry subscription request type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data associated with a telemetry subscription - Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` // The below configuration is not defined in Openconfig RPC. // It is a proposed extension to configure additional // subscription request features. 
- AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig" json:"additional_config,omitempty"` + AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"` } -func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } -func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*SubscriptionRequest) ProtoMessage() {} -func (*SubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionRequest) GetInput() *SubscriptionInput { - if m != nil { - return m.Input +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. +func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriptionRequest) GetInput() *SubscriptionInput { + if x != nil { + return x.Input } return nil } -func (m *SubscriptionRequest) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionRequest) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } -func (m *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { - if m != nil { - return m.AdditionalConfig +func (x *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { + if x != nil { + return x.AdditionalConfig } return nil } // Data associated with a telemetry subscription type SubscriptionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of optional collector endpoints to send data for // this subscription. // If no collector destinations are specified, the collector // destination is assumed to be the requester on the rpc channel. 
- CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList" json:"collector_list,omitempty"` + CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList,proto3" json:"collector_list,omitempty"` +} + +func (x *SubscriptionInput) Reset() { + *x = SubscriptionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SubscriptionInput) Reset() { *m = SubscriptionInput{} } -func (m *SubscriptionInput) String() string { return proto.CompactTextString(m) } -func (*SubscriptionInput) ProtoMessage() {} -func (*SubscriptionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *SubscriptionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *SubscriptionInput) GetCollectorList() []*Collector { - if m != nil { - return m.CollectorList +func (*SubscriptionInput) ProtoMessage() {} + +func (x *SubscriptionInput) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionInput.ProtoReflect.Descriptor instead. +func (*SubscriptionInput) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionInput) GetCollectorList() []*Collector { + if x != nil { + return x.CollectorList } return nil } // Collector endpoints to send data specified as an ip+port combination. type Collector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // IP address of collector endpoint - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Transport protocol port number for the collector destination. - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (m *Collector) Reset() { *m = Collector{} } -func (m *Collector) String() string { return proto.CompactTextString(m) } -func (*Collector) ProtoMessage() {} -func (*Collector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *Collector) Reset() { + *x = Collector{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Collector) GetAddress() string { - if m != nil { - return m.Address +func (x *Collector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collector) ProtoMessage() {} + +func (x *Collector) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collector.ProtoReflect.Descriptor instead. 
+func (*Collector) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} +} + +func (x *Collector) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Collector) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Collector) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } // Data model path type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data model path of interest // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Regular expression to be used in filtering state leaves - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // If this is set to true, the target device will only send // updates to the collector upon a change in data value - SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged" json:"suppress_unchanged,omitempty"` + SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged,proto3" json:"suppress_unchanged,omitempty"` // Maximum time in ms the target device may go without sending // a message to the collector. If this time expires with // suppress-unchanged set, the target device must send an update // message regardless if the data values have changed. - MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval" json:"max_silent_interval,omitempty"` + MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval,proto3" json:"max_silent_interval,omitempty"` // Time in ms between collection and transmission of the // specified data to the collector platform. The target device // will sample the corresponding data (e.g,. a counter) and @@ -237,143 +379,263 @@ type Path struct { // // If sample-frequency is set to 0, then the network device // must emit an update upon every datum change. - SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency" json:"sample_frequency,omitempty"` + SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency,proto3" json:"sample_frequency,omitempty"` // EOM needed for each walk cycle of this path? // For periodic sensor, applicable for each complete reap // For event sensor, applicable when initial dump is over // (same as EOS) // This feature is not implemented currently. 
- NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom" json:"need_eom,omitempty"` + NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom,proto3" json:"need_eom,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Path) GetPath() string { - if m != nil { - return m.Path +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{3} +} + +func (x *Path) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *Path) GetFilter() string { - if m != nil { - return m.Filter +func (x *Path) GetFilter() string { + if x != nil { + return x.Filter } return "" } -func (m *Path) GetSuppressUnchanged() bool { - if m != nil { - return m.SuppressUnchanged +func (x *Path) GetSuppressUnchanged() bool { + if x != nil { + return x.SuppressUnchanged } return false } -func (m *Path) GetMaxSilentInterval() uint32 { - if m != nil { - return m.MaxSilentInterval +func (x *Path) GetMaxSilentInterval() uint32 { + if x != nil { + return x.MaxSilentInterval } return 0 } -func (m *Path) GetSampleFrequency() uint32 { - if m != nil { - return m.SampleFrequency +func (x *Path) GetSampleFrequency() uint32 { + if x != nil { + return x.SampleFrequency } return 0 } -func (m *Path) GetNeedEom() bool { - if m != nil { - return m.NeedEom +func (x *Path) GetNeedEom() bool { + if x != nil { + return x.NeedEom } return false } // Configure subscription request additional features. type SubscriptionAdditionalConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // limit the number of records sent in the stream - LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords" json:"limit_records,omitempty"` + LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords,proto3" json:"limit_records,omitempty"` // limit the time the stream remains open - LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds" json:"limit_time_seconds,omitempty"` + LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds,proto3" json:"limit_time_seconds,omitempty"` // EOS needed for this subscription? 
- NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos" json:"need_eos,omitempty"` + NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos,proto3" json:"need_eos,omitempty"` +} + +func (x *SubscriptionAdditionalConfig) Reset() { + *x = SubscriptionAdditionalConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionAdditionalConfig) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscriptionAdditionalConfig) Reset() { *m = SubscriptionAdditionalConfig{} } -func (m *SubscriptionAdditionalConfig) String() string { return proto.CompactTextString(m) } -func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (m *SubscriptionAdditionalConfig) GetLimitRecords() int32 { - if m != nil { - return m.LimitRecords +func (x *SubscriptionAdditionalConfig) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionAdditionalConfig.ProtoReflect.Descriptor instead. +func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{4} +} + +func (x *SubscriptionAdditionalConfig) GetLimitRecords() int32 { + if x != nil { + return x.LimitRecords } return 0 } -func (m *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { - if m != nil { - return m.LimitTimeSeconds +func (x *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { + if x != nil { + return x.LimitTimeSeconds } return 0 } -func (m *SubscriptionAdditionalConfig) GetNeedEos() bool { - if m != nil { - return m.NeedEos +func (x *SubscriptionAdditionalConfig) GetNeedEos() bool { + if x != nil { + return x.NeedEos } return false } // 1. Reply data message sent out using out-of-band channel. type SubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Response message to a telemetry subscription creation or // get request. - Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. 
- PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` } -func (m *SubscriptionReply) Reset() { *m = SubscriptionReply{} } -func (m *SubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*SubscriptionReply) ProtoMessage() {} -func (*SubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *SubscriptionReply) Reset() { + *x = SubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionReply) GetResponse() *SubscriptionResponse { - if m != nil { - return m.Response +func (x *SubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionReply) ProtoMessage() {} + +func (x *SubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionReply.ProtoReflect.Descriptor instead. +func (*SubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{5} +} + +func (x *SubscriptionReply) GetResponse() *SubscriptionResponse { + if x != nil { + return x.Response } return nil } -func (m *SubscriptionReply) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionReply) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } // Response message to a telemetry subscription creation or get request. type SubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique id for the subscription on the device. 
This is // generated by the device and returned in a subscription // request or when listing existing subscriptions - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } -func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } -func (*SubscriptionResponse) ProtoMessage() {} -func (*SubscriptionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *SubscriptionResponse) Reset() { + *x = SubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionResponse) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *SubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionResponse) ProtoMessage() {} + +func (x *SubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionResponse.ProtoReflect.Descriptor instead. +func (*SubscriptionResponse) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{6} +} + +func (x *SubscriptionResponse) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } @@ -381,112 +643,147 @@ func (m *SubscriptionResponse) GetSubscriptionId() uint32 { // 2. Telemetry data send back on the same connection as the // subscription request. type OpenConfigData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // router name:export IP address - SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId" json:"system_id,omitempty"` + SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id,omitempty"` // line card / RE (slot number) - ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId" json:"component_id,omitempty"` + ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // PFE (if applicable) - SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId" json:"sub_component_id,omitempty"` + SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId,proto3" json:"sub_component_id,omitempty"` // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,4,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Sequence number, monotonically increasing for each // system_id, component_id, sub_component_id + path. 
- SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` // timestamp (milliseconds since epoch) - Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // List of key-value pairs - Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv,proto3" json:"kv,omitempty"` // For delete. If filled, it indicates delete - Delete []*Delete `protobuf:"bytes,8,rep,name=delete" json:"delete,omitempty"` + Delete []*Delete `protobuf:"bytes,8,rep,name=delete,proto3" json:"delete,omitempty"` // If filled, it indicates end of marker for the // respective path in the list. - Eom []*Eom `protobuf:"bytes,9,rep,name=eom" json:"eom,omitempty"` + Eom []*Eom `protobuf:"bytes,9,rep,name=eom,proto3" json:"eom,omitempty"` // If filled, it indicates end of sync for complete subscription - SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse" json:"sync_response,omitempty"` + SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse,proto3" json:"sync_response,omitempty"` } -func (m *OpenConfigData) Reset() { *m = OpenConfigData{} } -func (m *OpenConfigData) String() string { return proto.CompactTextString(m) } -func (*OpenConfigData) ProtoMessage() {} -func (*OpenConfigData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *OpenConfigData) Reset() { + *x = OpenConfigData{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *OpenConfigData) GetSystemId() string { - if m != nil { - return m.SystemId +func (x *OpenConfigData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenConfigData) ProtoMessage() {} + +func (x *OpenConfigData) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenConfigData.ProtoReflect.Descriptor instead. 
+func (*OpenConfigData) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{7} +} + +func (x *OpenConfigData) GetSystemId() string { + if x != nil { + return x.SystemId } return "" } -func (m *OpenConfigData) GetComponentId() uint32 { - if m != nil { - return m.ComponentId +func (x *OpenConfigData) GetComponentId() uint32 { + if x != nil { + return x.ComponentId } return 0 } -func (m *OpenConfigData) GetSubComponentId() uint32 { - if m != nil { - return m.SubComponentId +func (x *OpenConfigData) GetSubComponentId() uint32 { + if x != nil { + return x.SubComponentId } return 0 } -func (m *OpenConfigData) GetPath() string { - if m != nil { - return m.Path +func (x *OpenConfigData) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *OpenConfigData) GetSequenceNumber() uint64 { - if m != nil { - return m.SequenceNumber +func (x *OpenConfigData) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber } return 0 } -func (m *OpenConfigData) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp +func (x *OpenConfigData) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp } return 0 } -func (m *OpenConfigData) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *OpenConfigData) GetKv() []*KeyValue { + if x != nil { + return x.Kv } return nil } -func (m *OpenConfigData) GetDelete() []*Delete { - if m != nil { - return m.Delete +func (x *OpenConfigData) GetDelete() []*Delete { + if x != nil { + return x.Delete } return nil } -func (m *OpenConfigData) GetEom() []*Eom { - if m != nil { - return m.Eom +func (x *OpenConfigData) GetEom() []*Eom { + if x != nil { + return x.Eom } return nil } -func (m *OpenConfigData) GetSyncResponse() bool { - if m != nil { - return m.SyncResponse +func (x *OpenConfigData) GetSyncResponse() bool { + if x != nil { + return x.SyncResponse } return false } // Simple Key-value, where value could be one of scalar types type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Key - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // One of possible values // - // Types that are valid to be assigned to Value: + // Types that are assignable to Value: // *KeyValue_DoubleValue // *KeyValue_IntValue // *KeyValue_UintValue @@ -497,44 +794,44 @@ type KeyValue struct { Value isKeyValue_Value `protobuf_oneof:"value"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -type isKeyValue_Value interface { - isKeyValue_Value() +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type KeyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,oneof"` +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) } -type KeyValue_IntValue struct { - IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,oneof"` -} -type KeyValue_UintValue struct { - UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,oneof"` -} -type KeyValue_SintValue struct { - SintValue int64 
`protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,oneof"` -} -type KeyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,oneof"` -} -type KeyValue_StrValue struct { - StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,oneof"` + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type KeyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{8} } -func (*KeyValue_DoubleValue) isKeyValue_Value() {} -func (*KeyValue_IntValue) isKeyValue_Value() {} -func (*KeyValue_UintValue) isKeyValue_Value() {} -func (*KeyValue_SintValue) isKeyValue_Value() {} -func (*KeyValue_BoolValue) isKeyValue_Value() {} -func (*KeyValue_StrValue) isKeyValue_Value() {} -func (*KeyValue_BytesValue) isKeyValue_Value() {} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} func (m *KeyValue) GetValue() isKeyValue_Value { if m != nil { @@ -543,323 +840,412 @@ func (m *KeyValue) GetValue() isKeyValue_Value { return nil } -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*KeyValue_DoubleValue); ok { +func (x *KeyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*KeyValue_DoubleValue); ok { return x.DoubleValue } return 0 } -func (m *KeyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*KeyValue_IntValue); ok { +func (x *KeyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*KeyValue_IntValue); ok { return x.IntValue } return 0 } -func (m *KeyValue) GetUintValue() uint64 { - if x, ok := m.GetValue().(*KeyValue_UintValue); ok { +func (x *KeyValue) GetUintValue() uint64 { + if x, ok := x.GetValue().(*KeyValue_UintValue); ok { return x.UintValue } return 0 } -func (m *KeyValue) GetSintValue() int64 { - if x, ok := m.GetValue().(*KeyValue_SintValue); ok { +func (x *KeyValue) GetSintValue() int64 { + if x, ok := x.GetValue().(*KeyValue_SintValue); ok { return x.SintValue } return 0 } -func (m *KeyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*KeyValue_BoolValue); ok { +func (x *KeyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*KeyValue_BoolValue); ok { return x.BoolValue } return false } -func (m *KeyValue) GetStrValue() string { - if x, ok := m.GetValue().(*KeyValue_StrValue); ok { +func (x *KeyValue) GetStrValue() string { + if x, ok := x.GetValue().(*KeyValue_StrValue); ok { return x.StrValue } return "" } -func (m *KeyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*KeyValue_BytesValue); ok { +func (x *KeyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*KeyValue_BytesValue); ok { return x.BytesValue } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*KeyValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _KeyValue_OneofMarshaler, _KeyValue_OneofUnmarshaler, _KeyValue_OneofSizer, []interface{}{ - (*KeyValue_DoubleValue)(nil), - (*KeyValue_IntValue)(nil), - (*KeyValue_UintValue)(nil), - (*KeyValue_SintValue)(nil), - (*KeyValue_BoolValue)(nil), - (*KeyValue_StrValue)(nil), - (*KeyValue_BytesValue)(nil), - } +type isKeyValue_Value interface { + isKeyValue_Value() } -func _KeyValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *KeyValue_IntValue: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - b.EncodeVarint(8<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.SintValue)) - case *KeyValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *KeyValue_StrValue: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StrValue) - case *KeyValue_BytesValue: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.BytesValue) - case nil: - default: - return fmt.Errorf("KeyValue.Value has unexpected type %T", x) - } - return nil +type KeyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` } -func _KeyValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*KeyValue) - switch tag { - case 5: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &KeyValue_DoubleValue{math.Float64frombits(x)} - return true, err - case 6: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_IntValue{int64(x)} - return true, err - case 7: // value.uint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_UintValue{x} - return true, err - case 8: // value.sint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Value = &KeyValue_SintValue{int64(x)} - return true, err - case 9: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_BoolValue{x != 0} - return true, err - case 10: // value.str_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &KeyValue_StrValue{x} - return true, err - case 11: // value.bytes_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Value = &KeyValue_BytesValue{x} - return true, err - default: - return false, nil - } -} - -func _KeyValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case 
*KeyValue_IntValue: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.SintValue<<1) ^ uint64((int64(x.SintValue) >> 63)))) - case *KeyValue_BoolValue: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += 1 - case *KeyValue_StrValue: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StrValue))) - n += len(x.StrValue) - case *KeyValue_BytesValue: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.BytesValue))) - n += len(x.BytesValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type KeyValue_IntValue struct { + IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type KeyValue_UintValue struct { + UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,proto3,oneof"` +} + +type KeyValue_SintValue struct { + SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,proto3,oneof"` +} + +type KeyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type KeyValue_StrValue struct { + StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,proto3,oneof"` +} + +type KeyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*KeyValue_DoubleValue) isKeyValue_Value() {} + +func (*KeyValue_IntValue) isKeyValue_Value() {} + +func (*KeyValue_UintValue) isKeyValue_Value() {} + +func (*KeyValue_SintValue) isKeyValue_Value() {} + +func (*KeyValue_BoolValue) isKeyValue_Value() {} + +func (*KeyValue_StrValue) isKeyValue_Value() {} + +func (*KeyValue_BytesValue) isKeyValue_Value() {} + // Message indicating delete for a particular path type Delete struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Delete) Reset() { *m = Delete{} } -func (m *Delete) String() string { return proto.CompactTextString(m) } -func (*Delete) ProtoMessage() {} -func (*Delete) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Delete) ProtoMessage() {} -func (m *Delete) GetPath() string { - if m != nil { - return m.Path +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. 
+func (*Delete) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{9} +} + +func (x *Delete) GetPath() string { + if x != nil { + return x.Path } return "" } // Message indicating EOM for a particular path type Eom struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (m *Eom) Reset() { *m = Eom{} } -func (m *Eom) String() string { return proto.CompactTextString(m) } -func (*Eom) ProtoMessage() {} -func (*Eom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *Eom) Reset() { + *x = Eom{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Eom) GetPath() string { - if m != nil { - return m.Path +func (x *Eom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eom) ProtoMessage() {} + +func (x *Eom) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eom.ProtoReflect.Descriptor instead. +func (*Eom) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{10} +} + +func (x *Eom) GetPath() string { + if x != nil { + return x.Path } return "" } // Message sent for a telemetry subscription cancellation request type CancelSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *CancelSubscriptionRequest) Reset() { *m = CancelSubscriptionRequest{} } -func (m *CancelSubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionRequest) ProtoMessage() {} -func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *CancelSubscriptionRequest) Reset() { + *x = CancelSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *CancelSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionRequest) ProtoMessage() {} + +func (x *CancelSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionRequest.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{11} +} + +func (x *CancelSubscriptionRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription cancellation request type CancelSubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Return code - Code ReturnCode `protobuf:"varint,1,opt,name=code,enum=telemetry.ReturnCode" json:"code,omitempty"` + Code ReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=telemetry.ReturnCode" json:"code,omitempty"` // Return code string - CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr" json:"code_str,omitempty"` + CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr,proto3" json:"code_str,omitempty"` } -func (m *CancelSubscriptionReply) Reset() { *m = CancelSubscriptionReply{} } -func (m *CancelSubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionReply) ProtoMessage() {} -func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *CancelSubscriptionReply) Reset() { + *x = CancelSubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionReply) GetCode() ReturnCode { - if m != nil { - return m.Code +func (x *CancelSubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionReply) ProtoMessage() {} + +func (x *CancelSubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionReply.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{12} +} + +func (x *CancelSubscriptionReply) GetCode() ReturnCode { + if x != nil { + return x.Code } return ReturnCode_SUCCESS } -func (m *CancelSubscriptionReply) GetCodeStr() string { - if m != nil { - return m.CodeStr +func (x *CancelSubscriptionReply) GetCodeStr() string { + if x != nil { + return x.CodeStr } return "" } // Message sent for a telemetry get request type GetSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested // --- or --- // 0xFFFFFFFF for all subscription identifiers - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *GetSubscriptionsRequest) Reset() { *m = GetSubscriptionsRequest{} } -func (m *GetSubscriptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsRequest) ProtoMessage() {} -func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *GetSubscriptionsRequest) Reset() { + *x = GetSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsRequest) ProtoMessage() {} + +func (x *GetSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{13} +} + +func (x *GetSubscriptionsRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription get request type GetSubscriptionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of current telemetry subscriptions - SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList" json:"subscription_list,omitempty"` + SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList,proto3" json:"subscription_list,omitempty"` } -func (m *GetSubscriptionsReply) Reset() { *m = GetSubscriptionsReply{} } -func (m *GetSubscriptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsReply) ProtoMessage() {} -func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *GetSubscriptionsReply) Reset() { + *x = GetSubscriptionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { - if m != nil { - return m.SubscriptionList +func (x *GetSubscriptionsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsReply) ProtoMessage() {} + +func (x *GetSubscriptionsReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsReply.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{14} +} + +func (x *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { + if x != nil { + return x.SubscriptionList } return nil } // Message sent for telemetry agent operational states request type GetOperationalStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Per-subscription_id level operational state can be requested. 
// // Subscription identifier as returned by the device when @@ -870,434 +1256,718 @@ type GetOperationalStateRequest struct { // --- or --- // If subscription_id is not present then sent only agent-level // operational stats - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` // Control verbosity of the output - Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` + Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,proto3,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` } -func (m *GetOperationalStateRequest) Reset() { *m = GetOperationalStateRequest{} } -func (m *GetOperationalStateRequest) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateRequest) ProtoMessage() {} -func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *GetOperationalStateRequest) Reset() { + *x = GetOperationalStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetOperationalStateRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetOperationalStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationalStateRequest) ProtoMessage() {} + +func (x *GetOperationalStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationalStateRequest.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetOperationalStateRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } -func (m *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { - if m != nil { - return m.Verbosity +func (x *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { + if x != nil { + return x.Verbosity } return VerbosityLevel_DETAIL } // Reply to telemetry agent operational states request type GetOperationalStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of key-value pairs where // key = operational state definition // value = operational state value - Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` } -func (m *GetOperationalStateReply) Reset() { *m = GetOperationalStateReply{} } -func (m *GetOperationalStateReply) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateReply) ProtoMessage() {} -func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *GetOperationalStateReply) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *GetOperationalStateReply) Reset() { + *x = GetOperationalStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -// Message sent for a data encoding request -type DataEncodingRequest struct { -} - -func (m *DataEncodingRequest) Reset() { *m = DataEncodingRequest{} } -func (m *DataEncodingRequest) String() string { return proto.CompactTextString(m) } -func (*DataEncodingRequest) ProtoMessage() {} -func (*DataEncodingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -// Reply to data encodings supported request -type DataEncodingReply struct { - EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` +func (x *GetOperationalStateReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataEncodingReply) Reset() { *m = DataEncodingReply{} } -func (m *DataEncodingReply) String() string { return proto.CompactTextString(m) } -func (*DataEncodingReply) ProtoMessage() {} -func (*DataEncodingReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*GetOperationalStateReply) ProtoMessage() {} -func (m *DataEncodingReply) GetEncodingList() []EncodingType { - if m != nil { - return m.EncodingList +func (x *GetOperationalStateReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil -} - -func init() { - proto.RegisterType((*SubscriptionRequest)(nil), "telemetry.SubscriptionRequest") - proto.RegisterType((*SubscriptionInput)(nil), "telemetry.SubscriptionInput") - proto.RegisterType((*Collector)(nil), "telemetry.Collector") - proto.RegisterType((*Path)(nil), "telemetry.Path") - proto.RegisterType((*SubscriptionAdditionalConfig)(nil), "telemetry.SubscriptionAdditionalConfig") - proto.RegisterType((*SubscriptionReply)(nil), "telemetry.SubscriptionReply") 
- proto.RegisterType((*SubscriptionResponse)(nil), "telemetry.SubscriptionResponse") - proto.RegisterType((*OpenConfigData)(nil), "telemetry.OpenConfigData") - proto.RegisterType((*KeyValue)(nil), "telemetry.KeyValue") - proto.RegisterType((*Delete)(nil), "telemetry.Delete") - proto.RegisterType((*Eom)(nil), "telemetry.Eom") - proto.RegisterType((*CancelSubscriptionRequest)(nil), "telemetry.CancelSubscriptionRequest") - proto.RegisterType((*CancelSubscriptionReply)(nil), "telemetry.CancelSubscriptionReply") - proto.RegisterType((*GetSubscriptionsRequest)(nil), "telemetry.GetSubscriptionsRequest") - proto.RegisterType((*GetSubscriptionsReply)(nil), "telemetry.GetSubscriptionsReply") - proto.RegisterType((*GetOperationalStateRequest)(nil), "telemetry.GetOperationalStateRequest") - proto.RegisterType((*GetOperationalStateReply)(nil), "telemetry.GetOperationalStateReply") - proto.RegisterType((*DataEncodingRequest)(nil), "telemetry.DataEncodingRequest") - proto.RegisterType((*DataEncodingReply)(nil), "telemetry.DataEncodingReply") - proto.RegisterEnum("telemetry.ReturnCode", ReturnCode_name, ReturnCode_value) - proto.RegisterEnum("telemetry.VerbosityLevel", VerbosityLevel_name, VerbosityLevel_value) - proto.RegisterEnum("telemetry.EncodingType", EncodingType_name, EncodingType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for OpenConfigTelemetry service - -type OpenConfigTelemetryClient interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) + return mi.MessageOf(x) } -type openConfigTelemetryClient struct { - cc *grpc.ClientConn +// Deprecated: Use GetOperationalStateReply.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{16} } -func NewOpenConfigTelemetryClient(cc *grpc.ClientConn) OpenConfigTelemetryClient { - return &openConfigTelemetryClient{cc} -} - -func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { - stream, err := grpc.NewClientStream(ctx, &_OpenConfigTelemetry_serviceDesc.Streams[0], c.cc, "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) - if err != nil { - return nil, err - } - x := &openConfigTelemetryTelemetrySubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err +func (x *GetOperationalStateReply) GetKv() []*KeyValue { + if x != nil { + return x.Kv } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenConfigTelemetry_TelemetrySubscribeClient interface { - Recv() (*OpenConfigData, error) - grpc.ClientStream + return nil } -type openConfigTelemetryTelemetrySubscribeClient struct { - grpc.ClientStream +// Message sent for a data encoding request +type DataEncodingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { - m := new(OpenConfigData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DataEncodingRequest) Reset() { + *x = DataEncodingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { - out := new(CancelSubscriptionReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DataEncodingRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { - out := new(GetSubscriptionsReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DataEncodingRequest) ProtoMessage() {} -func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { - out := new(GetOperationalStateReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *DataEncodingRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { - out := new(DataEncodingReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use DataEncodingRequest.ProtoReflect.Descriptor instead. +func (*DataEncodingRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{17} } -// Server API for OpenConfigTelemetry service - -type OpenConfigTelemetryServer interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) -} +// Reply to data encodings supported request +type DataEncodingReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterOpenConfigTelemetryServer(s *grpc.Server, srv OpenConfigTelemetryServer) { - s.RegisterService(&_OpenConfigTelemetry_serviceDesc, srv) + EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,proto3,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` } -func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscriptionRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *DataEncodingReply) Reset() { + *x = DataEncodingReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) } -type OpenConfigTelemetry_TelemetrySubscribeServer interface { - Send(*OpenConfigData) error - grpc.ServerStream +func (x *DataEncodingReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type openConfigTelemetryTelemetrySubscribeServer struct { - grpc.ServerStream -} +func (*DataEncodingReply) ProtoMessage() {} -func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { - return x.ServerStream.SendMsg(m) -} - 
-func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/CancelTelemetrySubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) +func (x *DataEncodingReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetrySubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DataEncodingReply.ProtoReflect.Descriptor instead. 
+func (*DataEncodingReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{18} } -func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationalStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) +func (x *DataEncodingReply) GetEncodingList() []EncodingType { + if x != nil { + return x.EncodingList } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetryOperationalState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DataEncodingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetDataEncodings", +var File_oc_oc_proto protoreflect.FileDescriptor + +var file_oc_oc_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6f, 0x63, 0x2f, 0x6f, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xcd, 0x01, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, + 0x0e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x09, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x75, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x55, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x6c, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x6d, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x6d, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x10, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x73, 0x22, 0x7e, + 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xec, 0x02, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x29, 0x0a, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x6f, 0x6d, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x45, 0x6f, 0x6d, 0x52, 0x03, 0x65, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e, + 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x75, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x09, 0x75, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 
0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x1c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x19, 0x0a, + 0x03, 0x45, 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x44, 0x0a, 0x19, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5f, + 0x0a, 0x17, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, + 0x42, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x11, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x09, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x65, + 
0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x47, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4e, 0x4f, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x45, 0x52, + 0x53, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x52, 0x49, 0x45, 0x46, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x0c, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x58, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x49, 0x45, 0x54, 0x46, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, + 0x10, 0x03, 0x32, 0xfc, 0x03, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x70, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x43, 0x61, 0x6e, 
0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x19, 0x67, 0x65, + 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x10, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oc_oc_proto_rawDescOnce sync.Once + file_oc_oc_proto_rawDescData = file_oc_oc_proto_rawDesc +) + +func file_oc_oc_proto_rawDescGZIP() []byte { + file_oc_oc_proto_rawDescOnce.Do(func() { + file_oc_oc_proto_rawDescData = protoimpl.X.CompressGZIP(file_oc_oc_proto_rawDescData) + }) + return file_oc_oc_proto_rawDescData +} + +var file_oc_oc_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_oc_oc_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_oc_oc_proto_goTypes = []interface{}{ + (ReturnCode)(0), // 0: telemetry.ReturnCode + (VerbosityLevel)(0), // 1: telemetry.VerbosityLevel + (EncodingType)(0), // 2: telemetry.EncodingType + (*SubscriptionRequest)(nil), // 3: telemetry.SubscriptionRequest + (*SubscriptionInput)(nil), // 4: telemetry.SubscriptionInput + (*Collector)(nil), // 5: telemetry.Collector + (*Path)(nil), // 6: telemetry.Path + (*SubscriptionAdditionalConfig)(nil), // 7: telemetry.SubscriptionAdditionalConfig + (*SubscriptionReply)(nil), // 8: telemetry.SubscriptionReply + (*SubscriptionResponse)(nil), // 9: telemetry.SubscriptionResponse + (*OpenConfigData)(nil), // 10: telemetry.OpenConfigData + (*KeyValue)(nil), // 11: telemetry.KeyValue + (*Delete)(nil), // 12: telemetry.Delete + (*Eom)(nil), // 13: telemetry.Eom + (*CancelSubscriptionRequest)(nil), // 14: telemetry.CancelSubscriptionRequest + (*CancelSubscriptionReply)(nil), // 15: telemetry.CancelSubscriptionReply + (*GetSubscriptionsRequest)(nil), 
// 16: telemetry.GetSubscriptionsRequest + (*GetSubscriptionsReply)(nil), // 17: telemetry.GetSubscriptionsReply + (*GetOperationalStateRequest)(nil), // 18: telemetry.GetOperationalStateRequest + (*GetOperationalStateReply)(nil), // 19: telemetry.GetOperationalStateReply + (*DataEncodingRequest)(nil), // 20: telemetry.DataEncodingRequest + (*DataEncodingReply)(nil), // 21: telemetry.DataEncodingReply +} +var file_oc_oc_proto_depIdxs = []int32{ + 4, // 0: telemetry.SubscriptionRequest.input:type_name -> telemetry.SubscriptionInput + 6, // 1: telemetry.SubscriptionRequest.path_list:type_name -> telemetry.Path + 7, // 2: telemetry.SubscriptionRequest.additional_config:type_name -> telemetry.SubscriptionAdditionalConfig + 5, // 3: telemetry.SubscriptionInput.collector_list:type_name -> telemetry.Collector + 9, // 4: telemetry.SubscriptionReply.response:type_name -> telemetry.SubscriptionResponse + 6, // 5: telemetry.SubscriptionReply.path_list:type_name -> telemetry.Path + 11, // 6: telemetry.OpenConfigData.kv:type_name -> telemetry.KeyValue + 12, // 7: telemetry.OpenConfigData.delete:type_name -> telemetry.Delete + 13, // 8: telemetry.OpenConfigData.eom:type_name -> telemetry.Eom + 0, // 9: telemetry.CancelSubscriptionReply.code:type_name -> telemetry.ReturnCode + 8, // 10: telemetry.GetSubscriptionsReply.subscription_list:type_name -> telemetry.SubscriptionReply + 1, // 11: telemetry.GetOperationalStateRequest.verbosity:type_name -> telemetry.VerbosityLevel + 11, // 12: telemetry.GetOperationalStateReply.kv:type_name -> telemetry.KeyValue + 2, // 13: telemetry.DataEncodingReply.encoding_list:type_name -> telemetry.EncodingType + 3, // 14: telemetry.OpenConfigTelemetry.telemetrySubscribe:input_type -> telemetry.SubscriptionRequest + 14, // 15: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:input_type -> telemetry.CancelSubscriptionRequest + 16, // 16: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:input_type -> telemetry.GetSubscriptionsRequest + 18, // 17: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:input_type -> telemetry.GetOperationalStateRequest + 20, // 18: telemetry.OpenConfigTelemetry.getDataEncodings:input_type -> telemetry.DataEncodingRequest + 10, // 19: telemetry.OpenConfigTelemetry.telemetrySubscribe:output_type -> telemetry.OpenConfigData + 15, // 20: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:output_type -> telemetry.CancelSubscriptionReply + 17, // 21: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:output_type -> telemetry.GetSubscriptionsReply + 19, // 22: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:output_type -> telemetry.GetOperationalStateReply + 21, // 23: telemetry.OpenConfigTelemetry.getDataEncodings:output_type -> telemetry.DataEncodingReply + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_oc_oc_proto_init() } +func file_oc_oc_proto_init() { + if File_oc_oc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oc_oc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*SubscriptionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionAdditionalConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenConfigData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Eom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOperationalStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + file_oc_oc_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*KeyValue_DoubleValue)(nil), + (*KeyValue_IntValue)(nil), + (*KeyValue_UintValue)(nil), + (*KeyValue_SintValue)(nil), + (*KeyValue_BoolValue)(nil), + (*KeyValue_StrValue)(nil), + (*KeyValue_BytesValue)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _OpenConfigTelemetry_serviceDesc = grpc.ServiceDesc{ - ServiceName: "telemetry.OpenConfigTelemetry", - HandlerType: (*OpenConfigTelemetryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "cancelTelemetrySubscription", - Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, - }, - { - MethodName: "getTelemetrySubscriptions", - Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, - }, - { - MethodName: "getTelemetryOperationalState", - Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, - }, - { - MethodName: "getDataEncodings", - Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "telemetrySubscribe", - Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, - ServerStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oc_oc_proto_rawDesc, + NumEnums: 3, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "oc.proto", -} - -func init() { proto.RegisterFile("oc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x25, 0xd9, 0x12, 0xaf, 0x7e, 0x42, 0x8d, 0xe3, 0x2f, 0xb2, 0xa3, 0xaf, 0x71, 0xe8, - 0x16, 0x71, 0x82, 0xd4, 0x28, 0x94, 0x45, 0x51, 0xa4, 0x40, 0x10, 0xcb, 0x74, 0xac, 0xc6, 0x95, - 0xdc, 0xa1, 0x9c, 0xb6, 0x2b, 0x82, 0x22, 0x27, 0x36, 0x11, 0xfe, 0x95, 0x33, 0x12, 0xc2, 0x4d, - 0x9e, 0xa0, 0xe8, 0x9b, 0x75, 0xdd, 0x97, 0xe8, 0x23, 0x74, 0x51, 0xcc, 0x90, 0x94, 0x46, 0x89, - 0x94, 0x34, 0x2b, 0x91, 0xe7, 0x9e, 0xb9, 0xf7, 0xcc, 0xbd, 0x67, 0x86, 0x82, 0x7a, 0xe4, 0x1c, - 0xc7, 0x49, 0xc4, 0x22, 0xa4, 0x32, 0xe2, 0x93, 0x80, 0xb0, 0x24, 0xd5, 0xff, 0x54, 0x60, 0xc7, - 0x9c, 0x4d, 0xa9, 0x93, 0x78, 0x31, 0xf3, 0xa2, 0x10, 0x93, 0xdf, 0x66, 0x84, 0x32, 0xd4, 0x87, - 0x2d, 0x2f, 0x8c, 0x67, 0xac, 0xab, 0x1c, 0x28, 0x47, 0x8d, 0x7e, 0xef, 0x78, 0xb1, 0xe4, 0x58, 
- 0xa6, 0x0f, 0x39, 0x07, 0x67, 0x54, 0xf4, 0x18, 0xd4, 0xd8, 0x66, 0x37, 0x96, 0xef, 0x51, 0xd6, - 0x2d, 0x1f, 0x54, 0x8e, 0x1a, 0xfd, 0x5b, 0xd2, 0xba, 0x4b, 0x9b, 0xdd, 0xe0, 0x3a, 0x67, 0x5c, - 0x78, 0x94, 0xa1, 0x09, 0x74, 0x6c, 0xd7, 0xf5, 0x78, 0x16, 0xdb, 0xb7, 0x9c, 0x28, 0x7c, 0xed, - 0x5d, 0x77, 0x2b, 0xa2, 0xda, 0x83, 0x0d, 0xd5, 0x9e, 0x2f, 0xf8, 0x03, 0x41, 0xc7, 0x9a, 0xfd, - 0x1e, 0xa2, 0x5f, 0x42, 0xe7, 0x03, 0x7d, 0xe8, 0x29, 0xb4, 0x9d, 0xc8, 0xf7, 0x89, 0xc3, 0xa2, - 0x24, 0x53, 0xa7, 0x08, 0x75, 0xb7, 0xa5, 0x3a, 0x83, 0x82, 0x80, 0x5b, 0x0b, 0x2e, 0xd7, 0xa9, - 0x7f, 0x07, 0xea, 0x22, 0x86, 0xba, 0x50, 0xb3, 0x5d, 0x37, 0x21, 0x94, 0x8a, 0xc6, 0xa8, 0xb8, - 0x78, 0x45, 0x08, 0xaa, 0x71, 0x94, 0xf0, 0x7d, 0x2b, 0x47, 0x2d, 0x2c, 0x9e, 0xf5, 0xbf, 0x14, - 0xa8, 0xf2, 0x5d, 0x8b, 0xa0, 0xcd, 0x6e, 0xf2, 0x35, 0xe2, 0x19, 0xfd, 0x0f, 0xb6, 0x5f, 0x7b, - 0x3e, 0x23, 0x89, 0x58, 0xa2, 0xe2, 0xfc, 0x0d, 0x7d, 0x0d, 0x88, 0xce, 0xe2, 0x98, 0x27, 0xb5, - 0x66, 0xa1, 0x73, 0x63, 0x87, 0xd7, 0xc4, 0x15, 0x8d, 0xa9, 0xe3, 0x4e, 0x11, 0xb9, 0x2a, 0x02, - 0xe8, 0x18, 0x76, 0x02, 0xfb, 0xad, 0x45, 0x3d, 0x9f, 0x84, 0xcc, 0xf2, 0x42, 0x46, 0x92, 0xb9, - 0xed, 0x77, 0xab, 0x42, 0x46, 0x27, 0xb0, 0xdf, 0x9a, 0x22, 0x32, 0xcc, 0x03, 0xe8, 0x21, 0x68, - 0xd4, 0x0e, 0x62, 0x9f, 0x58, 0xaf, 0x13, 0x3e, 0xeb, 0xd0, 0x49, 0xbb, 0x5b, 0x82, 0x7c, 0x2b, - 0xc3, 0xcf, 0x0a, 0x18, 0xed, 0x41, 0x3d, 0x24, 0xc4, 0xb5, 0x48, 0x14, 0x74, 0xb7, 0x45, 0xfd, - 0x1a, 0x7f, 0x37, 0xa2, 0x40, 0xff, 0x5d, 0x81, 0xde, 0xc7, 0x26, 0x83, 0x0e, 0xa1, 0xe5, 0x7b, - 0x81, 0xc7, 0xac, 0x84, 0x38, 0x51, 0xe2, 0x66, 0xed, 0xda, 0xc2, 0x4d, 0x01, 0xe2, 0x0c, 0x43, - 0x8f, 0x01, 0x65, 0x24, 0xe6, 0x05, 0xc4, 0xa2, 0xc4, 0x89, 0x42, 0x97, 0x8a, 0x76, 0x6c, 0x61, - 0x4d, 0x44, 0x26, 0x5e, 0x40, 0xcc, 0x0c, 0x97, 0xe4, 0xd0, 0xbc, 0x1d, 0xb9, 0x1c, 0xaa, 0xbf, - 0x5b, 0x9d, 0x3a, 0x26, 0xb1, 0x9f, 0xa2, 0xa7, 0x50, 0x4f, 0x08, 0x8d, 0xa3, 0x90, 0x92, 0xdc, - 0xc5, 0xf7, 0x36, 0xf8, 0x0a, 0xe7, 0x34, 0xbc, 0x58, 0xf0, 0x79, 0x5e, 0xd6, 0x9f, 0xc1, 0xed, - 0x75, 0xf9, 0xd0, 0x03, 0xb8, 0x45, 0x25, 0xdc, 0xf2, 0x5c, 0xa1, 0xa4, 0x85, 0xdb, 0x32, 0x3c, - 0x74, 0xf5, 0xbf, 0xcb, 0xd0, 0x1e, 0xc7, 0x24, 0xcc, 0xba, 0x77, 0x6a, 0x33, 0x1b, 0xdd, 0x05, - 0x95, 0xa6, 0x94, 0x91, 0xa0, 0x58, 0xa5, 0xe2, 0x7a, 0x06, 0x0c, 0x5d, 0x74, 0x1f, 0x9a, 0x4e, - 0x14, 0xc4, 0x51, 0x28, 0x86, 0xee, 0xe6, 0xae, 0x6b, 0x2c, 0xb0, 0xa1, 0x8b, 0x8e, 0x40, 0xa3, - 0xb3, 0xa9, 0xb5, 0x42, 0xab, 0x2c, 0x8a, 0x0f, 0x24, 0x66, 0xe1, 0xce, 0xaa, 0xe4, 0x4e, 0xae, - 0x3c, 0xf3, 0x01, 0xb1, 0xc2, 0x59, 0x30, 0x25, 0x89, 0x70, 0x49, 0x15, 0xb7, 0x0b, 0x78, 0x24, - 0x50, 0xd4, 0x03, 0x95, 0x4f, 0x8f, 0x32, 0x3b, 0x88, 0x85, 0x4b, 0xaa, 0x78, 0x09, 0xa0, 0x43, - 0x28, 0xbf, 0x99, 0x77, 0x6b, 0xa2, 0x7f, 0x3b, 0x52, 0xff, 0x5e, 0x92, 0xf4, 0x95, 0xed, 0xcf, - 0x08, 0x2e, 0xbf, 0x99, 0xa3, 0x87, 0xb0, 0xed, 0x12, 0x9f, 0x30, 0xd2, 0xad, 0x0b, 0x62, 0x47, - 0x22, 0x9e, 0x8a, 0x00, 0xce, 0x09, 0xe8, 0x00, 0x2a, 0xdc, 0x8d, 0xaa, 0xe0, 0xb5, 0x25, 0x9e, - 0x11, 0x05, 0x98, 0x87, 0xb8, 0xf1, 0x68, 0x1a, 0x3a, 0xd6, 0x62, 0xf4, 0x20, 0xac, 0xd2, 0xe4, - 0x60, 0x31, 0x17, 0xfd, 0x8f, 0x32, 0xd4, 0x0b, 0x09, 0x48, 0x83, 0xca, 0x1b, 0x92, 0xe6, 0x2d, - 0xe6, 0x8f, 0xe8, 0x10, 0x9a, 0x6e, 0x34, 0x9b, 0xfa, 0xc4, 0x9a, 0x73, 0x86, 0xd8, 0xb9, 0x72, - 0x5e, 0xc2, 0x8d, 0x0c, 0xcd, 0x96, 0xfd, 0x1f, 0x54, 0x2f, 0x64, 0x39, 0x83, 0x6f, 0xbc, 0x72, - 0x5e, 0xc2, 0x75, 0x2f, 0x64, 0x59, 0xf8, 0x1e, 0xc0, 0x6c, 0x19, 0xaf, 0xf1, 0xc6, 0x9c, 0x97, - 0xb0, 0x3a, 0x93, 0x09, 
0x74, 0x49, 0xa8, 0x1f, 0x28, 0x47, 0x88, 0x13, 0xa8, 0x4c, 0x98, 0x46, - 0x91, 0x9f, 0x13, 0x54, 0xbe, 0x0d, 0x4e, 0xe0, 0xd8, 0x42, 0x01, 0x65, 0x49, 0x1e, 0xe7, 0xdb, - 0x54, 0xb9, 0x02, 0xca, 0x92, 0x2c, 0x7c, 0x1f, 0x1a, 0xd3, 0x94, 0x11, 0x9a, 0x13, 0x1a, 0x07, - 0xca, 0x51, 0xf3, 0xbc, 0x84, 0x41, 0x80, 0x82, 0x72, 0x52, 0x83, 0x2d, 0x11, 0xd4, 0x7b, 0xb0, - 0x9d, 0x75, 0x7a, 0xdd, 0x55, 0xa5, 0xef, 0x41, 0xc5, 0x88, 0x82, 0xb5, 0xa1, 0x53, 0xd8, 0x1b, - 0xd8, 0xa1, 0x43, 0xfc, 0x75, 0x1f, 0x91, 0xff, 0x6c, 0x7f, 0x0b, 0xee, 0xac, 0xcb, 0xc2, 0x4f, - 0xf1, 0x43, 0xa8, 0x3a, 0x91, 0x9b, 0x9d, 0xe0, 0x76, 0x7f, 0x57, 0x1a, 0x39, 0x26, 0x6c, 0x96, - 0x84, 0x83, 0xc8, 0x25, 0x58, 0x50, 0xf8, 0x05, 0xc1, 0x7f, 0x2d, 0xca, 0x8a, 0x3b, 0xb5, 0xc6, - 0xdf, 0x4d, 0x96, 0xe8, 0x27, 0x70, 0xe7, 0x05, 0x61, 0x72, 0x76, 0xfa, 0xd9, 0x22, 0xa7, 0xb0, - 0xfb, 0x61, 0x0e, 0x2e, 0x71, 0x08, 0x9d, 0x95, 0x0c, 0xd2, 0x17, 0xa6, 0xb7, 0xf1, 0xc6, 0x89, - 0xfd, 0x14, 0x6b, 0xf2, 0x32, 0x71, 0x91, 0xbc, 0x83, 0xfd, 0x17, 0x84, 0x8d, 0x63, 0x92, 0xd8, - 0xd9, 0x75, 0x6a, 0x32, 0x9b, 0x91, 0xcf, 0x95, 0x8a, 0xbe, 0x05, 0x75, 0x4e, 0x92, 0x69, 0x44, - 0x3d, 0x96, 0x8a, 0x56, 0xb4, 0xfb, 0x7b, 0x92, 0x92, 0x57, 0x45, 0xec, 0x82, 0xcc, 0x89, 0x8f, - 0x97, 0x5c, 0xfd, 0x19, 0x74, 0xd7, 0xd6, 0xe7, 0xdb, 0xcc, 0xce, 0xb2, 0xf2, 0xd1, 0xb3, 0xac, - 0xef, 0xc2, 0x0e, 0xbf, 0xbd, 0x8c, 0xd0, 0x89, 0x5c, 0x2f, 0xbc, 0xce, 0x95, 0xeb, 0x3f, 0x41, - 0x67, 0x15, 0xe6, 0x09, 0xbf, 0x87, 0x16, 0xc9, 0x81, 0x65, 0xcf, 0xda, 0xfd, 0x3b, 0xf2, 0xb1, - 0xce, 0xe3, 0x93, 0x34, 0x26, 0xb8, 0x59, 0xb0, 0x79, 0xab, 0x1e, 0xbd, 0x00, 0x58, 0x3a, 0x00, - 0x35, 0xa0, 0x66, 0x5e, 0x0d, 0x06, 0x86, 0x69, 0x6a, 0x25, 0xb4, 0x07, 0xbb, 0xa3, 0xb1, 0x65, - 0x5e, 0x9d, 0x98, 0x03, 0x3c, 0xbc, 0x9c, 0x0c, 0xc7, 0x23, 0xcb, 0x18, 0x4d, 0xf0, 0xaf, 0x9a, - 0x82, 0x3a, 0xd0, 0xba, 0x1a, 0xbd, 0x1c, 0x8d, 0x7f, 0x1e, 0x59, 0x06, 0xc6, 0x63, 0xac, 0x95, - 0x1f, 0xf5, 0xa1, 0xbd, 0xda, 0x10, 0x04, 0xb0, 0x7d, 0x6a, 0x4c, 0x9e, 0x0f, 0x2f, 0xb4, 0x12, - 0x52, 0x61, 0x6b, 0x62, 0x60, 0xd3, 0xd0, 0x14, 0xfe, 0x78, 0x82, 0x87, 0xc6, 0x99, 0x56, 0x7e, - 0xf4, 0x1c, 0x9a, 0xb2, 0x34, 0xd4, 0x02, 0xf5, 0x6a, 0x74, 0x6a, 0x9c, 0x0d, 0x47, 0xc6, 0xa9, - 0x56, 0x42, 0x35, 0xa8, 0xfc, 0xf2, 0xe3, 0x85, 0xa6, 0x70, 0xfc, 0x07, 0x73, 0x3c, 0xb2, 0x86, - 0xc6, 0xe4, 0x4c, 0x2b, 0xf3, 0xc4, 0x97, 0x78, 0x3c, 0x19, 0x3f, 0xd1, 0x2a, 0xfd, 0x7f, 0x2a, - 0xb0, 0xb3, 0xbc, 0xf2, 0x27, 0xc5, 0x96, 0x91, 0x09, 0x68, 0xb1, 0xff, 0xdc, 0x32, 0x53, 0x82, - 0xbe, 0xd8, 0x68, 0x24, 0xd1, 0xe0, 0x7d, 0x79, 0xbc, 0xab, 0x1f, 0x12, 0xbd, 0xf4, 0x8d, 0x82, - 0x3c, 0xb8, 0xeb, 0x88, 0x03, 0x36, 0x79, 0x2f, 0xb5, 0x48, 0x82, 0xbe, 0x94, 0xff, 0x08, 0x6d, - 0x3a, 0xce, 0xfb, 0xfa, 0x27, 0x58, 0xb1, 0x9f, 0xea, 0x25, 0xe4, 0xc0, 0xde, 0x35, 0x61, 0x6b, - 0xeb, 0x50, 0x24, 0xa7, 0xd8, 0x70, 0x20, 0xf7, 0x0f, 0x3e, 0xca, 0xc9, 0x8a, 0xf8, 0xd0, 0x93, - 0x8b, 0xbc, 0x6f, 0x58, 0xf4, 0xd5, 0x6a, 0x8e, 0x0d, 0x07, 0x6a, 0xff, 0xf0, 0x53, 0xb4, 0xac, - 0x1a, 0x06, 0xed, 0x9a, 0x30, 0xd9, 0xc0, 0x74, 0x65, 0x20, 0x6b, 0x1c, 0xbf, 0xdf, 0xdb, 0x18, - 0x17, 0x39, 0xa7, 0xdb, 0xe2, 0xaf, 0xf8, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xe3, - 0x4f, 0x0d, 0x96, 0x0b, 0x00, 0x00, + GoTypes: file_oc_oc_proto_goTypes, + DependencyIndexes: file_oc_oc_proto_depIdxs, + EnumInfos: file_oc_oc_proto_enumTypes, + MessageInfos: file_oc_oc_proto_msgTypes, + }.Build() + File_oc_oc_proto = out.File + file_oc_oc_proto_rawDesc = nil + file_oc_oc_proto_goTypes = nil + file_oc_oc_proto_depIdxs = nil 
} diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index cf4aa145e6911..8c3ad32b9913f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -36,6 +36,7 @@ syntax = "proto3"; package telemetry; +option go_package = ".;telemetry"; // Interface exported by Agent service OpenConfigTelemetry { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go new file mode 100644 index 0000000000000..593e5a1e1002a --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package telemetry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// OpenConfigTelemetryClient is the client API for OpenConfigTelemetry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenConfigTelemetryClient interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) +} + +type openConfigTelemetryClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenConfigTelemetryClient(cc grpc.ClientConnInterface) OpenConfigTelemetryClient { + return &openConfigTelemetryClient{cc} +} + +func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenConfigTelemetry_ServiceDesc.Streams[0], "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &openConfigTelemetryTelemetrySubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenConfigTelemetry_TelemetrySubscribeClient interface { + Recv() (*OpenConfigData, error) + grpc.ClientStream +} + +type openConfigTelemetryTelemetrySubscribeClient struct { + grpc.ClientStream +} + +func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { + m := new(OpenConfigData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { + out := new(CancelSubscriptionReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { + out := new(GetSubscriptionsReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { + out := new(GetOperationalStateReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { + out := new(DataEncodingReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OpenConfigTelemetryServer is the server API for OpenConfigTelemetry service. +// All implementations must embed UnimplementedOpenConfigTelemetryServer +// for forward compatibility +type OpenConfigTelemetryServer interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. 
+ GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +// UnimplementedOpenConfigTelemetryServer must be embedded to have forward compatible implementations. +type UnimplementedOpenConfigTelemetryServer struct { +} + +func (UnimplementedOpenConfigTelemetryServer) TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method TelemetrySubscribe not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelTelemetrySubscription not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetrySubscriptions not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetryOperationalState not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataEncodings not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) mustEmbedUnimplementedOpenConfigTelemetryServer() {} + +// UnsafeOpenConfigTelemetryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenConfigTelemetryServer will +// result in compilation errors. 
+type UnsafeOpenConfigTelemetryServer interface { + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +func RegisterOpenConfigTelemetryServer(s grpc.ServiceRegistrar, srv OpenConfigTelemetryServer) { + s.RegisterService(&OpenConfigTelemetry_ServiceDesc, srv) +} + +func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) +} + +type OpenConfigTelemetry_TelemetrySubscribeServer interface { + Send(*OpenConfigData) error + grpc.ServerStream +} + +type openConfigTelemetryTelemetrySubscribeServer struct { + grpc.ServerStream +} + +func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { + return x.ServerStream.SendMsg(m) +} + +func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationalStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataEncodingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getDataEncodings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OpenConfigTelemetry_ServiceDesc is the grpc.ServiceDesc for OpenConfigTelemetry service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenConfigTelemetry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "telemetry.OpenConfigTelemetry", + HandlerType: (*OpenConfigTelemetryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "cancelTelemetrySubscription", + Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, + }, + { + MethodName: "getTelemetrySubscriptions", + Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, + }, + { + MethodName: "getTelemetryOperationalState", + Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, + }, + { + MethodName: "getDataEncodings", + Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "telemetrySubscribe", + Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "oc/oc.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 8db4ce0d543bc..9fed6a324bf34 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -47,6 +47,7 @@ var dataWithStringValues = &telemetry.OpenConfigData{ } type openConfigTelemetryServer struct { + telemetry.UnimplementedOpenConfigTelemetryServer } func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 741f24d04e75e..0083142fe7f2e 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -6,7 +6,7 @@ and creates metrics using one of the supported [input data formats][]. For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input plugin and use the old zookeeper connection method. -### Configuration +## Configuration ```toml [[inputs.kafka_consumer]] @@ -93,6 +93,15 @@ and use the old zookeeper connection method. ## waiting until the next flush_interval. # max_undelivered_messages = 1000 + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index 70affdc2372b4..777d7261dd175 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -3,13 +3,14 @@ package kafka_consumer import ( "context" "fmt" - "log" "strings" "sync" "time" "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/inputs" @@ -101,6 +102,15 @@ const sampleConfig = ` ## waiting until the next flush_interval. # max_undelivered_messages = 1000 + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -110,6 +120,7 @@ const sampleConfig = ` const ( defaultMaxUndeliveredMessages = 1000 + defaultMaxProcessingTime = config.Duration(100 * time.Millisecond) defaultConsumerGroup = "telegraf_metrics_consumers" reconnectDelay = 5 * time.Second ) @@ -118,14 +129,15 @@ type empty struct{} type semaphore chan empty type KafkaConsumer struct { - Brokers []string `toml:"brokers"` - ConsumerGroup string `toml:"consumer_group"` - MaxMessageLen int `toml:"max_message_len"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - Offset string `toml:"offset"` - BalanceStrategy string `toml:"balance_strategy"` - Topics []string `toml:"topics"` - TopicTag string `toml:"topic_tag"` + Brokers []string `toml:"brokers"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + MaxProcessingTime config.Duration `toml:"max_processing_time"` + Offset string `toml:"offset"` + BalanceStrategy string `toml:"balance_strategy"` + Topics []string `toml:"topics"` + TopicTag string `toml:"topic_tag"` kafka.ReadConfig @@ -147,13 +159,13 @@ type ConsumerGroup interface { } type ConsumerGroupCreator interface { - Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) + Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) } type SaramaCreator struct{} -func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { - return sarama.NewConsumerGroup(brokers, group, config) +func (*SaramaCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, cfg) } func (k *KafkaConsumer) SampleConfig() string { @@ -172,35 +184,38 @@ func (k *KafkaConsumer) Init() error { if k.MaxUndeliveredMessages == 0 { k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages } + if time.Duration(k.MaxProcessingTime) == 0 { + k.MaxProcessingTime = defaultMaxProcessingTime + } if k.ConsumerGroup == "" { k.ConsumerGroup = defaultConsumerGroup } - config := sarama.NewConfig() + 
cfg := sarama.NewConfig() // Kafka version 0.10.2.0 is required for consumer groups. - config.Version = sarama.V0_10_2_0 + cfg.Version = sarama.V0_10_2_0 - if err := k.SetConfig(config); err != nil { + if err := k.SetConfig(cfg); err != nil { return err } switch strings.ToLower(k.Offset) { case "oldest", "": - config.Consumer.Offsets.Initial = sarama.OffsetOldest + cfg.Consumer.Offsets.Initial = sarama.OffsetOldest case "newest": - config.Consumer.Offsets.Initial = sarama.OffsetNewest + cfg.Consumer.Offsets.Initial = sarama.OffsetNewest default: return fmt.Errorf("invalid offset %q", k.Offset) } switch strings.ToLower(k.BalanceStrategy) { case "range", "": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange case "roundrobin": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin case "sticky": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky default: return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) } @@ -209,7 +224,9 @@ func (k *KafkaConsumer) Init() error { k.ConsumerCreator = &SaramaCreator{} } - k.config = config + cfg.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) + + k.config = cfg return nil } @@ -232,7 +249,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { go func() { defer k.wg.Done() for ctx.Err() == nil { - handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag err := k.consumer.Consume(ctx, k.Topics, handler) @@ -276,12 +293,13 @@ type Message struct { session sarama.ConsumerGroupSession } -func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser, log telegraf.Logger) *ConsumerGroupHandler { handler := &ConsumerGroupHandler{ acc: acc.WithTracking(maxUndelivered), sem: make(chan empty, maxUndelivered), undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), parser: parser, + log: log, } return handler } @@ -299,6 +317,8 @@ type ConsumerGroupHandler struct { mu sync.Mutex undelivered map[telegraf.TrackingID]Message + + log telegraf.Logger } // Setup is called once when a new session is opened. It setups up the handler @@ -335,7 +355,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { msg, ok := h.undelivered[track.ID()] if !ok { - log.Printf("E! 
[inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + h.log.Errorf("Could not mark message delivered: %d", track.ID()) return } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index c73104278338e..55769a72404df 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -6,12 +6,14 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeConsumerGroup struct { @@ -41,10 +43,10 @@ type FakeCreator struct { ConsumerGroup *FakeConsumerGroup } -func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { +func (c *FakeCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { c.ConsumerGroup.brokers = brokers c.ConsumerGroup.group = group - c.ConsumerGroup.config = config + c.ConsumerGroup.config = cfg return c.ConsumerGroup, nil } @@ -63,6 +65,7 @@ func TestInit(t *testing.T) { require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) require.Equal(t, plugin.config.ClientID, "Telegraf") require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 100*time.Millisecond) }, }, { @@ -164,6 +167,16 @@ func TestInit(t *testing.T) { require.True(t, plugin.config.Net.TLS.Enable) }, }, + { + name: "custom max_processing_time", + plugin: &KafkaConsumer{ + MaxProcessingTime: config.Duration(1000 * time.Millisecond), + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 1000*time.Millisecond) + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -259,7 +272,7 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -274,11 +287,12 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - // This produces a flappy testcase probably due to a race between context cancelation and consumption. + // This produces a flappy testcase probably due to a race between context cancellation and consumption. // Furthermore, it is not clear what the outcome of this test should be... // err = cg.ConsumeClaim(session, &claim) //require.NoError(t, err) // So stick with the line below for now. 
+ //nolint:errcheck cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) @@ -288,7 +302,7 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -402,7 +416,7 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} parser := value.NewValueParser("cpu", "int", "", nil) - cg := NewConsumerGroupHandler(acc, 1, parser) + cg := NewConsumerGroupHandler(acc, 1, parser, testutil.Logger{}) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 2f0c219ea8647..59b1767812c25 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,5 +1,7 @@ # Kafka Consumer Legacy Input Plugin +## Deprecated in version 1.4. Please use [Kafka Consumer input plugin][] + The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka topic and adds messages to InfluxDB. The plugin assumes messages follow the line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) @@ -41,3 +43,5 @@ from the same topic in parallel. Running integration tests requires running Zookeeper & Kafka. See Makefile for kafka container command. + +[Kafka Consumer input plugin]: /plugins/inputs/kafka_consumer diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 976412a7196b5..473c5b9740847 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -6,11 +6,10 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) func TestReadsMetricsFromKafka(t *testing.T) { @@ -51,7 +50,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { var acc testutil.Accumulator // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + require.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { @@ -65,14 +64,14 @@ func TestReadsMetricsFromKafka(t *testing.T) { require.NoError(t, err) if len(acc.Metrics) == 1 { point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ "host": "server01", "direction": "in", "region": "us-west", }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } else { t.Errorf("No points found in accumulator, expected 1") } @@ -84,6 
+83,7 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 6a70387ee587b..ece2210e62688 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -2,7 +2,7 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. -### Configuration: +## Configuration ```toml [[inputs.kapacitor]] @@ -23,276 +23,334 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. # insecure_skip_verify = false ``` -### Measurements and fields +## Measurements and fields - [kapacitor](#kapacitor) - - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ - - [num_subscriptions](#num_subscriptions) _(integer)_ - - [num_tasks](#num_tasks) _(integer)_ + - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ + - [num_subscriptions](#num_subscriptions) _(integer)_ + - [num_tasks](#num_tasks) _(integer)_ - [kapacitor_alert](#kapacitor_alert) - - [notification_dropped](#notification_dropped) _(integer)_ - - [primary-handle-count](#primary-handle-count) _(integer)_ - - [secondary-handle-count](#secondary-handle-count) _(integer)_ + - [notification_dropped](#notification_dropped) _(integer)_ + - [primary-handle-count](#primary-handle-count) _(integer)_ + - [secondary-handle-count](#secondary-handle-count) _(integer)_ - (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ - - [dropped_member_events](#dropped_member_events) _(integer)_ - - [dropped_user_events](#dropped_user_events) _(integer)_ - - [query_handler_errors](#query_handler_errors) _(integer)_ + - [dropped_member_events](#dropped_member_events) _(integer)_ + - [dropped_user_events](#dropped_user_events) _(integer)_ + - [query_handler_errors](#query_handler_errors) _(integer)_ - [kapacitor_edges](#kapacitor_edges) - - [collected](#collected) _(integer)_ - - [emitted](#emitted) _(integer)_ + - [collected](#collected) _(integer)_ + - [emitted](#emitted) _(integer)_ - [kapacitor_ingress](#kapacitor_ingress) - - [points_received](#points_received) _(integer)_ + - [points_received](#points_received) _(integer)_ - [kapacitor_load](#kapacitor_load) - - [errors](#errors) _(integer)_ + - [errors](#errors) _(integer)_ - [kapacitor_memstats](#kapacitor_memstats) - - [alloc_bytes](#alloc_bytes) _(integer)_ - - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ - - [frees](#frees) _(integer)_ - - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ - - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ - - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ - - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ - - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ - - [heap_objects](#heap_objects) _(integer)_ - - [heap_released_bytes](#heap_released_bytes) _(integer)_ - - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ - - [last_gc_ns](#last_gc_ns) _(integer)_ - - [lookups](#lookups) _(integer)_ - - [mallocs](#mallocs) _(integer)_ - - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ - - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ - - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ - - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ - - [next_gc_ns](#next_gc_ns) _(integer)_ - - [num_gc](#num_gc) _(integer)_ - - [other_sys_bytes](#other_sys_bytes) _(integer)_ - - 
[pause_total_ns](#pause_total_ns) _(integer)_ - - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ - - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ - - [sys_bytes](#sys_bytes) _(integer)_ - - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ + - [alloc_bytes](#alloc_bytes) _(integer)_ + - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ + - [frees](#frees) _(integer)_ + - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ + - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ + - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ + - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ + - [heap_objects](#heap_objects) _(integer)_ + - [heap_released_bytes](#heap_released_bytes) _(integer)_ + - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ + - [last_gc_ns](#last_gc_ns) _(integer)_ + - [lookups](#lookups) _(integer)_ + - [mallocs](#mallocs) _(integer)_ + - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ + - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ + - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ + - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ + - [next_gc_ns](#next_gc_ns) _(integer)_ + - [num_gc](#num_gc) _(integer)_ + - [other_sys_bytes](#other_sys_bytes) _(integer)_ + - [pause_total_ns](#pause_total_ns) _(integer)_ + - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ + - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ + - [sys_bytes](#sys_bytes) _(integer)_ + - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ - [kapacitor_nodes](#kapacitor_nodes) - - [alerts_inhibited](#alerts_inhibited) _(integer)_ - - [alerts_triggered](#alerts_triggered) _(integer)_ - - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ - - [crits_triggered](#crits_triggered) _(integer)_ - - [errors](#errors) _(integer)_ - - [infos_triggered](#infos_triggered) _(integer)_ - - [oks_triggered](#oks_triggered) _(integer)_ - - [points_written](#points_written) _(integer)_ - - [warns_triggered](#warns_triggered) _(integer)_ - - [write_errors](#write_errors) _(integer)_ + - [alerts_inhibited](#alerts_inhibited) _(integer)_ + - [alerts_triggered](#alerts_triggered) _(integer)_ + - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ + - [crits_triggered](#crits_triggered) _(integer)_ + - [errors](#errors) _(integer)_ + - [infos_triggered](#infos_triggered) _(integer)_ + - [oks_triggered](#oks_triggered) _(integer)_ + - [points_written](#points_written) _(integer)_ + - [warns_triggered](#warns_triggered) _(integer)_ + - [write_errors](#write_errors) _(integer)_ - [kapacitor_topics](#kapacitor_topics) - - [collected](#collected) _(integer)_ - + - [collected](#collected) _(integer)_ --- -### kapacitor +## kapacitor + The `kapacitor` measurement stores fields with information related to [Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks) and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/). -#### num_enabled_tasks +### num_enabled_tasks + The number of enabled Kapacitor tasks. -#### num_subscriptions +### num_subscriptions + The number of Kapacitor/InfluxDB subscriptions. -#### num_tasks +### num_tasks + The total number of Kapacitor tasks. --- -### kapacitor_alert +## kapacitor_alert + The `kapacitor_alert` measurement stores fields with information related to [Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/). 
-#### notification-dropped +### notification-dropped + The number of internal notifications dropped because they arrive too late from another Kapacitor node. If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough to keep up with the volume of alerts. -#### primary-handle-count +### primary-handle-count + The number of times this node handled an alert as the primary. This count should increase under normal conditions. -#### secondary-handle-count +### secondary-handle-count + The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner. --- -### kapacitor_cluster +## kapacitor_cluster + The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes. -#### dropped_member_events +### dropped_member_events + The number of gossip member events that were dropped. -#### dropped_user_events +### dropped_user_events + The number of gossip user events that were dropped. --- -### kapacitor_edges +## kapacitor_edges + The `kapacitor_edges` measurement stores fields with information related to [edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines) in Kapacitor TICKscripts. -#### collected +### collected + The number of messages collected by TICKscript edges. -#### emitted +### emitted + The number of messages emitted by TICKscript edges. --- -### kapacitor_ingress +## kapacitor_ingress + The `kapacitor_ingress` measurement stores fields with information related to data coming into Kapacitor. -#### points_received +### points_received + The number of points received by Kapacitor. --- -### kapacitor_load +## kapacitor_load + The `kapacitor_load` measurement stores fields with information related to the [Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/). -#### errors +### errors + The number of errors reported from the load directory service. --- -### kapacitor_memstats +## kapacitor_memstats + The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage. -#### alloc_bytes +### alloc_bytes + The number of bytes of memory allocated by Kapacitor that are still in use. -#### buck_hash_sys_bytes +### buck_hash_sys_bytes + The number of bytes of memory used by the profiling bucket hash table. -#### frees +### frees + The number of heap objects freed. -#### gc_sys_bytes +### gc_sys_bytes + The number of bytes of memory used for garbage collection system metadata. -#### gc_cpu_fraction +### gc_cpu_fraction + The fraction of Kapacitor's available CPU time used by garbage collection since Kapacitor started. -#### heap_alloc_bytes +### heap_alloc_bytes + The number of reachable and unreachable heap objects garbage collection has not freed. -#### heap_idle_bytes +### heap_idle_bytes + The number of heap bytes waiting to be used. -#### heap_in_use_bytes +### heap_in_use_bytes + The number of heap bytes in use. -#### heap_objects +### heap_objects + The number of allocated objects. -#### heap_released_bytes +### heap_released_bytes + The number of heap bytes released to the operating system. -#### heap_sys_bytes +### heap_sys_bytes + The number of heap bytes obtained from `system`. 
-#### last_gc_ns +### last_gc_ns + The nanosecond epoch time of the last garbage collection. -#### lookups +### lookups + The total number of pointer lookups. -#### mallocs +### mallocs + The total number of mallocs. -#### mcache_in_use_bytes +### mcache_in_use_bytes + The number of bytes in use by mcache structures. -#### mcache_sys_bytes +### mcache_sys_bytes + The number of bytes used for mcache structures obtained from `system`. -#### mspan_in_use_bytes +### mspan_in_use_bytes + The number of bytes in use by mspan structures. -#### mspan_sys_bytes +### mspan_sys_bytes + The number of bytes used for mspan structures obtained from `system`. -#### next_gc_ns +### next_gc_ns + The nanosecond epoch time of the next garbage collection. -#### num_gc +### num_gc + The number of completed garbage collection cycles. -#### other_sys_bytes +### other_sys_bytes + The number of bytes used for other system allocations. -#### pause_total_ns +### pause_total_ns + The total number of nanoseconds spent in garbage collection "stop-the-world" pauses since Kapacitor started. -#### stack_in_use_bytes +### stack_in_use_bytes + The number of bytes in use by the stack allocator. -#### stack_sys_bytes +### stack_sys_bytes + The number of bytes obtained from `system` for the stack allocator. -#### sys_bytes +### sys_bytes + The number of bytes of memory obtained from `system`. -#### total_alloc_bytes +### total_alloc_bytes + The total number of bytes allocated, even if freed. --- -### kapacitor_nodes +## kapacitor_nodes + The `kapacitor_nodes` measurement stores fields related to events that occur in [TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/). -#### alerts_inhibited +### alerts_inhibited + The total number of alerts inhibited by TICKscripts. -#### alerts_triggered +### alerts_triggered + The total number of alerts triggered by TICKscripts. -#### avg_exec_time_ns +### avg_exec_time_ns + The average execution time of TICKscripts in nanoseconds. -#### crits_triggered +### crits_triggered + The number of critical (`crit`) alerts triggered by TICKscripts. -#### errors +### errors (from TICKscripts) + The number of errors caused by TICKscripts. -#### infos_triggered +### infos_triggered + The number of info (`info`) alerts triggered by TICKscripts. -#### oks_triggered +### oks_triggered + The number of ok (`ok`) alerts triggered by TICKscripts. #### points_written + The number of points written to InfluxDB or back to Kapacitor. #### warns_triggered + The number of warning (`warn`) alerts triggered by TICKscripts. #### working_cardinality + The total number of unique series processed. #### write_errors + The number of errors that occurred when writing to InfluxDB or other write endpoints. --- ### kapacitor_topics + The `kapacitor_topics` measurement stores fields related to -Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/). +Kapacitor topics](). + +#### collected (kapacitor_topics) -#### collected The number of events collected by Kapacitor topics. --- @@ -303,7 +361,7 @@ these values.
## Example Output -``` +```shell $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test * Plugin: inputs.kapacitor, Collection 1 > kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 diff --git a/plugins/inputs/kernel/README.md b/plugins/inputs/kernel/README.md index 0f28bf7770370..d3467e826db8f 100644 --- a/plugins/inputs/kernel/README.md +++ b/plugins/inputs/kernel/README.md @@ -9,7 +9,7 @@ not covered by other plugins as well as the value of `/proc/sys/kernel/random/en The metrics are documented in `man proc` under the `/proc/stat` section. The metrics are documented in `man 4 random` under the `/proc/stat` section. -``` +```text /proc/sys/kernel/random/entropy_avail @@ -39,7 +39,7 @@ processes 86031 Number of forks since boot. ``` -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/stat @@ -47,24 +47,24 @@ Number of forks since boot. # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel - - boot_time (integer, seconds since epoch, `btime`) - - context_switches (integer, `ctxt`) - - disk_pages_in (integer, `page (0)`) - - disk_pages_out (integer, `page (1)`) - - interrupts (integer, `intr`) - - processes_forked (integer, `processes`) - - entropy_avail (integer, `entropy_available`) + - boot_time (integer, seconds since epoch, `btime`) + - context_switches (integer, `ctxt`) + - disk_pages_in (integer, `page (0)`) + - disk_pages_out (integer, `page (1)`) + - interrupts (integer, `intr`) + - processes_forked (integer, `processes`) + - entropy_avail (integer, `entropy_available`) -### Tags: +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel --test * Plugin: kernel, Collection 1 > kernel entropy_available=2469i,boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816 diff --git a/plugins/inputs/kernel_vmstat/README.md b/plugins/inputs/kernel_vmstat/README.md index 3ca6a097c1456..a5e54f158c4f7 100644 --- a/plugins/inputs/kernel_vmstat/README.md +++ b/plugins/inputs/kernel_vmstat/README.md @@ -1,13 +1,12 @@ # Kernel VMStat Input Plugin -The kernel_vmstat plugin gathers virtual memory statistics -by reading /proc/vmstat. For a full list of available fields see the +The kernel_vmstat plugin gathers virtual memory statistics +by reading /proc/vmstat. For a full list of available fields see the /proc/vmstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). -For a better idea of what each field represents, see the +For a better idea of what each field represents, see the [vmstat man page](http://linux.die.net/man/8/vmstat). - -``` +```text /proc/vmstat kernel/system statistics. 
Common entries include (from http://www.linuxinsight.com/proc_vmstat.html): @@ -109,7 +108,7 @@ pgrotated 3781 nr_bounce 0 ``` -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/vmstat @@ -117,108 +116,108 @@ nr_bounce 0 # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel_vmstat - - nr_free_pages (integer, `nr_free_pages`) - - nr_inactive_anon (integer, `nr_inactive_anon`) - - nr_active_anon (integer, `nr_active_anon`) - - nr_inactive_file (integer, `nr_inactive_file`) - - nr_active_file (integer, `nr_active_file`) - - nr_unevictable (integer, `nr_unevictable`) - - nr_mlock (integer, `nr_mlock`) - - nr_anon_pages (integer, `nr_anon_pages`) - - nr_mapped (integer, `nr_mapped`) - - nr_file_pages (integer, `nr_file_pages`) - - nr_dirty (integer, `nr_dirty`) - - nr_writeback (integer, `nr_writeback`) - - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) - - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) - - nr_page_table_pages (integer, `nr_page_table_pages`) - - nr_kernel_stack (integer, `nr_kernel_stack`) - - nr_unstable (integer, `nr_unstable`) - - nr_bounce (integer, `nr_bounce`) - - nr_vmscan_write (integer, `nr_vmscan_write`) - - nr_writeback_temp (integer, `nr_writeback_temp`) - - nr_isolated_anon (integer, `nr_isolated_anon`) - - nr_isolated_file (integer, `nr_isolated_file`) - - nr_shmem (integer, `nr_shmem`) - - numa_hit (integer, `numa_hit`) - - numa_miss (integer, `numa_miss`) - - numa_foreign (integer, `numa_foreign`) - - numa_interleave (integer, `numa_interleave`) - - numa_local (integer, `numa_local`) - - numa_other (integer, `numa_other`) - - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) - - pgpgin (integer, `pgpgin`) - - pgpgout (integer, `pgpgout`) - - pswpin (integer, `pswpin`) - - pswpout (integer, `pswpout`) - - pgalloc_dma (integer, `pgalloc_dma`) - - pgalloc_dma32 (integer, `pgalloc_dma32`) - - pgalloc_normal (integer, `pgalloc_normal`) - - pgalloc_movable (integer, `pgalloc_movable`) - - pgfree (integer, `pgfree`) - - pgactivate (integer, `pgactivate`) - - pgdeactivate (integer, `pgdeactivate`) - - pgfault (integer, `pgfault`) - - pgmajfault (integer, `pgmajfault`) - - pgrefill_dma (integer, `pgrefill_dma`) - - pgrefill_dma32 (integer, `pgrefill_dma32`) - - pgrefill_normal (integer, `pgrefill_normal`) - - pgrefill_movable (integer, `pgrefill_movable`) - - pgsteal_dma (integer, `pgsteal_dma`) - - pgsteal_dma32 (integer, `pgsteal_dma32`) - - pgsteal_normal (integer, `pgsteal_normal`) - - pgsteal_movable (integer, `pgsteal_movable`) - - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) - - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) - - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) - - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) - - pgscan_direct_dma (integer, `pgscan_direct_dma`) - - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) - - pgscan_direct_normal (integer, `pgscan_direct_normal`) - - pgscan_direct_movable (integer, `pgscan_direct_movable`) - - zone_reclaim_failed (integer, `zone_reclaim_failed`) - - pginodesteal (integer, `pginodesteal`) - - slabs_scanned (integer, `slabs_scanned`) - - kswapd_steal (integer, `kswapd_steal`) - - kswapd_inodesteal (integer, `kswapd_inodesteal`) - - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) - - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) - - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) - - pageoutrun (integer, 
`pageoutrun`) - - allocstall (integer, `allocstall`) - - pgrotated (integer, `pgrotated`) - - compact_blocks_moved (integer, `compact_blocks_moved`) - - compact_pages_moved (integer, `compact_pages_moved`) - - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) - - compact_stall (integer, `compact_stall`) - - compact_fail (integer, `compact_fail`) - - compact_success (integer, `compact_success`) - - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) - - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) - - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) - - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) - - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) - - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) - - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) - - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) - - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) - - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) - - thp_fault_alloc (integer, `thp_fault_alloc`) - - thp_fault_fallback (integer, `thp_fault_fallback`) - - thp_collapse_alloc (integer, `thp_collapse_alloc`) - - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) - - thp_split (integer, `thp_split`) - -### Tags: + - nr_free_pages (integer, `nr_free_pages`) + - nr_inactive_anon (integer, `nr_inactive_anon`) + - nr_active_anon (integer, `nr_active_anon`) + - nr_inactive_file (integer, `nr_inactive_file`) + - nr_active_file (integer, `nr_active_file`) + - nr_unevictable (integer, `nr_unevictable`) + - nr_mlock (integer, `nr_mlock`) + - nr_anon_pages (integer, `nr_anon_pages`) + - nr_mapped (integer, `nr_mapped`) + - nr_file_pages (integer, `nr_file_pages`) + - nr_dirty (integer, `nr_dirty`) + - nr_writeback (integer, `nr_writeback`) + - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) + - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) + - nr_page_table_pages (integer, `nr_page_table_pages`) + - nr_kernel_stack (integer, `nr_kernel_stack`) + - nr_unstable (integer, `nr_unstable`) + - nr_bounce (integer, `nr_bounce`) + - nr_vmscan_write (integer, `nr_vmscan_write`) + - nr_writeback_temp (integer, `nr_writeback_temp`) + - nr_isolated_anon (integer, `nr_isolated_anon`) + - nr_isolated_file (integer, `nr_isolated_file`) + - nr_shmem (integer, `nr_shmem`) + - numa_hit (integer, `numa_hit`) + - numa_miss (integer, `numa_miss`) + - numa_foreign (integer, `numa_foreign`) + - numa_interleave (integer, `numa_interleave`) + - numa_local (integer, `numa_local`) + - numa_other (integer, `numa_other`) + - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) + - pgpgin (integer, `pgpgin`) + - pgpgout (integer, `pgpgout`) + - pswpin (integer, `pswpin`) + - pswpout (integer, `pswpout`) + - pgalloc_dma (integer, `pgalloc_dma`) + - pgalloc_dma32 (integer, `pgalloc_dma32`) + - pgalloc_normal (integer, `pgalloc_normal`) + - pgalloc_movable (integer, `pgalloc_movable`) + - pgfree (integer, `pgfree`) + - pgactivate (integer, `pgactivate`) + - pgdeactivate (integer, `pgdeactivate`) + - pgfault (integer, `pgfault`) + - pgmajfault (integer, `pgmajfault`) + - pgrefill_dma (integer, `pgrefill_dma`) + - pgrefill_dma32 (integer, `pgrefill_dma32`) + - pgrefill_normal (integer, `pgrefill_normal`) + - pgrefill_movable (integer, `pgrefill_movable`) + - pgsteal_dma (integer, `pgsteal_dma`) + - pgsteal_dma32 (integer, `pgsteal_dma32`) + - pgsteal_normal (integer, 
`pgsteal_normal`) + - pgsteal_movable (integer, `pgsteal_movable`) + - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) + - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) + - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) + - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) + - pgscan_direct_dma (integer, `pgscan_direct_dma`) + - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) + - pgscan_direct_normal (integer, `pgscan_direct_normal`) + - pgscan_direct_movable (integer, `pgscan_direct_movable`) + - zone_reclaim_failed (integer, `zone_reclaim_failed`) + - pginodesteal (integer, `pginodesteal`) + - slabs_scanned (integer, `slabs_scanned`) + - kswapd_steal (integer, `kswapd_steal`) + - kswapd_inodesteal (integer, `kswapd_inodesteal`) + - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) + - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) + - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) + - pageoutrun (integer, `pageoutrun`) + - allocstall (integer, `allocstall`) + - pgrotated (integer, `pgrotated`) + - compact_blocks_moved (integer, `compact_blocks_moved`) + - compact_pages_moved (integer, `compact_pages_moved`) + - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) + - compact_stall (integer, `compact_stall`) + - compact_fail (integer, `compact_fail`) + - compact_success (integer, `compact_success`) + - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) + - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) + - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) + - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) + - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) + - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) + - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) + - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) + - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) + - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) + - thp_fault_alloc (integer, `thp_fault_alloc`) + - thp_fault_fallback (integer, `thp_fault_fallback`) + - thp_collapse_alloc (integer, `thp_collapse_alloc`) + - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) + - thp_split (integer, `thp_split`) + +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel_vmstat --test * Plugin: kernel_vmstat, Collection 1 > kernel_vmstat 
allocstall=81496i,compact_blocks_moved=238196i,compact_fail=135220i,compact_pagemigrate_failed=0i,compact_pages_moved=6370588i,compact_stall=142092i,compact_success=6872i,htlb_buddy_alloc_fail=0i,htlb_buddy_alloc_success=0i,kswapd_high_wmark_hit_quickly=25439i,kswapd_inodesteal=29770874i,kswapd_low_wmark_hit_quickly=8756i,kswapd_skip_congestion_wait=0i,kswapd_steal=291534428i,nr_active_anon=2515657i,nr_active_file=2244914i,nr_anon_pages=1358675i,nr_anon_transparent_hugepages=2034i,nr_bounce=0i,nr_dirty=5690i,nr_file_pages=5153546i,nr_free_pages=78730i,nr_inactive_anon=426259i,nr_inactive_file=2366791i,nr_isolated_anon=0i,nr_isolated_file=0i,nr_kernel_stack=579i,nr_mapped=558821i,nr_mlock=0i,nr_page_table_pages=11115i,nr_shmem=541689i,nr_slab_reclaimable=459806i,nr_slab_unreclaimable=47859i,nr_unevictable=0i,nr_unstable=0i,nr_vmscan_write=6206i,nr_writeback=0i,nr_writeback_temp=0i,numa_foreign=0i,numa_hit=5113399878i,numa_interleave=35793i,numa_local=5113399878i,numa_miss=0i,numa_other=0i,pageoutrun=505006i,pgactivate=375664931i,pgalloc_dma=0i,pgalloc_dma32=122480220i,pgalloc_movable=0i,pgalloc_normal=5233176719i,pgdeactivate=122735906i,pgfault=8699921410i,pgfree=5359765021i,pginodesteal=9188431i,pgmajfault=122210i,pgpgin=219717626i,pgpgout=3495885510i,pgrefill_dma=0i,pgrefill_dma32=1180010i,pgrefill_movable=0i,pgrefill_normal=119866676i,pgrotated=60620i,pgscan_direct_dma=0i,pgscan_direct_dma32=12256i,pgscan_direct_movable=0i,pgscan_direct_normal=31501600i,pgscan_kswapd_dma=0i,pgscan_kswapd_dma32=4480608i,pgscan_kswapd_movable=0i,pgscan_kswapd_normal=287857984i,pgsteal_dma=0i,pgsteal_dma32=4466436i,pgsteal_movable=0i,pgsteal_normal=318463755i,pswpin=2092i,pswpout=6206i,slabs_scanned=93775616i,thp_collapse_alloc=24857i,thp_collapse_alloc_failed=102214i,thp_fault_alloc=346219i,thp_fault_fallback=895453i,thp_split=9817i,unevictable_pgs_cleared=0i,unevictable_pgs_culled=1531i,unevictable_pgs_mlocked=6988i,unevictable_pgs_mlockfreed=0i,unevictable_pgs_munlocked=6988i,unevictable_pgs_rescued=5426i,unevictable_pgs_scanned=0i,unevictable_pgs_stranded=0i,zone_reclaim_failed=0i 1459455200071462843 diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index a5002d5f21204..248f21a47aa58 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -7,7 +7,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. [Kibana]: https://www.elastic.co/ -### Configuration +## Configuration ```toml [[inputs.kibana]] @@ -29,7 +29,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. # insecure_skip_verify = false ``` -### Metrics +## Metrics - kibana - tags: @@ -48,9 +48,9 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. 
- concurrent_connections (integer) - requests_per_sec (float) -### Example Output +## Example Output -``` +```shell kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 ``` @@ -58,8 +58,8 @@ kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5 Requires the following tools: -* [Docker](https://docs.docker.com/get-docker/) -* [Docker Compose](https://docs.docker.com/compose/install/) +- [Docker](https://docs.docker.com/get-docker/) +- [Docker Compose](https://docs.docker.com/compose/install/) From the root of this project execute the following script: `./plugins/inputs/kibana/test_environment/run_test_env.sh` @@ -67,4 +67,4 @@ This will build the latest Telegraf and then start up Kibana and Elasticsearch, Then you can attach to the telegraf container to inspect the file `/tmp/metrics.out` to see if the status is being reported. -The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface to attach to the running container. \ No newline at end of file +The Visual Studio Code [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension provides an easy user interface to attach to the running container. diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index ba1a7580fd29b..681c77c636ce7 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -3,8 +3,7 @@ The [Kinesis][kinesis] consumer plugin reads from a Kinesis data stream and creates metrics using one of the supported [input data formats][]. - -### Configuration +## Configuration ```toml [[inputs.kinesis_consumer]] @@ -74,29 +73,28 @@ and creates metrics using one of the supported [input data formats][]. table_name = "default" ``` - -#### Required AWS IAM permissions +### Required AWS IAM permissions Kinesis: - - DescribeStream - - GetRecords - - GetShardIterator + +- DescribeStream +- GetRecords +- GetShardIterator DynamoDB: - - GetItem - - PutItem +- GetItem +- PutItem -#### DynamoDB Checkpoint +### DynamoDB Checkpoint The DynamoDB checkpoint stores the last processed record in a DynamoDB. 
To leverage this functionality, create a table with the following string type keys: -``` +```shell Partition key: namespace Sort key: shard_id ``` - [kinesis]: https://aws.amazon.com/kinesis/ [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go index 6d52f07835e6b..1e0d935e03cc6 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer_test.go @@ -2,15 +2,17 @@ package kinesis_consumer import ( "encoding/base64" + "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kinesis/types" consumer "github.com/harlow/kinesis-consumer" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "testing" ) func TestKinesisConsumer_onMessage(t *testing.T) { @@ -177,7 +179,7 @@ func TestKinesisConsumer_onMessage(t *testing.T) { ContentEncoding: "notsupported", } err := k.Init() - assert.NotNil(t, err) + require.NotNil(t, err) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -187,18 +189,18 @@ func TestKinesisConsumer_onMessage(t *testing.T) { records: tt.fields.records, } err := k.Init() - assert.Nil(t, err) + require.Nil(t, err) acc := testutil.Accumulator{} if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) } - assert.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) + require.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) for _, metric := range acc.Metrics { if logEventMessage, ok := metric.Fields["message"]; ok { - assert.Contains(t, logEventMessage.(string), tt.expected.messageContains) + require.Contains(t, logEventMessage.(string), tt.expected.messageContains) } else { t.Errorf("Expect logEvents to be present") } diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md index 518dd5d7f3720..14d77556f0465 100644 --- a/plugins/inputs/knx_listener/README.md +++ b/plugins/inputs/knx_listener/README.md @@ -3,9 +3,9 @@ The KNX input plugin that listens for messages on the KNX home-automation bus. This plugin connects to the KNX bus via a KNX-IP interface. Information about supported KNX message datapoint types can be found at the -underlying "knx-go" project site (https://github.com/vapourismo/knx-go). +underlying "knx-go" project site (). -### Configuration +## Configuration This is a sample config for the plugin. @@ -34,7 +34,7 @@ This is a sample config for the plugin. # addresses = ["5/5/3"] ``` -#### Measurement configurations +### Measurement configurations Each measurement contains only one datapoint-type (DPT) and assigns a list of addresses to this measurement. You can, for example group all temperature sensor @@ -43,23 +43,24 @@ messages of one datapoint-type to multiple measurements. **NOTE: You should not assign a group-address (GA) to multiple measurements!** -### Metrics +## Metrics Received KNX data is stored in the named measurement as configured above using the "value" field. 
Additional to the value, there are the following tags added to the datapoint: - - "groupaddress": KNX group-address corresponding to the value - - "unit": unit of the value - - "source": KNX physical address sending the value + +- "groupaddress": KNX group-address corresponding to the value +- "unit": unit of the value +- "source": KNX physical address sending the value To find out about the datatype of the datapoint please check your KNX project, the KNX-specification or the "knx-go" project for the corresponding DPT. -### Example Output +## Example Output This section shows example output in Line Protocol format. -``` +```shell illumination,groupaddress=5/5/4,host=Hugin,source=1.1.12,unit=lux value=17.889999389648438 1582132674999013274 temperature,groupaddress=5/5/1,host=Hugin,source=1.1.8,unit=°C value=17.799999237060547 1582132663427587361 windowopen,groupaddress=1/0/1,host=Hugin,source=1.1.3 value=true 1582132630425581320 diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go index b0502fbbc8e95..adb07eb6d0113 100644 --- a/plugins/inputs/knx_listener/knx_listener_test.go +++ b/plugins/inputs/knx_listener/knx_listener_test.go @@ -6,14 +6,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/vapourismo/knx-go/knx" "github.com/vapourismo/knx-go/knx/cemi" "github.com/vapourismo/knx-go/knx/dpt" + + "github.com/influxdata/telegraf/testutil" ) const epsilon = 1e-3 @@ -127,17 +125,17 @@ func TestRegularReceives_DPT(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, len(testcases)) for i, m := range acc.Metrics { - assert.Equal(t, "test", m.Measurement) - assert.Equal(t, testcases[i].address, m.Tags["groupaddress"]) - assert.Len(t, m.Fields, 1) + require.Equal(t, "test", m.Measurement) + require.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + require.Len(t, m.Fields, 1) switch v := testcases[i].value.(type) { case bool, int64, uint64: - assert.Equal(t, v, m.Fields["value"]) + require.Equal(t, v, m.Fields["value"]) case float64: - assert.InDelta(t, v, m.Fields["value"], epsilon) + require.InDelta(t, v, m.Fields["value"], epsilon) } - assert.True(t, !tstop.Before(m.Time)) - assert.True(t, !tstart.After(m.Time)) + require.True(t, !tstop.Before(m.Time)) + require.True(t, !tstart.After(m.Time)) } } @@ -178,13 +176,13 @@ func TestRegularReceives_MultipleMessages(t *testing.T) { // Check if we got what we expected require.Len(t, acc.Metrics, 2) - assert.Equal(t, "temperature", acc.Metrics[0].Measurement) - assert.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[0].Fields, 1) - assert.Equal(t, true, acc.Metrics[0].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[0].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + require.Len(t, acc.Metrics[0].Fields, 1) + require.Equal(t, true, acc.Metrics[0].Fields["value"]) - assert.Equal(t, "temperature", acc.Metrics[1].Measurement) - assert.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) - assert.Len(t, acc.Metrics[1].Fields, 1) - assert.Equal(t, false, acc.Metrics[1].Fields["value"]) + require.Equal(t, "temperature", acc.Metrics[1].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + require.Len(t, acc.Metrics[1].Fields, 1) + require.Equal(t, false, acc.Metrics[1].Fields["value"]) } diff --git 
a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index 7803d4fc4e9eb..2a7a010256767 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -19,7 +19,7 @@ the major cloud providers; this is roughly 4 release / 2 years. **This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -31,7 +31,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration: +## Configuration ```toml [[inputs.kube_inventory]] @@ -74,6 +74,8 @@ avoid cardinality issues: # tls_cert = "/path/to/certfile" ## Used for TLS client certificate authentication # tls_key = "/path/to/keyfile" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" ## Use TLS but skip chain & host verification # insecure_skip_verify = false @@ -81,7 +83,7 @@ avoid cardinality issues: # fielddrop = ["terminated_reason"] ``` -#### Kubernetes Permissions +## Kubernetes Permissions If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group. @@ -150,7 +152,7 @@ tls_cert = "/run/telegraf-kubernetes-cert" tls_key = "/run/telegraf-kubernetes-key" ``` -### Metrics: +## Metrics - kubernetes_daemonset - tags: @@ -167,7 +169,7 @@ tls_key = "/run/telegraf-kubernetes-key" - number_unavailable - updated_number_scheduled -* kubernetes_deployment +- kubernetes_deployment - tags: - deployment_name - namespace @@ -192,7 +194,7 @@ tls_key = "/run/telegraf-kubernetes-key" - ready - port -* kubernetes_ingress +- kubernetes_ingress - tags: - ingress_name - namespace @@ -220,7 +222,7 @@ tls_key = "/run/telegraf-kubernetes-key" - allocatable_memory_bytes - allocatable_pods -* kubernetes_persistentvolume +- kubernetes_persistentvolume - tags: - pv_name - phase @@ -238,7 +240,7 @@ tls_key = "/run/telegraf-kubernetes-key" - fields: - phase_type (int, [see below](#pvc-phase_type)) -* kubernetes_pod_container +- kubernetes_pod_container - tags: - container_name - namespace @@ -274,7 +276,7 @@ tls_key = "/run/telegraf-kubernetes-key" - port - target_port -* kubernetes_statefulset +- kubernetes_statefulset - tags: - statefulset_name - namespace @@ -289,7 +291,7 @@ tls_key = "/run/telegraf-kubernetes-key" - spec_replicas - observed_generation -#### pv `phase_type` +### pv `phase_type` The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. @@ -302,7 +304,7 @@ The persistentvolume "phase" is saved in the `phase` tag with a correlated numer | available | 4 | | unknown | 5 | -#### pvc `phase_type` +### pvc `phase_type` The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. 
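Because the textual `phase` tag and the numeric `phase_type` field carry the same information, one way to keep series counts down is to drop the tag and rely on the field alone. The following is a minimal sketch using Telegraf's generic `tagexclude` metric modifier; it is an illustration, not something this README prescribes:

```toml
[[inputs.kube_inventory]]
  ## Drop the textual "phase" tag and keep only the numeric phase_type field
  ## described above. tagexclude is a standard Telegraf metric modifier that
  ## can be set on any input plugin.
  tagexclude = ["phase"]
```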
@@ -313,9 +315,9 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated | pending | 2 | | unknown | 3 | -### Example Output: +## Example Output -``` +```shell kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go index 66455b004f918..da03c643283fe 100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -23,7 +23,7 @@ type client struct { func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { c, err := kubernetes.NewForConfig(&rest.Config{ TLSClientConfig: rest.TLSClientConfig{ - ServerName: baseURL, + ServerName: tlsConfig.ServerName, Insecure: tlsConfig.InsecureSkipVerify, CAFile: tlsConfig.TLSCA, CertFile: tlsConfig.TLSCert, diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go index 24db993dd39bb..a1167a8ed2e02 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -3,7 +3,6 @@ package kube_inventory import ( "context" "fmt" - "log" "os" "strconv" "strings" @@ -37,6 +36,8 @@ type KubernetesInventory struct { SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig client *client @@ -80,6 +81,7 @@ var sampleConfig = ` # tls_ca = "/path/to/cafile" # tls_cert = "/path/to/certfile" # tls_key = "/path/to/keyfile" + # tls_server_name = "kubernetes.example.com" ## Use TLS but skip chain & host verification # insecure_skip_verify = false ` @@ -169,15 +171,15 @@ func atoi(s string) int64 { return i } -func convertQuantity(s string, m float64) int64 { +func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + ki.Log.Debugf("failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("D! 
[inputs.kube_inventory] failed to parse float: %s", err.Error()) + ki.Log.Debugf("failed to parse float: %s", err.Error()) return 0 } if m < 1 { @@ -187,11 +189,11 @@ func convertQuantity(s string, m float64) int64 { } func (ki *KubernetesInventory) createSelectorFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) if err != nil { return err } - ki.selectorFilter = filter + ki.selectorFilter = selectorFilter return nil } diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index 3c7c9cb38e160..b46b4e6209ffc 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -26,13 +26,12 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato } for resourceName, val := range n.Status.Capacity { - switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = convertQuantity(val.String(), 1) - fields["capacity_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.String(), 1) + fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": fields["capacity_pods"] = atoi(val.String()) } @@ -41,10 +40,10 @@ func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulato for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = convertQuantity(val.String(), 1) - fields["allocatable_millicpu_cores"] = convertQuantity(val.String(), 1000) + fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.String(), 1) + fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": fields["allocatable_pods"] = atoi(val.String()) } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index ab4e5dd287cbe..ed95dd63d970d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -35,11 +35,11 @@ func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) if !ok { cs = &corev1.ContainerStatus{} } - gatherPodContainer(ki, p, *cs, c, acc) + ki.gatherPodContainer(p, *cs, c, acc) } } -func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { +func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) { stateCode := 3 stateReason := "" state := "unknown" @@ -103,17 +103,17 @@ func gatherPodContainer(ki *KubernetesInventory, p corev1.Pod, cs corev1.Contain for resourceName, val := range req { switch resourceName { case "cpu": - fields["resource_requests_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_requests_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } for resourceName, val := range lim { switch resourceName { 
case "cpu": - fields["resource_limits_millicpu_units"] = convertQuantity(val.String(), 1000) + fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["resource_limits_memory_bytes"] = convertQuantity(val.String(), 1) + fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1) } } diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 8ef5ef7b1dfca..07907f7eba18e 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -8,8 +8,8 @@ should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: -``` -$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' +```sh +curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' ``` In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. @@ -20,7 +20,7 @@ the major cloud providers; this is roughly 4 release / 2 years. **This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -32,7 +32,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration +## Configuration ```toml [[inputs.kubernetes]] @@ -62,7 +62,7 @@ avoid cardinality issues: # insecure_skip_verify = false ``` -### DaemonSet +## DaemonSet For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes Architecture][k8s-telegraf] or view the Helm charts: @@ -72,7 +72,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - [Chronograf][] - [Kapacitor][] -### Metrics +## Metrics - kubernetes_node - tags: @@ -97,7 +97,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - runtime_image_fs_capacity_bytes - runtime_image_fs_used_bytes -* kubernetes_pod_container +- kubernetes_pod_container - tags: - container_name - namespace @@ -129,7 +129,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - capacity_bytes - used_bytes -* kubernetes_pod_network +- kubernetes_pod_network - tags: - namespace - node_name @@ -140,9 +140,9 @@ Architecture][k8s-telegraf] or view the Helm charts: - tx_bytes - tx_errors -### Example Output +## Example Output -``` +```shell kubernetes_node kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 diff --git 
a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md index c47b22fee1dd1..f308b1218e46a 100644 --- a/plugins/inputs/lanz/README.md +++ b/plugins/inputs/lanz/README.md @@ -5,18 +5,18 @@ This plugin provides a consumer for use with Arista Networks’ Latency Analyzer Metrics are read from a stream of data via TCP through port 50001 on the switches management IP. The data is in Protobuffers format. For more information on Arista LANZ -- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz +- This plugin uses Arista's sdk. -- https://github.com/aristanetworks/goarista +- -### Configuration +## Configuration You will need to configure LANZ and enable streaming LANZ data. -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292 +- +- ```toml [[inputs.lanz]] @@ -26,9 +26,9 @@ You will need to configure LANZ and enable streaming LANZ data. ] ``` -### Metrics +## Metrics -For more details on the metrics see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto +For more details on the metrics see - lanz_congestion_record: - tags: @@ -47,7 +47,7 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - tx_latency (integer) - q_drop_count (integer) -+ lanz_global_buffer_usage_record +- lanz_global_buffer_usage_record - tags: - entry_type - source @@ -57,31 +57,31 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - buffer_size (integer) - duration (integer) - - -### Sample Queries +## Sample Queries Get the max tx_latency for the last hour for all interfaces on all switches. + ```sql SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max tx_latency for the last hour for all interfaces on all switches. + ```sql SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max buffer_size for over the last hour for all switches. 
+ ```sql SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname" ``` -### Example output -``` +## Example output + +```shell lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815 lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045 lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464 lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302 ``` - - diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 86bb93a8f754b..a77e99df61f6e 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -8,6 +8,7 @@ import ( "github.com/aristanetworks/goarista/lanz" pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -85,6 +86,7 @@ func (l *Lanz) Stop() { } func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { + //nolint:gosimple // for-select used on purpose for { select { case msg, ok := <-in: diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 684bfc8902bb8..f2a8b5815e36d 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -6,7 +6,8 @@ import ( "testing" pb "github.com/aristanetworks/goarista/lanz/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf/testutil" ) diff --git a/plugins/inputs/leofs/README.md b/plugins/inputs/leofs/README.md index bd028e65ab048..db77e8a527d9f 100644 --- a/plugins/inputs/leofs/README.md +++ b/plugins/inputs/leofs/README.md @@ -2,7 +2,7 @@ The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [LeoFS Documentation / System Administration / System Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/). 
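Since the `servers` option in the sample configuration below is a list, a single Telegraf instance can poll several LeoFS daemons at once. A minimal sketch, where the host names and ports are placeholders rather than LeoFS defaults:

```toml
[[inputs.leofs]]
  ## One entry per LeoFS daemon to poll over SNMP; replace these placeholder
  ## endpoints with your actual manager, storage, and gateway nodes.
  servers = [
    "leo-manager.example.com:4020",
    "leo-storage.example.com:4010",
    "leo-gateway.example.com:4000"
  ]
```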
-## Configuration: +## Configuration ```toml # Sample Config: @@ -11,57 +11,60 @@ The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using servers = ["127.0.0.1:4010"] ``` -## Measurements & Fields: +## Measurements & Fields + ### Statistics specific to the internals of LeoManager -#### Erlang VM + +#### Erlang VM of LeoManager - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min ### Statistics specific to the internals of LeoStorage -#### Erlang VM + +### Erlang VM of LeoStorage - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +### Total Number of Requests for LeoStorage - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Total Number of Objects and Total Size of Objects @@ -103,35 +106,36 @@ Note: The following items are available since LeoFS v1.4.0: Note: The all items are available since LeoFS v1.4.0. 
### Statistics specific to the internals of LeoGateway -#### Erlang VM + +#### Erlang VM of LeoGateway - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +#### Total Number of Requests for LeoGateway - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Object Cache @@ -140,15 +144,13 @@ Note: The all items are available since LeoFS v1.4.0. - total_of_files - total_cached_size - -### Tags: +### Tags All measurements have the following tags: - node - -### Example output: +### Example output #### LeoManager @@ -221,7 +223,7 @@ $ ./telegraf --config ./plugins/inputs/leofs/leo_storage.conf --input-filter leo #### LeoGateway -``` +```shell $ ./telegraf --config ./plugins/inputs/leofs/leo_gateway.conf --input-filter leofs --test > leofs, host=gateway_0, node=gateway_0@127.0.0.1 allocated_memory=87941120, diff --git a/plugins/inputs/linux_sysctl_fs/README.md b/plugins/inputs/linux_sysctl_fs/README.md index d6598e16ff30a..30e2f30881fab 100644 --- a/plugins/inputs/linux_sysctl_fs/README.md +++ b/plugins/inputs/linux_sysctl_fs/README.md @@ -1,9 +1,9 @@ # Linux Sysctl FS Input Plugin -The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt. +The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at . Example output: -``` +```shell > linux_sysctl_fs,host=foo dentry-want-pages=0i,file-max=44222i,aio-max-nr=65536i,inode-preshrink-nr=0i,dentry-nr=64340i,dentry-unused-nr=55274i,file-nr=1568i,aio-nr=0i,inode-nr=35952i,inode-free-nr=12957i,dentry-age-limit=45i 1490982022000000000 ``` diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 0abdba2c972df..29a66828e7455 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,19 +1,21 @@ # Logparser Input Plugin +## Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the [`grok` data format][grok parser] + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. - The `tail` plugin now provides all the functionality of the `logparser` plugin. 
Most options can be translated directly to the `tail` plugin: + - For options in the `[inputs.logparser.grok]` section, the equivalent option will have add the `grok_` prefix when using them in the `tail` input. - The grok `measurement` option can be replaced using the standard plugin `name_override` option. Migration Example: + ```diff - [[inputs.logparser]] - files = ["/var/log/apache/access.log"] @@ -38,7 +40,7 @@ Migration Example: + data_format = "grok" ``` -### Configuration +## Configuration ```toml [[inputs.logparser]] @@ -90,15 +92,14 @@ Migration Example: # timezone = "Canada/Eastern" ``` -### Grok Parser +## Grok Parser Reference the [grok parser][] documentation to setup the grok section of the configuration. +## Additional Resources -### Additional Resources - -- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ +- [tail]: /plugins/inputs/tail/README.md [grok parser]: /plugins/parsers/grok/README.md diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 3100c615cd4e4..a2f780afd21b9 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -25,7 +24,7 @@ func TestStartNoParsers(t *testing.T) { } acc := testutil.Accumulator{} - assert.Error(t, logparser.Start(&acc)) + require.Error(t, logparser.Start(&acc)) } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { @@ -41,7 +40,7 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { acc := testutil.Accumulator{} err := logparser.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestGrokParseLogFiles(t *testing.T) { @@ -112,7 +111,7 @@ func TestGrokParseLogFiles(t *testing.T) { func TestGrokParseLogFilesAppearLater(t *testing.T) { emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") defer os.RemoveAll(emptydir) - assert.NoError(t, err) + require.NoError(t, err) logparser := &LogParserPlugin{ Log: testutil.Logger{}, @@ -126,17 +125,17 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) - assert.NoError(t, err) + require.NoError(t, err) err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) - assert.NoError(t, err) + require.NoError(t, err) - assert.NoError(t, acc.GatherError(logparser.Gather)) + require.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) logparser.Stop() @@ -170,7 +169,7 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -202,7 +201,7 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 95ec3e6feae66..ee8ff59fe0d2d 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -5,7 +5,7 @@ This plugin reads metrics exposed by Logstash 5 and 
later is supported. -### Configuration +## Configuration ```toml [[inputs.logstash]] @@ -40,7 +40,7 @@ Logstash 5 and later is supported. # "X-Special-Header" = "Special-Value" ``` -### Metrics +## Metrics Additional plugin stats may be collected (because logstash doesn't consistently expose all stats) @@ -80,7 +80,7 @@ Additional plugin stats may be collected (because logstash doesn't consistently - gc_collectors_young_collection_count - uptime_in_millis -+ logstash_process +- logstash_process - tags: - node_id - node_name @@ -112,7 +112,7 @@ Additional plugin stats may be collected (because logstash doesn't consistently - filtered - out -+ logstash_plugins +- logstash_plugins - tags: - node_id - node_name @@ -148,9 +148,9 @@ Additional plugin stats may be collected (because logstash doesn't consistently - page_capacity_in_bytes - queue_size_in_bytes -### Example Output +## Example Output -``` +```shell logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 1566425244000000000 logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000 logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000 diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index 6fcaadabcd244..9f5a198587e4d 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -179,8 +179,8 @@ func (logstash *Logstash) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { - request, err := http.NewRequest("GET", url, nil) +func (logstash *Logstash) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -206,7 +206,7 @@ 
func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -218,10 +218,10 @@ func (logstash *Logstash) gatherJSONData(url string, value interface{}) error { } // gatherJVMStats gather the JVM metrics and add results to the accumulator -func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJSONData(url, jvmStats) + err := logstash.gatherJSONData(address, jvmStats) if err != nil { return err } @@ -244,10 +244,10 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu } // gatherJVMStats gather the Process metrics and add results to the accumulator -func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJSONData(url, processStats) + err := logstash.gatherJSONData(address, processStats) if err != nil { return err } @@ -403,10 +403,10 @@ func (logstash *Logstash) gatherQueueStats( } // gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6) -func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJSONData(url, pipelineStats) + err := logstash.gatherJSONData(address, pipelineStats) if err != nil { return err } @@ -447,10 +447,10 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } // gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) -func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := logstash.gatherJSONData(url, pipelinesStats) + err := logstash.gatherJSONData(address, pipelinesStats) if err != nil { return err } diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md index dbdf58f73b257..a6d8b08857b74 100644 --- a/plugins/inputs/lustre2/README.md +++ b/plugins/inputs/lustre2/README.md @@ -5,7 +5,7 @@ many requirements of leadership class HPC simulation environments. This plugin monitors the Lustre file system using its entries in the proc filesystem. 
-### Configuration +## Configuration ```toml # Read metrics from local Lustre service on OST, MDS @@ -24,7 +24,7 @@ This plugin monitors the Lustre file system using its entries in the proc filesy # ] ``` -### Metrics +## Metrics From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`: @@ -113,17 +113,16 @@ From `/proc/fs/lustre/mdt/*/job_stats`: - jobstats_sync - jobstats_unlink - -### Troubleshooting +## Troubleshooting Check for the default or custom procfiles in the proc filesystem, and reference the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not report all information from these files, only a limited set of items corresponding to the above metric fields. -### Example Output +## Example Output -``` +```shell lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000 lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000 diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 7fd3fd91f469e..3c5659e18f14f 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -7,11 +7,11 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Set config file variables to point to fake directory structure instead of /proc? @@ -358,7 +358,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) { require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) - assert.Equal(t, Lustre2{ + require.Equal(t, Lustre2{ OstProcfiles: []string{ "/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats", diff --git a/plugins/inputs/lvm/README.md b/plugins/inputs/lvm/README.md index c0ce1a2e6008a..40f37500b68ed 100644 --- a/plugins/inputs/lvm/README.md +++ b/plugins/inputs/lvm/README.md @@ -3,7 +3,7 @@ The Logical Volume Management (LVM) input plugin collects information about physical volumes, volume groups, and logical volumes. -### Configuration +## Configuration The `lvm` command requires elevated permissions. If the user has configured sudo with the ability to run these commands, then set the `use_sudo` to true. @@ -15,7 +15,7 @@ sudo with the ability to run these commands, then set the `use_sudo` to true. use_sudo = false ``` -#### Using sudo +### Using sudo If your account does not already have the ability to run commands with passwordless sudo then updates to the sudoers file are required. 
Below @@ -31,7 +31,7 @@ Cmnd_Alias LVM = /usr/sbin/pvs *, /usr/sbin/vgs *, /usr/sbin/lvs * Defaults!LVM !logfile, !syslog, !pam_session ``` -### Metrics +## Metrics Metrics are broken out by physical volume (pv), volume group (vg), and logical volume (lv): @@ -64,14 +64,16 @@ volume (lv): - data_percent - meta_percent -### Example Output +## Example Output The following example shows a system with the root partition on an LVM group as well as with a Docker thin-provisioned LVM group on a second drive: +```shell > lvm_physical_vol,path=/dev/sda2,vol_group=vgroot free=0i,size=249510756352i,used=249510756352i,used_percent=100 1631823026000000000 > lvm_physical_vol,path=/dev/sdb,vol_group=docker free=3858759680i,size=128316342272i,used=124457582592i,used_percent=96.99277612525741 1631823026000000000 > lvm_vol_group,name=vgroot free=0i,logical_volume_count=1i,physical_volume_count=1i,size=249510756352i,snapshot_count=0i,used_percent=100 1631823026000000000 > lvm_vol_group,name=docker free=3858759680i,logical_volume_count=1i,physical_volume_count=1i,size=128316342272i,snapshot_count=0i,used_percent=96.99277612525741 1631823026000000000 > lvm_logical_vol,name=lvroot,vol_group=vgroot data_percent=0,metadata_percent=0,size=249510756352i 1631823026000000000 > lvm_logical_vol,name=thinpool,vol_group=docker data_percent=0.36000001430511475,metadata_percent=1.3300000429153442,size=121899057152i 1631823026000000000 +``` diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md index 46750f6fc5efa..a3a7f599de8bf 100644 --- a/plugins/inputs/mailchimp/README.md +++ b/plugins/inputs/mailchimp/README.md @@ -2,7 +2,7 @@ Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/). -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage mailchimp`. @@ -21,7 +21,7 @@ generate it using `telegraf --usage mailchimp`. 
# campaign_id = "" ``` -### Metrics +## Metrics - mailchimp - tags: diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index 2f6cecdb9e0da..71e7bcea6d535 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,12 +5,13 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "net/url" "regexp" "sync" "time" + + "github.com/influxdata/telegraf" ) const ( @@ -22,11 +23,12 @@ var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper - Debug bool + debug bool sync.Mutex url *url.URL + log telegraf.Logger } type ReportsParams struct { @@ -53,12 +55,12 @@ func (p *ReportsParams) String() string { return v.Encode() } -func NewChimpAPI(apiKey string) *ChimpAPI { +func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI { u := &url.URL{} u.Scheme = "https" u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) - return &ChimpAPI{url: u} + return &ChimpAPI{url: u, log: log} } type APIError struct { @@ -90,7 +92,7 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { a.url.Path = reportsEndpoint var response ReportsResponse - rawjson, err := runChimp(a, params) + rawjson, err := a.runChimp(params) if err != nil { return response, err } @@ -109,7 +111,7 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report - rawjson, err := runChimp(a, ReportsParams{}) + rawjson, err := a.runChimp(ReportsParams{}) if err != nil { return response, err } @@ -122,21 +124,21 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { return response, nil } -func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { +func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) { client := &http.Client{ - Transport: api.Transport, + Transport: a.Transport, Timeout: 4 * time.Second, } var b bytes.Buffer - req, err := http.NewRequest("GET", api.url.String(), &b) + req, err := http.NewRequest("GET", a.url.String(), &b) if err != nil { return nil, err } req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") - if api.Debug { - log.Printf("D! [inputs.mailchimp] request URL: %s", req.URL.String()) + if a.debug { + a.log.Debugf("request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -148,15 +150,15 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) - return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body) } body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - if api.Debug { - log.Printf("D! 
[inputs.mailchimp] response Body: %q", string(body)) + if a.debug { + a.log.Debugf("response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index fe6892bf48743..b898cb6ba1768 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -14,6 +14,8 @@ type MailChimp struct { APIKey string `toml:"api_key"` DaysOld int `toml:"days_old"` CampaignID string `toml:"campaign_id"` + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -35,12 +37,13 @@ func (m *MailChimp) Description() string { return "Gathers metrics from the /3.0/reports MailChimp API" } -func (m *MailChimp) Gather(acc telegraf.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.APIKey) - } - m.api.Debug = false +func (m *MailChimp) Init() error { + m.api = NewChimpAPI(m.APIKey, m.Log) + return nil +} + +func (m *MailChimp) Gather(acc telegraf.Accumulator) error { if m.CampaignID == "" { since := "" if m.DaysOld > 0 { diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 1366d8859df5d..1df6c52cf6256 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMailChimpGatherReports(t *testing.T) { @@ -28,7 +28,8 @@ func TestMailChimpGatherReports(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -43,22 +44,22 @@ func TestMailChimpGatherReports(t *testing.T) { tags["campaign_title"] = "Freddie's Jokes Vol. 1" fields := map[string]interface{}{ - "emails_sent": int(200), - "abuse_reports": int(0), - "unsubscribed": int(2), - "hard_bounces": int(0), - "soft_bounces": int(2), - "syntax_errors": int(0), - "forwards_count": int(0), - "forwards_opens": int(0), - "opens_total": int(186), - "unique_opens": int(100), - "clicks_total": int(42), - "unique_clicks": int(400), - "unique_subscriber_clicks": int(42), - "facebook_recipient_likes": int(5), - "facebook_unique_likes": int(8), - "facebook_likes": int(42), + "emails_sent": 200, + "abuse_reports": 0, + "unsubscribed": 2, + "hard_bounces": 0, + "soft_bounces": 2, + "syntax_errors": 0, + "forwards_count": 0, + "forwards_opens": 0, + "opens_total": 186, + "unique_opens": 100, + "clicks_total": 42, + "unique_clicks": 400, + "unique_subscriber_clicks": 42, + "facebook_recipient_likes": 5, + "facebook_unique_likes": 8, + "facebook_likes": 42, "open_rate": float64(42), "click_rate": float64(42), "industry_open_rate": float64(0.17076777144396), @@ -92,7 +93,8 @@ func TestMailChimpGatherReport(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -157,7 +159,8 @@ func TestMailChimpGatherError(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md index 7feb4a10d9d04..acd6100df75f8 100644 --- a/plugins/inputs/marklogic/README.md +++ b/plugins/inputs/marklogic/README.md @@ -2,7 +2,7 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more host. 
-### Configuration: +## Configuration ```toml [[inputs.marklogic]] @@ -24,7 +24,7 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos # insecure_skip_verify = false ``` -### Metrics +## Metrics - marklogic - tags: @@ -56,9 +56,9 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos - http_server_receive_bytes - http_server_send_bytes -### Example Output: +## Example Output -``` +```shell $> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000 ``` diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index d2ef139bfc7a3..30f9ee6403074 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -163,9 +163,9 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { return nil } -func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, address string) error { ml := &MlHost{} - if err := c.gatherJSONData(url, ml); err != nil { + if err := c.gatherJSONData(address, ml); err != nil { return err } @@ -225,8 +225,8 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { return client, nil } -func (c *Marklogic) gatherJSONData(url string, v interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (c *Marklogic) gatherJSONData(address string, v interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/mcrouter/README.md b/plugins/inputs/mcrouter/README.md index 05c2597869e05..a657ef125a6de 100644 --- a/plugins/inputs/mcrouter/README.md +++ b/plugins/inputs/mcrouter/README.md @@ -2,7 +2,7 @@ This plugin gathers statistics data from a Mcrouter server. -### Configuration: +## Configuration ```toml # Read metrics from one or many mcrouter servers. @@ -15,7 +15,7 @@ This plugin gathers statistics data from a Mcrouter server. # timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the *mcrouter* measurement. 
@@ -88,16 +88,14 @@ Fields: * cmd_delete_out_all * cmd_lease_set_out_all -### Tags: +## Tags * Mcrouter measurements have the following tags: - - server (the host name from which metrics are gathered) + * server (the host name from which metrics are gathered) +## Example Output - -### Example Output: - -``` +```shell $ ./telegraf --config telegraf.conf --input-filter mcrouter --test mcrouter,server=localhost:11211 uptime=166,num_servers=1,num_servers_new=1,num_servers_up=0,num_servers_down=0,num_servers_closed=0,num_clients=1,num_suspect_servers=0,destination_batches_sum=0,destination_requests_sum=0,outstanding_route_get_reqs_queued=0,outstanding_route_update_reqs_queued=0,outstanding_route_get_avg_queue_size=0,outstanding_route_update_avg_queue_size=0,outstanding_route_get_avg_wait_time_sec=0,outstanding_route_update_avg_wait_time_sec=0,retrans_closed_connections=0,destination_pending_reqs=0,destination_inflight_reqs=0,destination_batch_size=0,asynclog_requests=0,proxy_reqs_processing=1,proxy_reqs_waiting=0,client_queue_notify_period=0,rusage_system=0.040966,rusage_user=0.020483,ps_num_minor_faults=2490,ps_num_major_faults=11,ps_user_time_sec=0.02,ps_system_time_sec=0.04,ps_vsize=697741312,ps_rss=10563584,fibers_allocated=0,fibers_pool_size=0,fibers_stack_high_watermark=0,successful_client_connections=18,duration_us=0,destination_max_pending_reqs=0,destination_max_inflight_reqs=0,retrans_per_kbyte_max=0,cmd_get_count=0,cmd_delete_out=0,cmd_lease_get=0,cmd_set=0,cmd_get_out_all=0,cmd_get_out=0,cmd_lease_set_count=0,cmd_other_out_all=0,cmd_lease_get_out=0,cmd_set_count=0,cmd_lease_set_out=0,cmd_delete_count=0,cmd_other=0,cmd_delete=0,cmd_get=0,cmd_lease_set=0,cmd_set_out=0,cmd_lease_get_count=0,cmd_other_out=0,cmd_lease_get_out_all=0,cmd_set_out_all=0,cmd_other_count=0,cmd_delete_out_all=0,cmd_lease_set_out_all=0 1453831884664956455 ``` diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index af197c3072089..07599ca2cc0b0 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -146,32 +146,33 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } // ParseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) ParseAddress(address string) (string, string, error) { - var protocol string +func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol string, err error) { var host string var port string - u, parseError := url.Parse(address) + parsedAddress = address + + u, parseError := url.Parse(parsedAddress) if parseError != nil { - return "", "", fmt.Errorf("Invalid server address") + return "", "", fmt.Errorf("invalid server address") } if u.Scheme != "tcp" && u.Scheme != "unix" { - return "", "", fmt.Errorf("Invalid server protocol") + return "", "", fmt.Errorf("invalid server protocol") } protocol = u.Scheme if protocol == "unix" { if u.Path == "" { - return "", "", fmt.Errorf("Invalid unix socket path") + return "", "", fmt.Errorf("invalid unix socket path") } - address = u.Path + parsedAddress = u.Path } else { if u.Host == "" { - return "", "", fmt.Errorf("Invalid host") + return "", "", fmt.Errorf("invalid host") } host = u.Hostname() @@ -185,10 +186,10 @@ func (m *Mcrouter) ParseAddress(address string) (string, string, error) { port = defaultServerURL.Port() } - address = host + ":" + port + parsedAddress = host + ":" + port } - return address, protocol, nil + return parsedAddress, protocol, nil } func (m *Mcrouter) gatherServer(ctx context.Context, 
address string, acc telegraf.Accumulator) error { diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index a9b525d46b79c..f02f2b53d4b85 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestAddressParsing(t *testing.T) { @@ -30,17 +30,17 @@ func TestAddressParsing(t *testing.T) { for _, args := range acceptTests { address, protocol, err := m.ParseAddress(args[0]) - assert.Nil(t, err, args[0]) - assert.True(t, address == args[1], args[0]) - assert.True(t, protocol == args[2], args[0]) + require.Nil(t, err, args[0]) + require.Equal(t, args[1], address, args[0]) + require.Equal(t, args[2], protocol, args[0]) } for _, addr := range rejectTests { address, protocol, err := m.ParseAddress(addr) - assert.NotNil(t, err, addr) - assert.Empty(t, address, addr) - assert.Empty(t, protocol, addr) + require.NotNil(t, err, addr) + require.Empty(t, address, addr) + require.Empty(t, protocol, addr) } } @@ -129,11 +129,11 @@ func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) + require.True(t, acc.HasInt64Field("mcrouter", metric), metric) } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("mcrouter", metric), metric) + require.True(t, acc.HasFloatField("mcrouter", metric), metric) } } diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md index 6180833b69ade..462ac89ca5507 100644 --- a/plugins/inputs/mdstat/README.md +++ b/plugins/inputs/mdstat/README.md @@ -1,15 +1,14 @@ # mdstat Input Plugin The mdstat plugin gathers statistics about any Linux MD RAID arrays configured on the host -by reading /proc/mdstat. For a full list of available fields see the +by reading /proc/mdstat. For a full list of available fields see the /proc/mdstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). -For a better idea of what each field represents, see the +For a better idea of what each field represents, see the [mdstat man page](https://raid.wiki.kernel.org/index.php/Mdstat). 
-Stat collection based on Prometheus' mdstat collection library at https://github.com/prometheus/procfs/blob/master/mdstat.go +Stat collection based on Prometheus' mdstat collection library at - -### Configuration: +## Configuration ```toml # Get kernel statistics from /proc/mdstat @@ -19,7 +18,7 @@ Stat collection based on Prometheus' mdstat collection library at https://github # file_name = "/proc/mdstat" ``` -### Measurements & Fields: +## Measurements & Fields - mdstat - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) @@ -32,16 +31,16 @@ Stat collection based on Prometheus' mdstat collection library at https://github - DisksSpare (the current count of "spare" disks in the array) - DisksTotal (total count of disks in the array) -### Tags: +## Tags - mdstat - ActivityState (`active` or `inactive`) - Devices (comma separated list of devices that make up the array) - Name (name of the array) -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter mdstat --test * Plugin: mdstat, Collection 1 > mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go index 070b7ddd234f5..27397f715ad0d 100644 --- a/plugins/inputs/mdstat/mdstat_test.go +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -7,8 +7,9 @@ import ( "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestFullMdstatProcFile(t *testing.T) { @@ -19,7 +20,7 @@ func TestFullMdstatProcFile(t *testing.T) { } acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(10620027200), @@ -46,7 +47,7 @@ func TestFailedDiskMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "BlocksSynced": int64(5860144128), @@ -73,7 +74,7 @@ func TestEmptyMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) } func TestInvalidMdStatProcFile1(t *testing.T) { @@ -86,7 +87,7 @@ func TestInvalidMdStatProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } const mdStatFileFull = ` diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md index 9122b885a09e1..3fff5178de75d 100644 --- a/plugins/inputs/mem/README.md +++ b/plugins/inputs/mem/README.md @@ -5,14 +5,15 @@ The mem plugin collects system memory metrics. For a more complete explanation of the difference between *used* and *actual_used* RAM, see [Linux ate my ram](http://www.linuxatemyram.com/). -### Configuration: +## Configuration + ```toml # Read metrics about memory usage [[inputs.mem]] # no configuration ``` -### Metrics: +## Metrics Available fields are dependent on platform. @@ -55,7 +56,8 @@ Available fields are dependent on platform. 
- write_back (integer, Linux) - write_back_tmp (integer, Linux) -### Example Output: -``` +## Example Output + +```shell mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000 ``` diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/memory.go index d01bf2a0fa156..84fcbc32eb3ea 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/memory.go @@ -79,16 +79,16 @@ func (ms *MemStats) Gather(acc telegraf.Accumulator) error { fields["page_tables"] = vm.PageTables fields["shared"] = vm.Shared fields["slab"] = vm.Slab - fields["sreclaimable"] = vm.SReclaimable - fields["sunreclaim"] = vm.SUnreclaim + fields["sreclaimable"] = vm.Sreclaimable + fields["sunreclaim"] = vm.Sunreclaim fields["swap_cached"] = vm.SwapCached fields["swap_free"] = vm.SwapFree fields["swap_total"] = vm.SwapTotal - fields["vmalloc_chunk"] = vm.VMallocChunk - fields["vmalloc_total"] = vm.VMallocTotal - fields["vmalloc_used"] = vm.VMallocUsed - fields["write_back_tmp"] = vm.WritebackTmp - fields["write_back"] = vm.Writeback + fields["vmalloc_chunk"] = vm.VmallocChunk + fields["vmalloc_total"] = vm.VmallocTotal + fields["vmalloc_used"] = vm.VmallocUsed + fields["write_back_tmp"] = vm.WriteBackTmp + fields["write_back"] = vm.WriteBack } acc.AddGauge("mem", fields, nil) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go index 626a1806c4055..06561875753c9 100644 --- a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/memory_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) @@ -42,16 +42,16 @@ func TestMemStats(t *testing.T) { Mapped: 42236, PageTables: 1236, Shared: 0, - SReclaimable: 1923022848, - SUnreclaim: 157728768, + Sreclaimable: 1923022848, + Sunreclaim: 157728768, SwapCached: 0, SwapFree: 524280, SwapTotal: 524280, - VMallocChunk: 3872908, - VMallocTotal: 3874808, - VMallocUsed: 1416, - Writeback: 0, - WritebackTmp: 0, + VmallocChunk: 3872908, + VmallocTotal: 3874808, + VmallocUsed: 1416, + WriteBack: 0, + WriteBackTmp: 0, } mps.On("VMStat").Return(vms, nil) diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index 721be913054a7..e3f8fafea48d6 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -2,7 +2,7 @@ This plugin gathers statistics data from a Memcached server. -### Configuration: +## Configuration ```toml # Read metrics from one or many memcached servers. @@ -14,7 +14,7 @@ This plugin gathers statistics data from a Memcached server. 
# unix_sockets = ["/var/run/memcached.sock"] ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the *memcached* measurement. @@ -63,22 +63,22 @@ Fields: Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt). -### Tags: +## Tags * Memcached measurements have the following tags: - - server (the host name from which metrics are gathered) + * server (the host name from which metrics are gathered) -### Sample Queries: +## Sample Queries You can use the following query to get the average get hit and miss ratio, as well as the total average size of cached items, number of cached items and average connection counts per server. -``` +```sql SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_get) as get_misses_ratio, mean(bytes), mean(curr_items), mean(curr_connections) FROM memcached WHERE time > now() - 1h GROUP BY server ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter memcached --test memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455 ``` diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 1d0807625b31b..1ebfe65bad6fb 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { @@ -32,7 +32,7 @@ func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("memcached", metric), metric) + require.True(t, acc.HasInt64Field("memcached", metric), metric) } } diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index 2845881880d95..0d48164fc8984 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -3,7 +3,7 @@ This input plugin gathers metrics from Mesos. For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. -### Configuration: +## Configuration ```toml # Telegraf plugin for gathering metrics from N Mesos masters @@ -53,280 +53,282 @@ For more information, please check the [Mesos Observability Metrics](http://meso By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default values. User needs to specify master/slave nodes this plugin will gather metrics from. 
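To make that concrete, here is a minimal configuration sketch; the `masters`/`slaves` option names mirror the plugin's `Masters`/`Slaves` fields visible in the Go changes below, and the endpoints are placeholders for your own deployment:

```toml
# Minimal sketch: tell the plugin which nodes to scrape.
[[inputs.mesos]]
  ## Mesos masters to query. A bare "host:port" (without a scheme) is
  ## rewritten to an http:// URL by the plugin, with a warning.
  masters = ["http://localhost:5050"]
  ## Mesos agents (slaves) to query, if any.
  slaves = ["http://localhost:5051"]
```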
-### Measurements & Fields: +## Measurements & Fields Mesos master metric groups - resources - - master/cpus_percent - - master/cpus_used - - master/cpus_total - - master/cpus_revocable_percent - - master/cpus_revocable_total - - master/cpus_revocable_used - - master/disk_percent - - master/disk_used - - master/disk_total - - master/disk_revocable_percent - - master/disk_revocable_total - - master/disk_revocable_used - - master/gpus_percent - - master/gpus_used - - master/gpus_total - - master/gpus_revocable_percent - - master/gpus_revocable_total - - master/gpus_revocable_used - - master/mem_percent - - master/mem_used - - master/mem_total - - master/mem_revocable_percent - - master/mem_revocable_total - - master/mem_revocable_used + - master/cpus_percent + - master/cpus_used + - master/cpus_total + - master/cpus_revocable_percent + - master/cpus_revocable_total + - master/cpus_revocable_used + - master/disk_percent + - master/disk_used + - master/disk_total + - master/disk_revocable_percent + - master/disk_revocable_total + - master/disk_revocable_used + - master/gpus_percent + - master/gpus_used + - master/gpus_total + - master/gpus_revocable_percent + - master/gpus_revocable_total + - master/gpus_revocable_used + - master/mem_percent + - master/mem_used + - master/mem_total + - master/mem_revocable_percent + - master/mem_revocable_total + - master/mem_revocable_used - master - - master/elected - - master/uptime_secs + - master/elected + - master/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - slaves - - master/slave_registrations - - master/slave_removals - - master/slave_reregistrations - - master/slave_shutdowns_scheduled - - master/slave_shutdowns_canceled - - master/slave_shutdowns_completed - - master/slaves_active - - master/slaves_connected - - master/slaves_disconnected - - master/slaves_inactive - - master/slave_unreachable_canceled - - master/slave_unreachable_completed - - master/slave_unreachable_scheduled - - master/slaves_unreachable + - master/slave_registrations + - master/slave_removals + - master/slave_reregistrations + - master/slave_shutdowns_scheduled + - master/slave_shutdowns_canceled + - master/slave_shutdowns_completed + - master/slaves_active + - master/slaves_connected + - master/slaves_disconnected + - master/slaves_inactive + - master/slave_unreachable_canceled + - master/slave_unreachable_completed + - master/slave_unreachable_scheduled + - master/slaves_unreachable - frameworks - - master/frameworks_active - - master/frameworks_connected - - master/frameworks_disconnected - - master/frameworks_inactive - - master/outstanding_offers + - master/frameworks_active + - master/frameworks_connected + - master/frameworks_disconnected + - master/frameworks_inactive + - master/outstanding_offers - framework offers - - master/frameworks/subscribed - - master/frameworks/calls_total - - master/frameworks/calls - - master/frameworks/events_total - - master/frameworks/events - - master/frameworks/operations_total - - master/frameworks/operations - - master/frameworks/tasks/active - - master/frameworks/tasks/terminal - - master/frameworks/offers/sent - - master/frameworks/offers/accepted - - master/frameworks/offers/declined - - master/frameworks/offers/rescinded - - master/frameworks/roles/suppressed + - 
master/frameworks/subscribed + - master/frameworks/calls_total + - master/frameworks/calls + - master/frameworks/events_total + - master/frameworks/events + - master/frameworks/operations_total + - master/frameworks/operations + - master/frameworks/tasks/active + - master/frameworks/tasks/terminal + - master/frameworks/offers/sent + - master/frameworks/offers/accepted + - master/frameworks/offers/declined + - master/frameworks/offers/rescinded + - master/frameworks/roles/suppressed - tasks - - master/tasks_error - - master/tasks_failed - - master/tasks_finished - - master/tasks_killed - - master/tasks_lost - - master/tasks_running - - master/tasks_staging - - master/tasks_starting - - master/tasks_dropped - - master/tasks_gone - - master/tasks_gone_by_operator - - master/tasks_killing - - master/tasks_unreachable + - master/tasks_error + - master/tasks_failed + - master/tasks_finished + - master/tasks_killed + - master/tasks_lost + - master/tasks_running + - master/tasks_staging + - master/tasks_starting + - master/tasks_dropped + - master/tasks_gone + - master/tasks_gone_by_operator + - master/tasks_killing + - master/tasks_unreachable - messages - - master/invalid_executor_to_framework_messages - - master/invalid_framework_to_executor_messages - - master/invalid_status_update_acknowledgements - - master/invalid_status_updates - - master/dropped_messages - - master/messages_authenticate - - master/messages_deactivate_framework - - master/messages_decline_offers - - master/messages_executor_to_framework - - master/messages_exited_executor - - master/messages_framework_to_executor - - master/messages_kill_task - - master/messages_launch_tasks - - master/messages_reconcile_tasks - - master/messages_register_framework - - master/messages_register_slave - - master/messages_reregister_framework - - master/messages_reregister_slave - - master/messages_resource_request - - master/messages_revive_offers - - master/messages_status_update - - master/messages_status_update_acknowledgement - - master/messages_unregister_framework - - master/messages_unregister_slave - - master/messages_update_slave - - master/recovery_slave_removals - - master/slave_removals/reason_registered - - master/slave_removals/reason_unhealthy - - master/slave_removals/reason_unregistered - - master/valid_framework_to_executor_messages - - master/valid_status_update_acknowledgements - - master/valid_status_updates - - master/task_lost/source_master/reason_invalid_offers - - master/task_lost/source_master/reason_slave_removed - - master/task_lost/source_slave/reason_executor_terminated - - master/valid_executor_to_framework_messages - - master/invalid_operation_status_update_acknowledgements - - master/messages_operation_status_update_acknowledgement - - master/messages_reconcile_operations - - master/messages_suppress_offers - - master/valid_operation_status_update_acknowledgements + - master/invalid_executor_to_framework_messages + - master/invalid_framework_to_executor_messages + - master/invalid_status_update_acknowledgements + - master/invalid_status_updates + - master/dropped_messages + - master/messages_authenticate + - master/messages_deactivate_framework + - master/messages_decline_offers + - master/messages_executor_to_framework + - master/messages_exited_executor + - master/messages_framework_to_executor + - master/messages_kill_task + - master/messages_launch_tasks + - master/messages_reconcile_tasks + - master/messages_register_framework + - master/messages_register_slave + - master/messages_reregister_framework + 
- master/messages_reregister_slave + - master/messages_resource_request + - master/messages_revive_offers + - master/messages_status_update + - master/messages_status_update_acknowledgement + - master/messages_unregister_framework + - master/messages_unregister_slave + - master/messages_update_slave + - master/recovery_slave_removals + - master/slave_removals/reason_registered + - master/slave_removals/reason_unhealthy + - master/slave_removals/reason_unregistered + - master/valid_framework_to_executor_messages + - master/valid_status_update_acknowledgements + - master/valid_status_updates + - master/task_lost/source_master/reason_invalid_offers + - master/task_lost/source_master/reason_slave_removed + - master/task_lost/source_slave/reason_executor_terminated + - master/valid_executor_to_framework_messages + - master/invalid_operation_status_update_acknowledgements + - master/messages_operation_status_update_acknowledgement + - master/messages_reconcile_operations + - master/messages_suppress_offers + - master/valid_operation_status_update_acknowledgements - evqueue - - master/event_queue_dispatches - - master/event_queue_http_requests - - master/event_queue_messages - - master/operator_event_stream_subscribers + - master/event_queue_dispatches + - master/event_queue_http_requests + - master/event_queue_messages + - master/operator_event_stream_subscribers - registrar - - registrar/state_fetch_ms - - registrar/state_store_ms - - registrar/state_store_ms/max - - registrar/state_store_ms/min - - registrar/state_store_ms/p50 - - registrar/state_store_ms/p90 - - registrar/state_store_ms/p95 - - registrar/state_store_ms/p99 - - registrar/state_store_ms/p999 - - registrar/state_store_ms/p9999 - - registrar/state_store_ms/count - - registrar/log/ensemble_size - - registrar/log/recovered - - registrar/queued_operations - - registrar/registry_size_bytes + - registrar/state_fetch_ms + - registrar/state_store_ms + - registrar/state_store_ms/max + - registrar/state_store_ms/min + - registrar/state_store_ms/p50 + - registrar/state_store_ms/p90 + - registrar/state_store_ms/p95 + - registrar/state_store_ms/p99 + - registrar/state_store_ms/p999 + - registrar/state_store_ms/p9999 + - registrar/state_store_ms/count + - registrar/log/ensemble_size + - registrar/log/recovered + - registrar/queued_operations + - registrar/registry_size_bytes - allocator - - allocator/allocation_run_ms - - allocator/allocation_run_ms/count - - allocator/allocation_run_ms/max - - allocator/allocation_run_ms/min - - allocator/allocation_run_ms/p50 - - allocator/allocation_run_ms/p90 - - allocator/allocation_run_ms/p95 - - allocator/allocation_run_ms/p99 - - allocator/allocation_run_ms/p999 - - allocator/allocation_run_ms/p9999 - - allocator/allocation_runs - - allocator/allocation_run_latency_ms - - allocator/allocation_run_latency_ms/count - - allocator/allocation_run_latency_ms/max - - allocator/allocation_run_latency_ms/min - - allocator/allocation_run_latency_ms/p50 - - allocator/allocation_run_latency_ms/p90 - - allocator/allocation_run_latency_ms/p95 - - allocator/allocation_run_latency_ms/p99 - - allocator/allocation_run_latency_ms/p999 - - allocator/allocation_run_latency_ms/p9999 - - allocator/roles/shares/dominant - - allocator/event_queue_dispatches - - allocator/offer_filters/roles/active - - allocator/quota/roles/resources/offered_or_allocated - - allocator/quota/roles/resources/guarantee - - allocator/resources/cpus/offered_or_allocated - - allocator/resources/cpus/total - - 
allocator/resources/disk/offered_or_allocated - - allocator/resources/disk/total - - allocator/resources/mem/offered_or_allocated - - allocator/resources/mem/total + - allocator/allocation_run_ms + - allocator/allocation_run_ms/count + - allocator/allocation_run_ms/max + - allocator/allocation_run_ms/min + - allocator/allocation_run_ms/p50 + - allocator/allocation_run_ms/p90 + - allocator/allocation_run_ms/p95 + - allocator/allocation_run_ms/p99 + - allocator/allocation_run_ms/p999 + - allocator/allocation_run_ms/p9999 + - allocator/allocation_runs + - allocator/allocation_run_latency_ms + - allocator/allocation_run_latency_ms/count + - allocator/allocation_run_latency_ms/max + - allocator/allocation_run_latency_ms/min + - allocator/allocation_run_latency_ms/p50 + - allocator/allocation_run_latency_ms/p90 + - allocator/allocation_run_latency_ms/p95 + - allocator/allocation_run_latency_ms/p99 + - allocator/allocation_run_latency_ms/p999 + - allocator/allocation_run_latency_ms/p9999 + - allocator/roles/shares/dominant + - allocator/event_queue_dispatches + - allocator/offer_filters/roles/active + - allocator/quota/roles/resources/offered_or_allocated + - allocator/quota/roles/resources/guarantee + - allocator/resources/cpus/offered_or_allocated + - allocator/resources/cpus/total + - allocator/resources/disk/offered_or_allocated + - allocator/resources/disk/total + - allocator/resources/mem/offered_or_allocated + - allocator/resources/mem/total Mesos slave metric groups + - resources - - slave/cpus_percent - - slave/cpus_used - - slave/cpus_total - - slave/cpus_revocable_percent - - slave/cpus_revocable_total - - slave/cpus_revocable_used - - slave/disk_percent - - slave/disk_used - - slave/disk_total - - slave/disk_revocable_percent - - slave/disk_revocable_total - - slave/disk_revocable_used - - slave/gpus_percent - - slave/gpus_used - - slave/gpus_total, - - slave/gpus_revocable_percent - - slave/gpus_revocable_total - - slave/gpus_revocable_used - - slave/mem_percent - - slave/mem_used - - slave/mem_total - - slave/mem_revocable_percent - - slave/mem_revocable_total - - slave/mem_revocable_used + - slave/cpus_percent + - slave/cpus_used + - slave/cpus_total + - slave/cpus_revocable_percent + - slave/cpus_revocable_total + - slave/cpus_revocable_used + - slave/disk_percent + - slave/disk_used + - slave/disk_total + - slave/disk_revocable_percent + - slave/disk_revocable_total + - slave/disk_revocable_used + - slave/gpus_percent + - slave/gpus_used + - slave/gpus_total, + - slave/gpus_revocable_percent + - slave/gpus_revocable_total + - slave/gpus_revocable_used + - slave/mem_percent + - slave/mem_used + - slave/mem_total + - slave/mem_revocable_percent + - slave/mem_revocable_total + - slave/mem_revocable_used - agent - - slave/registered - - slave/uptime_secs + - slave/registered + - slave/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - executors - - containerizer/mesos/container_destroy_errors - - slave/container_launch_errors - - slave/executors_preempted - - slave/frameworks_active - - slave/executor_directory_max_allowed_age_secs - - slave/executors_registering - - slave/executors_running - - slave/executors_terminated - - slave/executors_terminating - - slave/recovery_errors + - containerizer/mesos/container_destroy_errors + - 
slave/container_launch_errors + - slave/executors_preempted + - slave/frameworks_active + - slave/executor_directory_max_allowed_age_secs + - slave/executors_registering + - slave/executors_running + - slave/executors_terminated + - slave/executors_terminating + - slave/recovery_errors - tasks - - slave/tasks_failed - - slave/tasks_finished - - slave/tasks_killed - - slave/tasks_lost - - slave/tasks_running - - slave/tasks_staging - - slave/tasks_starting + - slave/tasks_failed + - slave/tasks_finished + - slave/tasks_killed + - slave/tasks_lost + - slave/tasks_running + - slave/tasks_staging + - slave/tasks_starting - messages - - slave/invalid_framework_messages - - slave/invalid_status_updates - - slave/valid_framework_messages - - slave/valid_status_updates + - slave/invalid_framework_messages + - slave/invalid_status_updates + - slave/valid_framework_messages + - slave/valid_status_updates -### Tags: +## Tags - All master/slave measurements have the following tags: - - server (network location of server: `host:port`) - - url (URL origin of server: `scheme://host:port`) - - role (master/slave) + - server (network location of server: `host:port`) + - url (URL origin of server: `scheme://host:port`) + - role (master/slave) - All master measurements have the extra tags: - - state (leader/follower) + - state (leader/follower) -### Example Output: -``` +## Example Output + +```shell $ telegraf --config ~/mesos.conf --input-filter mesos --test * Plugin: mesos, Collection 1 mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101 @@ -347,4 +349,3 @@ master/mem_revocable_used=0,master/mem_total=1002, master/mem_used=0,master/messages_authenticate=0, master/messages_deactivate_framework=0 ... ``` - diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 68203c9d480cb..991f8a9fd7003 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "io" - "log" "net" "net/http" "net/url" @@ -23,7 +22,7 @@ type Role string const ( MASTER Role = "master" - SLAVE = "slave" + SLAVE Role = "slave" ) type Mesos struct { @@ -100,7 +99,7 @@ func (m *Mesos) Description() string { return "Telegraf plugin for gathering metrics from N Mesos masters" } -func parseURL(s string, role Role) (*url.URL, error) { +func (m *Mesos) parseURL(s string, role Role) (*url.URL, error) { if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") { host, port, err := net.SplitHostPort(s) // no port specified @@ -115,7 +114,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! 
[inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) + m.Log.Warnf("using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -139,7 +138,7 @@ func (m *Mesos) initialize() error { m.masterURLs = make([]*url.URL, 0, len(m.Masters)) for _, master := range m.Masters { - u, err := parseURL(master, MASTER) + u, err := m.parseURL(master, MASTER) if err != nil { return err } @@ -150,7 +149,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = make([]*url.URL, 0, len(m.Slaves)) for _, slave := range m.Slaves { - u, err := parseURL(slave, SLAVE) + u, err := m.parseURL(slave, SLAVE) if err != nil { return err } @@ -241,11 +240,11 @@ func metricsDiff(role Role, w []string) []string { } // masterBlocks serves as kind of metrics registry grouping them in sets -func getMetrics(role Role, group string) []string { - m := make(map[string][]string) +func (m *Mesos) getMetrics(role Role, group string) []string { + metrics := make(map[string][]string) if role == MASTER { - m["resources"] = []string{ + metrics["resources"] = []string{ "master/cpus_percent", "master/cpus_used", "master/cpus_total", @@ -272,12 +271,12 @@ func getMetrics(role Role, group string) []string { "master/mem_revocable_used", } - m["master"] = []string{ + metrics["master"] = []string{ "master/elected", "master/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -286,7 +285,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["agents"] = []string{ + metrics["agents"] = []string{ "master/slave_registrations", "master/slave_removals", "master/slave_reregistrations", @@ -303,7 +302,7 @@ func getMetrics(role Role, group string) []string { "master/slaves_unreachable", } - m["frameworks"] = []string{ + metrics["frameworks"] = []string{ "master/frameworks_active", "master/frameworks_connected", "master/frameworks_disconnected", @@ -314,10 +313,10 @@ func getMetrics(role Role, group string) []string { // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. // These empty groups are included to prevent the "unknown metrics group" info log below. // filterMetrics() filters these metrics by looking for names with the corresponding prefix. 
- m["framework_offers"] = []string{} - m["allocator"] = []string{} + metrics["framework_offers"] = []string{} + metrics["allocator"] = []string{} - m["tasks"] = []string{ + metrics["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", "master/tasks_finished", @@ -333,7 +332,7 @@ func getMetrics(role Role, group string) []string { "master/tasks_unreachable", } - m["messages"] = []string{ + metrics["messages"] = []string{ "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/invalid_status_update_acknowledgements", @@ -377,14 +376,14 @@ func getMetrics(role Role, group string) []string { "master/valid_operation_status_update_acknowledgements", } - m["evqueue"] = []string{ + metrics["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", "master/operator_event_stream_subscribers", } - m["registrar"] = []string{ + metrics["registrar"] = []string{ "registrar/state_fetch_ms", "registrar/state_store_ms", "registrar/state_store_ms/max", @@ -402,7 +401,7 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/count", } } else if role == SLAVE { - m["resources"] = []string{ + metrics["resources"] = []string{ "slave/cpus_percent", "slave/cpus_used", "slave/cpus_total", @@ -429,12 +428,12 @@ func getMetrics(role Role, group string) []string { "slave/mem_revocable_used", } - m["agent"] = []string{ + metrics["agent"] = []string{ "slave/registered", "slave/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -443,7 +442,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["executors"] = []string{ + metrics["executors"] = []string{ "containerizer/mesos/container_destroy_errors", "slave/container_launch_errors", "slave/executors_preempted", @@ -456,7 +455,7 @@ func getMetrics(role Role, group string) []string { "slave/recovery_errors", } - m["tasks"] = []string{ + metrics["tasks"] = []string{ "slave/tasks_failed", "slave/tasks_finished", "slave/tasks_killed", @@ -466,7 +465,7 @@ func getMetrics(role Role, group string) []string { "slave/tasks_starting", } - m["messages"] = []string{ + metrics["messages"] = []string{ "slave/invalid_framework_messages", "slave/invalid_status_updates", "slave/valid_framework_messages", @@ -474,10 +473,10 @@ func getMetrics(role Role, group string) []string { } } - ret, ok := m[group] + ret, ok := metrics[group] if !ok { - log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group) + m.Log.Infof("unknown role %q metrics group: %s", role, group) return []string{} } @@ -512,7 +511,7 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { // All other metrics have predictable names. We can use getMetrics() to retrieve them. 
default: - for _, v := range getMetrics(role, k) { + for _, v := range m.getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { delete(*metrics, v) } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 4b6d5ab74d371..2605ddd4678c2 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -10,8 +10,9 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} @@ -340,7 +341,7 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } @@ -357,7 +358,7 @@ func TestMasterFilter(t *testing.T) { // Assert unexpected metrics are not present. for _, v := range b { - for _, x := range getMetrics(MASTER, v) { + for _, x := range m.getMetrics(MASTER, v) { _, ok := masterMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } @@ -402,13 +403,13 @@ func TestSlaveFilter(t *testing.T) { m.filterMetrics(SLAVE, &slaveMetrics) for _, v := range b { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { - for _, x := range getMetrics(SLAVE, v) { + for _, x := range m.getMetrics(SLAVE, v) { _, ok := slaveMetrics[x] require.Truef(t, ok, "Didn't find key %s, it should present.", x) } diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 026c9e3b3fb99..e5f1f00ef26b8 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -7,7 +7,7 @@ This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14. When using an version of Minecraft earlier than 1.13, be aware that the values for some criterion has changed and may need to be modified. -#### Server Setup +## Server Setup Enable [RCON][] on the Minecraft server, add this to your server configuration in the [server.properties][] file: @@ -24,22 +24,25 @@ from the server console, or over an RCON connection. When getting started pick an easy to test objective. 
This command will add an objective that counts the number of times a player has jumped: -``` + +```sh /scoreboard objectives add jumps minecraft.custom:minecraft.jump ``` Once a player has triggered the event they will be added to the scoreboard, you can then list all players with recorded scores: -``` + +```sh /scoreboard players list ``` View the current scores with a command, substituting your player name: -``` + +```sh /scoreboard players list Etho ``` -### Configuration +## Configuration ```toml [[inputs.minecraft]] @@ -53,7 +56,7 @@ View the current scores with a command, substituting your player name: password = "" ``` -### Metrics +## Metrics - minecraft - tags: @@ -64,15 +67,17 @@ View the current scores with a command, substituting your player name: - fields: - `` (integer, count) -### Sample Queries: +## Sample Queries Get the number of jumps per player in the last hour: + ```sql SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player" ``` -### Example Output: -``` +## Example Output + +```shell minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000 minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000 minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 641a8ae75db9f..4aa712d4b04f4 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -45,17 +45,17 @@ func (c *connector) Connect() (Connection, error) { return nil, err } - rcon, err := rcon.NewClient(c.hostname, p) + client, err := rcon.NewClient(c.hostname, p) if err != nil { return nil, err } - _, err = rcon.Authorize(c.password) + _, err = client.Authorize(c.password) if err != nil { return nil, err } - return &connection{rcon: rcon}, nil + return &connection{client: client}, nil } func newClient(connector Connector) *client { @@ -111,11 +111,11 @@ func (c *client) Scores(player string) ([]Score, error) { } type connection struct { - rcon *rcon.Client + client *rcon.Client } func (c *connection) Execute(command string) (string, error) { - packet, err := c.rcon.Execute(command) + packet, err := c.client.Execute(command) if err != nil { return "", err } diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index ccc020edb4fb6..6efce2ba5c4b1 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -67,17 +67,17 @@ func (p Packet) Compile() (payload []byte, err error) { var padding [PacketPaddingSize]byte if err = binary.Write(&buffer, binary.LittleEndian, &size); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Challenge); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Type); nil != err { - return + return nil, err } - if _, err := buffer.WriteString(p.Body); err != nil { + if _, err = buffer.WriteString(p.Body); err != nil { return nil, err } - if _, err := buffer.Write(padding[:]); err != nil { + if _, err = buffer.Write(padding[:]); err != nil { return nil, err } @@ -95,16 +95,13 @@ func NewPacket(challenge, typ int32, body string) (packet *Packet) { // or a potential error. 
func (c *Client) Authorize(password string) (response *Packet, err error) { if response, err = c.Send(Auth, password); nil == err { - if response.Header.Type == AuthResponse { - c.Authorized = true - } else { - err = ErrFailedAuthorization - response = nil - return + if response.Header.Type != AuthResponse { + return nil, ErrFailedAuthorization } + c.Authorized = true } - return + return response, err } // Execute calls Send with the appropriate command type and the provided @@ -114,7 +111,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) { return c.Send(Exec, command) } -// Sends accepts the commands type and its string to execute to the clients server, +// Send accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, // and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned @@ -213,5 +210,5 @@ func NewClient(host string, port int) (client *Client, err error) { client.Host = host client.Port = port client.Connection, err = net.Dial("tcp", fmt.Sprintf("%v:%v", client.Host, client.Port)) - return + return client, err } diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index ac01e140b695c..fd9b92e513178 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -3,7 +3,7 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding Registers via Modbus TCP or Modbus RTU/ASCII. -### Configuration +## Example configuration ```toml [[inputs.modbus]] @@ -46,6 +46,13 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## Note: You have to enable telegraf's debug mode to see those messages! # debug_connection = false + ## Define the configuration schema + ## |---register -- define fields per register type in the original style (only supports one slave ID) + ## |---request -- define fields on a requests base + configuration_type = "register" + + ## --- "register" configuration style --- + ## Measurements ## @@ -74,10 +81,11 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) - ## FLOAT32 (deprecated), FIXED, UFIXED (fixed-point representation on input) - ## scale - the final numeric variable representation - ## address - variable address + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, + ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) + ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) + ## scale - the final numeric variable representation + ## address - variable address holding_registers = [ { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, @@ -93,6 +101,98 @@ Registers via Modbus TCP or Modbus RTU/ASCII. { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, ] + ## --- "request" configuration style --- + + ## Per request definition + ## + + ## Define a request sent to the device + ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. + [[inputs.modbus.request]] + ## ID of the modbus slave device to query. + ## If you need to query multiple slave-devices, create several "request" definitions. 
+ slave_id = 1 + + ## Byte order of the data. + ## |---ABCD -- Big Endian (Motorola) + ## |---DCBA -- Little Endian (Intel) + ## |---BADC -- Big Endian with byte swap + ## |---CDAB -- Little Endian with byte swap + byte_order = "ABCD" + + ## Type of the register for the request + ## Can be "coil", "discrete", "holding" or "input" + register = "coil" + + ## Name of the measurement. + ## Can be overriden by the individual field definitions. Defaults to "modbus" + # measurement = "modbus" + + ## Field definitions + ## Analog Variables, Input Registers and Holding Registers + ## address - address of the register to query. For coil and discrete inputs this is the bit address. + ## name *1 - field name + ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and + ## FLOAT32, FLOAT64 (IEEE 754 binary representation) + ## scale *1,2 - (optional) factor to scale the variable with + ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if + ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## measurement *1 - (optional) measurement name, defaults to the setting of the request + ## omit - (optional) omit this field. Useful to leave out single values when querying many registers + ## with a single request. Defaults to "false". + ## + ## *1: Those fields are ignored if field is omitted ("omit"=true) + ## + ## *2: Thise fields are ignored for both "coil" and "discrete"-input type of registers. For those register types + ## the fields are output as zero or one in UINT64 format by default. + + ## Coil / discrete input example + fields = [ + { address=0, name="motor1_run"}, + { address=1, name="jog", measurement="motor"}, + { address=2, name="motor1_stop", omit=true}, + { address=3, name="motor1_overheating"}, + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + + [[inputs.modbus.request]] + ## Holding example + ## All of those examples will result in FLOAT64 field outputs + slave_id = 1 + byte_order = "DCBA" + register = "holding" + fields = [ + { address=0, name="voltage", type="INT16", scale=0.1 }, + { address=1, name="current", type="INT32", scale=0.001 }, + { address=3, name="power", type="UINT32", omit=true }, + { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, + { address=7, name="frequency", type="UINT32", scale=0.1 }, + { address=8, name="power_factor", type="INT64", scale=0.01 }, + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + + [[inputs.modbus.request]] + ## Input example with type conversions + slave_id = 1 + byte_order = "ABCD" + register = "input" + fields = [ + { address=0, name="rpm", type="INT16" }, # will result in INT64 field + { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field + { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field + { address=4, name="hours", type="UINT32" }, # will result in UIN64 field + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + ## Enable workarounds required by some devices to work correctly # [inputs.modbus.workarounds] ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. @@ -103,17 +203,33 @@ Registers via Modbus TCP or Modbus RTU/ASCII. 
# close_connection_after_gather = false ``` -### Notes +## Notes + You can debug Modbus connection issues by enabling `debug_connection`. To see those debug messages Telegraf has to be started with debugging enabled (i.e. with `--debug` option). Please be aware that connection tracing will produce a lot of messages and should **NOT** be used in production environments. Please use `pause_between_requests` with care. Especially make sure that the total gather time, including the pause(s), does not exceed the configured collection interval. Note that pauses add up if multiple requests are sent! -### Metrics +## Configuration styles + +The modbus plugin supports multiple configuration styles that can be set using the `configuration_type` setting. The different styles are described below. Please note that styles cannot be mixed, i.e. only the settings belonging to the configured `configuration_type` are used for constructing _modbus_ requests and creating metrics. + +Directly jump to the styles: + +- [original / register plugin style](#register-configuration-style) +- [per-request style](#request-configuration-style) + +--- + +### `register` configuration style + +This is the original style used by this plugin. It allows a per-register configuration for a single slave-device. + +#### Metrics Metrics are custom and configured using the `discrete_inputs`, `coils`, `holding_register` and `input_registers` options. -### Usage of `data_type` +#### Usage of `data_type` The field `data_type` defines the representation of the data value on input from the modbus registers. The input values are then converted from the given `data_type` to a type that is appropriate when @@ -122,16 +238,16 @@ integer or floating-point-number. The size of the output type is assumed to be large enough for all supported input types. The mapping from the input type to the output type is fixed and cannot be configured. -#### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` +##### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` These types are used for integer input values. Select the one that matches your modbus data source. -#### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` +##### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` Use these types if your modbus registers contain a value that is encoded in this format. These types always include the sign and therefore no unsigned variant exists. -#### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) +##### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) These types are handled as an integer type on input, but are converted to floating point representation for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point @@ -148,9 +264,85 @@ with N decimal places'. (FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion from unsigned values). -### Trouble shooting +--- + +### `request` configuration style + +This style can be used to specify the modbus requests directly. It allows specifying multiple `[[inputs.modbus.request]]` sections including multiple slave-devices. This way, _modbus_ gateway devices can be queried. Please note that _requests_ might be split for non-consecutive addresses. If you want to avoid this behavior please add _fields_ with the `omit` flag set, filling the gaps between addresses. + +#### Slave device + +You can use the `slave_id` setting to specify the ID of the slave device to query. It should be specified for each request and defaults to zero otherwise.
Please note that only one `slave_id` can be specified per request. + +#### Byte order of the register + +The `byte_order` setting specifies the byte- and word-order of the registers. It can be set to `ABCD` for _big endian (Motorola)_ or `DCBA` for _little endian (Intel)_ format as well as `BADC` and `CDAB` for _big endian_ or _little endian_ with _byte swap_. + +#### Register type + +The `register` setting specifies the modbus register-set to query and can be set to `coil`, `discrete`, `holding` or `input`. + +#### Per-request measurement setting + +You can specify the name of the measurement for the following field definitions using the `measurement` setting. If the setting is omitted, `modbus` is used. Furthermore, the measurement value can be overridden by each field individually. + +#### Field definitions + +Each `request` can contain a list of fields to collect from the modbus device. + +##### address + +A field is identified by an `address` that reflects the modbus register address. You can usually find the address values for the different datapoints in the datasheet of your modbus device. This is a mandatory setting. + +For _coil_ and _discrete input_ registers this setting specifies the __bit__ containing the value of the field. + +##### name + +Using the `name` setting you can specify the field-name in the metric as output by the plugin. This setting is ignored if the field's `omit` is set to `true` and can be omitted in this case. + +__Please note:__ There cannot be multiple fields with the same `name` in one metric identified by `measurement`, `slave_id` and `register`. + +##### register datatype + +The `type` setting specifies the datatype of the modbus register and can be set to `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64` or `UINT64` for integer types or `FLOAT32` and `FLOAT64` for IEEE 754 binary representations of floating point values. Usually the datatype of the register is listed in the datasheet of your modbus device in relation to the `address` described above. + +This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. + +##### scaling + +You can use the `scale` setting to scale the register values, e.g. if the register contains a fixed-point value in `UINT32` format with two decimal places. To convert the read register value to the actual value you can set `scale=0.01`. The scale is used as a factor, i.e. `field_value * scale`. + +This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. + +__Please note:__ The resulting field-type will be set to `FLOAT64` if no output format is specified. + +##### output datatype + +Using the `output` setting you can explicitly specify the output field datatype. The `output` type can be `INT64`, `UINT64` or `FLOAT64`. If not set explicitly, the output type is guessed as follows: if `scale` is set to a non-zero value, the output type is `FLOAT64`. Otherwise, the output type corresponds to the register datatype _class_, i.e. `INT*` will result in `INT64`, `UINT*` in `UINT64` and `FLOAT*` in `FLOAT64`. + +This setting is ignored if the field's `omit` is set to `true` or if the `register` type is a bit-type (`coil` or `discrete`) and can be omitted in these cases. For `coil` and `discrete` registers the field-value is output as zero or one in `UINT16` format.
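
To make the type-inference rules above concrete, here is a minimal, self-contained Go sketch. It is illustrative only and not the plugin's internal code; `inferOutputType` is a hypothetical helper that applies the stated rules (an explicit `output` wins, a non-zero `scale` forces `FLOAT64`, otherwise the 64-bit class of the input type is used) to the field examples from the sample configuration.

```go
package main

import (
	"fmt"
	"strings"
)

// inferOutputType mirrors the rules described above: an explicit "output"
// setting is used as-is, a non-zero scale forces FLOAT64, and otherwise the
// output follows the input type's class (INT* -> INT64, UINT* -> UINT64,
// FLOAT* -> FLOAT64).
func inferOutputType(inputType string, scale float64, explicit string) (string, error) {
	if explicit != "" {
		return explicit, nil
	}
	if scale != 0.0 {
		return "FLOAT64", nil
	}
	switch {
	case strings.HasPrefix(inputType, "UINT"):
		return "UINT64", nil
	case strings.HasPrefix(inputType, "INT"):
		return "INT64", nil
	case strings.HasPrefix(inputType, "FLOAT"):
		return "FLOAT64", nil
	}
	return "", fmt.Errorf("unknown input type %q", inputType)
}

func main() {
	fields := []struct {
		name, inType, out string
		scale             float64
	}{
		{name: "rpm", inType: "INT16"},                     // -> INT64
		{name: "temperature", inType: "INT16", scale: 0.1}, // -> FLOAT64 (scale set)
		{name: "force", inType: "INT32", out: "FLOAT64"},   // -> FLOAT64 (explicit output)
		{name: "hours", inType: "UINT32"},                  // -> UINT64
	}
	for _, f := range fields {
		outType, err := inferOutputType(f.inType, f.scale, f.out)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%-12s %-7s scale=%-4v -> %s\n", f.name, f.inType, f.scale, outType)
	}
}
```

The expected results match the comments in the example configuration: unscaled integer fields keep their integer class, while scaled or explicitly-typed fields end up as `FLOAT64`.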
+ +#### Per-field measurement setting + +The `measurement` setting can be used to override the measurement name on a per-field basis. This might be useful if you want to split the fields of one request into multiple measurements. If not specified, the value specified in the [`request` section](#per-request-measurement-setting) is used or, if that is also omitted, `modbus`. + +This setting is ignored if the field's `omit` is set to `true` and can be omitted in this case. + +#### Omitting a field + +When specifying `omit=true`, the corresponding field will be ignored when collecting the metric but is taken into account when constructing the modbus requests. This way, you can fill "holes" in the addresses to construct consecutive address ranges resulting in a single request. Using a single modbus request can be beneficial as the values are all collected at the same point in time. + +#### Tag definitions + +Each `request` can be accompanied by tags valid for this request. +__Please note:__ These tags take precedence over predefined tags such as `name`, `type` or `slave_id`. + +--- + +## Troubleshooting + +### Strange data -#### Strange data Modbus documentation is often a mess. People confuse memory-address (starts at one) and register address (starts at zero) or stay unclear about the used word-order. Furthermore, there are some non-standard implementations that also swap the bytes within the register word (16-bit). @@ -163,8 +355,10 @@ In case you get an `exception '2' (illegal data address)` error you might try to In case you see strange values, the `byte_order` might be off. You can either probe all combinations (`ABCD`, `CDAB`, `BADC` or `DCBA`) or you set `byte_order="ABCD" data_type="UINT32"` and use the resulting value(s) in an online converter like [this](https://www.scadacore.com/tools/programming-calculators/online-hex-converter/). This is especially useful if you don't want to mess with the device, deal with 64-bit values and/or don't know the `data_type` of your register (e.g. fixed-point floating values vs. IEEE floating point). If your data still looks corrupted, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). +If nothing helps, please post your configuration, error message and/or the output of `byte_order="ABCD" data_type="UINT32"` to one of the telegraf support channels (forum, slack or as issue). + +### Workarounds -#### Workarounds Some Modbus devices need special read characteristics when reading data and will fail otherwise. For example, there are certain serial devices that need a certain pause between register read requests. Others might only offer a limited number of simultaneously connected devices, like serial devices or some ModbusTCP devices. In case you need to access those devices in parallel you might want to disconnect immediately after the plugin finished reading. To allow this plugin to also handle those "special" devices there are the `workarounds` configuration options. In case your documentation states certain read requirements or you get read timeouts or other read errors you might want to try one or more workaround options. @@ -172,7 +366,7 @@ If you find that other/more workarounds are required for your device, please let In case your device needs a workaround that is not yet implemented, please open an issue or submit a pull-request.
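
Since probing byte orders is the most common troubleshooting step, the following small Go sketch shows why a wrong order produces the "strange values" symptom described above. It is purely illustrative and makes no claim about the plugin's internal converters; `decodeUint32` is a hypothetical helper that reinterprets the same four register bytes under each of the supported orders.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeUint32 interprets two consecutive 16-bit registers (four bytes) under
// one of the byte/word orders discussed above. Illustrative only; this is not
// the plugin's internal converter.
func decodeUint32(b []byte, order string) uint32 {
	switch order {
	case "ABCD": // big endian (Motorola)
		return binary.BigEndian.Uint32(b)
	case "DCBA": // little endian (Intel)
		return binary.LittleEndian.Uint32(b)
	case "BADC": // big endian with byte swap
		return binary.BigEndian.Uint32([]byte{b[1], b[0], b[3], b[2]})
	case "CDAB": // little endian with byte swap (word-swapped big endian)
		return binary.BigEndian.Uint32([]byte{b[2], b[3], b[0], b[1]})
	}
	return 0
}

func main() {
	// The same raw register bytes, decoded under every supported order.
	raw := []byte{0x00, 0x01, 0x86, 0xA0} // 100000 if the device really uses ABCD
	for _, order := range []string{"ABCD", "DCBA", "BADC", "CDAB"} {
		fmt.Printf("byte_order=%s -> %d\n", order, decodeUint32(raw, order))
	}
}
```

Only one of the probed orders will yield a plausible reading for your device; use that value for the `byte_order` setting.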
-### Example Output +## Example Output ```sh $ ./telegraf -config telegraf.conf -input-filter modbus -test diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go index cbf36cab15524..552cc3ff63727 100644 --- a/plugins/inputs/modbus/configuration.go +++ b/plugins/inputs/modbus/configuration.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import "fmt" @@ -12,6 +15,7 @@ const ( type Configuration interface { Check() error Process() (map[byte]requestSet, error) + SampleConfigPart() string } func removeDuplicates(elements []uint16) []uint16 { @@ -33,7 +37,7 @@ func normalizeInputDatatype(dataType string) (string, error) { case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": return dataType, nil } - return "unknown", fmt.Errorf("unknown type %q", dataType) + return "unknown", fmt.Errorf("unknown input type %q", dataType) } func normalizeOutputDatatype(dataType string) (string, error) { @@ -43,7 +47,7 @@ func normalizeOutputDatatype(dataType string) (string, error) { case "INT64", "UINT64", "FLOAT64": return dataType, nil } - return "unknown", fmt.Errorf("unknown type %q", dataType) + return "unknown", fmt.Errorf("unknown output type %q", dataType) } func normalizeByteOrder(byteOrder string) (string, error) { diff --git a/plugins/inputs/modbus/configuration_original.go b/plugins/inputs/modbus/configuration_register.go similarity index 69% rename from plugins/inputs/modbus/configuration_original.go rename to plugins/inputs/modbus/configuration_register.go index cf4b2e1241b8e..2e1ad34a65247 100644 --- a/plugins/inputs/modbus/configuration_original.go +++ b/plugins/inputs/modbus/configuration_register.go @@ -1,9 +1,62 @@ +//go:build !openbsd +// +build !openbsd + package modbus import ( "fmt" ) +const sampleConfigPartPerRegister = ` + ## Per register definition + ## + + ## Digital Variables, Discrete Inputs and Coils + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## address - variable address + + discrete_inputs = [ + { name = "start", address = [0]}, + { name = "stop", address = [1]}, + { name = "reset", address = [2]}, + { name = "emergency_stop", address = [3]}, + ] + coils = [ + { name = "motor1_run", address = [0]}, + { name = "motor1_jog", address = [1]}, + { name = "motor1_stop", address = [2]}, + ] + + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, + ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) + ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) + ## scale - the final numeric variable representation + ## address - variable address + + holding_registers = [ + { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, + { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, + { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, + { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, + { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, + { name = "power", byte_order = "ABCD", data_type 
= "UFIXED", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] +` + type fieldDefinition struct { Measurement string `toml:"measurement"` Name string `toml:"name"` @@ -21,23 +74,43 @@ type ConfigurationOriginal struct { InputRegisters []fieldDefinition `toml:"input_registers"` } +func (c *ConfigurationOriginal) SampleConfigPart() string { + return sampleConfigPartPerRegister +} + +func (c *ConfigurationOriginal) Check() error { + if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + return err + } + + return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) +} + func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) { - coil, err := c.initRequests(c.Coils, cCoils, maxQuantityCoils) + coil, err := c.initRequests(c.Coils, maxQuantityCoils) if err != nil { return nil, err } - discrete, err := c.initRequests(c.DiscreteInputs, cDiscreteInputs, maxQuantityDiscreteInput) + discrete, err := c.initRequests(c.DiscreteInputs, maxQuantityDiscreteInput) if err != nil { return nil, err } - holding, err := c.initRequests(c.HoldingRegisters, cHoldingRegisters, maxQuantityHoldingRegisters) + holding, err := c.initRequests(c.HoldingRegisters, maxQuantityHoldingRegisters) if err != nil { return nil, err } - input, err := c.initRequests(c.InputRegisters, cInputRegisters, maxQuantityInputRegisters) + input, err := c.initRequests(c.InputRegisters, maxQuantityInputRegisters) if err != nil { return nil, err } @@ -52,28 +125,12 @@ func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) { }, nil } -func (c *ConfigurationOriginal) Check() error { - if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { - return err - } - - if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { - return err - } - - if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { - return err - } - - return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) -} - -func (c *ConfigurationOriginal) initRequests(fieldDefs []fieldDefinition, registerType string, maxQuantity uint16) ([]request, error) { +func (c *ConfigurationOriginal) initRequests(fieldDefs []fieldDefinition, maxQuantity uint16) ([]request, error) { fields, err := c.initFields(fieldDefs) if err != nil { return nil, err } - return newRequestsFromFields(fields, c.SlaveID, registerType, maxQuantity), nil + return groupFieldsToRequests(fields, nil, maxQuantity), nil } func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field, error) { @@ -104,7 +161,6 @@ func (c *ConfigurationOriginal) newFieldFromDefinition(def fieldDefinition) (fie f := field{ measurement: def.Measurement, name: def.Name, - scale: def.Scale, address: def.Address[0], length: uint16(len(def.Address)), } diff --git a/plugins/inputs/modbus/configuration_request.go b/plugins/inputs/modbus/configuration_request.go new file mode 100644 index 0000000000000..a1184606291e7 --- /dev/null +++ b/plugins/inputs/modbus/configuration_request.go @@ -0,0 +1,386 @@ 
+//go:build !openbsd +// +build !openbsd + +package modbus + +import ( + "fmt" + "hash/maphash" +) + +const sampleConfigPartPerRequest = ` + ## Per request definition + ## + + ## Define a request sent to the device + ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. + # [[inputs.modbus.request]] + ## ID of the modbus slave device to query. + ## If you need to query multiple slave-devices, create several "request" definitions. + # slave_id = 0 + + ## Byte order of the data. + ## |---ABCD or MSW-BE -- Big Endian (Motorola) + ## |---DCBA or LSW-LE -- Little Endian (Intel) + ## |---BADC or MSW-LE -- Big Endian with byte swap + ## |---CDAB or LSW-BE -- Little Endian with byte swap + # byte_order = "ABCD" + + ## Type of the register for the request + ## Can be "coil", "discrete", "holding" or "input" + # register = "holding" + + ## Name of the measurement. + ## Can be overriden by the individual field definitions. Defaults to "modbus" + # measurement = "modbus" + + ## Field definitions + ## Analog Variables, Input Registers and Holding Registers + ## address - address of the register to query. For coil and discrete inputs this is the bit address. + ## name *1 - field name + ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and + ## FLOAT32, FLOAT64 (IEEE 754 binary representation) + ## scale *1,2 - (optional) factor to scale the variable with + ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if + ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## measurement *1 - (optional) measurement name, defaults to the setting of the request + ## omit - (optional) omit this field. Useful to leave out single values when querying many registers + ## with a single request. Defaults to "false". + ## + ## *1: Those fields are ignored if field is omitted ("omit"=true) + ## + ## *2: Thise fields are ignored for both "coil" and "discrete"-input type of registers. For those register types + ## the fields are output as zero or one in UINT64 format by default. + + ## Coil / discrete input example + # fields = [ + # { address=0, name="motor1_run"}, + # { address=1, name="jog", measurement="motor"}, + # { address=2, name="motor1_stop", omit=true}, + # { address=3, name="motor1_overheating"}, + # ] + + ## Per-request tags + ## These tags take precedence over predefined tags. + # [[inputs.modbus.request.tags]] + # name = "value" + + ## Holding / input example + ## All of those examples will result in FLOAT64 field outputs + # fields = [ + # { address=0, name="voltage", type="INT16", scale=0.1 }, + # { address=1, name="current", type="INT32", scale=0.001 }, + # { address=3, name="power", type="UINT32", omit=true }, + # { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, + # { address=7, name="frequency", type="UINT32", scale=0.1 }, + # { address=8, name="power_factor", type="INT64", scale=0.01 }, + # ] + + ## Holding / input example with type conversions + # fields = [ + # { address=0, name="rpm", type="INT16" }, # will result in INT64 field + # { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field + # { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field + # { address=4, name="hours", type="UINT32" }, # will result in UIN64 field + # ] + + ## Per-request tags + ## These tags take precedence over predefined tags. 
+ # [[inputs.modbus.request.tags]] + # name = "value" +` + +type requestFieldDefinition struct { + Address uint16 `toml:"address"` + Name string `toml:"name"` + InputType string `toml:"type"` + Scale float64 `toml:"scale"` + OutputType string `toml:"output"` + Measurement string `toml:"measurement"` + Omit bool `toml:"omit"` +} + +type requestDefinition struct { + SlaveID byte `toml:"slave_id"` + ByteOrder string `toml:"byte_order"` + RegisterType string `toml:"register"` + Measurement string `toml:"measurement"` + Fields []requestFieldDefinition `toml:"fields"` + Tags map[string]string `toml:"tags"` +} + +type ConfigurationPerRequest struct { + Requests []requestDefinition `toml:"request"` +} + +func (c *ConfigurationPerRequest) SampleConfigPart() string { + return sampleConfigPartPerRequest +} + +func (c *ConfigurationPerRequest) Check() error { + seed := maphash.MakeSeed() + seenFields := make(map[uint64]bool) + + for _, def := range c.Requests { + // Check byte order of the data + switch def.ByteOrder { + case "": + def.ByteOrder = "ABCD" + case "ABCD", "DCBA", "BADC", "CDAB", "MSW-BE", "MSW-LE", "LSW-LE", "LSW-BE": + default: + return fmt.Errorf("unknown byte-order %q", def.ByteOrder) + } + + // Check register type + switch def.RegisterType { + case "": + def.RegisterType = "holding" + case "coil", "discrete", "holding", "input": + default: + return fmt.Errorf("unknown register-type %q", def.RegisterType) + } + + // Set the default for measurement if required + if def.Measurement == "" { + def.Measurement = "modbus" + } + + // Check the fields + for fidx, f := range def.Fields { + // Check the input type for all fields except the bit-field ones. + // We later need the type (even for omitted fields) to determine the length. + if def.RegisterType == cHoldingRegisters || def.RegisterType == cInputRegisters { + switch f.InputType { + case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": + default: + return fmt.Errorf("unknown register data-type %q for field %q", f.InputType, f.Name) + } + } + + // Other properties don't need to be checked for omitted fields + if f.Omit { + continue + } + + // Name is mandatory + if f.Name == "" { + return fmt.Errorf("empty field name in request for slave %d", def.SlaveID) + } + + // Check fields only relevant for non-bit register types + if def.RegisterType == cHoldingRegisters || def.RegisterType == cInputRegisters { + // Check output type + switch f.OutputType { + case "", "INT64", "UINT64", "FLOAT64": + default: + return fmt.Errorf("unknown output data-type %q for field %q", f.OutputType, f.Name) + } + } + + // Handle the default for measurement + if f.Measurement == "" { + f.Measurement = def.Measurement + } + def.Fields[fidx] = f + + // Check for duplicate field definitions + id, err := c.fieldID(seed, def.SlaveID, def.RegisterType, def.Measurement, f.Name) + if err != nil { + return fmt.Errorf("cannot determine field id for %q: %v", f.Name, err) + } + if seenFields[id] { + return fmt.Errorf("field %q duplicated in measurement %q (slave %d/%q)", f.Name, f.Measurement, def.SlaveID, def.RegisterType) + } + seenFields[id] = true + } + } + + return nil +} + +func (c *ConfigurationPerRequest) Process() (map[byte]requestSet, error) { + result := map[byte]requestSet{} + + for _, def := range c.Requests { + // Set default + if def.RegisterType == "" { + def.RegisterType = "holding" + } + + // Construct the fields + isTyped := def.RegisterType == "holding" || def.RegisterType == "input" + fields, err := c.initFields(def.Fields, 
isTyped, def.ByteOrder) + if err != nil { + return nil, err + } + + // Make sure we have a set to work with + set, found := result[def.SlaveID] + if !found { + set = requestSet{ + coil: []request{}, + discrete: []request{}, + holding: []request{}, + input: []request{}, + } + } + + switch def.RegisterType { + case "coil": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityCoils) + set.coil = append(set.coil, requests...) + case "discrete": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityDiscreteInput) + set.discrete = append(set.discrete, requests...) + case "holding": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityHoldingRegisters) + set.holding = append(set.holding, requests...) + case "input": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityInputRegisters) + set.input = append(set.input, requests...) + default: + return nil, fmt.Errorf("unknown register type %q", def.RegisterType) + } + result[def.SlaveID] = set + } + + return result, nil +} + +func (c *ConfigurationPerRequest) initFields(fieldDefs []requestFieldDefinition, typed bool, byteOrder string) ([]field, error) { + // Construct the fields from the field definitions + fields := make([]field, 0, len(fieldDefs)) + for _, def := range fieldDefs { + f, err := c.newFieldFromDefinition(def, typed, byteOrder) + if err != nil { + return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) + } + fields = append(fields, f) + } + + return fields, nil +} + +func (c *ConfigurationPerRequest) newFieldFromDefinition(def requestFieldDefinition, typed bool, byteOrder string) (field, error) { + var err error + + fieldLength := uint16(1) + if typed { + if fieldLength, err = c.determineFieldLength(def.InputType); err != nil { + return field{}, err + } + } + + // Initialize the field + f := field{ + measurement: def.Measurement, + name: def.Name, + address: def.Address, + length: fieldLength, + omit: def.Omit, + } + + // No more processing for un-typed (coil and discrete registers) or omitted fields + if !typed || def.Omit { + return f, nil + } + + // Automagically determine the output type... + if def.OutputType == "" { + if def.Scale == 0.0 { + // For non-scaling cases we should choose the output corresponding to the input class + // i.e. INT64 for INT*, UINT64 for UINT* etc. 
+ var err error + if def.OutputType, err = c.determineOutputDatatype(def.InputType); err != nil { + return field{}, err + } + } else { + // For scaling cases we always want FLOAT64 by default + def.OutputType = "FLOAT64" + } + } + + // Setting default byte-order + if byteOrder == "" { + byteOrder = "ABCD" + } + + // Normalize the data relevant for determining the converter + inType, err := normalizeInputDatatype(def.InputType) + if err != nil { + return field{}, err + } + outType, err := normalizeOutputDatatype(def.OutputType) + if err != nil { + return field{}, err + } + order, err := normalizeByteOrder(byteOrder) + if err != nil { + return field{}, err + } + + f.converter, err = determineConverter(inType, order, outType, def.Scale) + if err != nil { + return field{}, err + } + + return f, nil +} + +func (c *ConfigurationPerRequest) fieldID(seed maphash.Seed, slave byte, register, measurement, name string) (uint64, error) { + var mh maphash.Hash + mh.SetSeed(seed) + + if err := mh.WriteByte(slave); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(register); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(measurement); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(name); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + + return mh.Sum64(), nil +} + +func (c *ConfigurationPerRequest) determineOutputDatatype(input string) (string, error) { + // Handle our special types + switch input { + case "INT16", "INT32", "INT64": + return "INT64", nil + case "UINT16", "UINT32", "UINT64": + return "UINT64", nil + case "FLOAT32", "FLOAT64": + return "FLOAT64", nil + } + return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) +} + +func (c *ConfigurationPerRequest) determineFieldLength(input string) (uint16, error) { + // Handle our special types + switch input { + case "INT16", "UINT16": + return 1, nil + case "INT32", "UINT32", "FLOAT32": + return 2, nil + case "INT64", "UINT64", "FLOAT64": + return 4, nil + } + return 0, fmt.Errorf("invalid input datatype %q for determining field length", input) +} diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index c5dfee2f6cbe6..beb6fd0019f2d 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import ( @@ -36,7 +39,10 @@ type Modbus struct { Workarounds ModbusWorkarounds `toml:"workarounds"` Log telegraf.Logger `toml:"-"` // Register configuration + ConfigurationType string `toml:"configuration_type"` ConfigurationOriginal + ConfigurationPerRequest + // Connection handling client mb.Client handler mb.ClientHandler @@ -57,9 +63,9 @@ type requestSet struct { type field struct { measurement string name string - scale float64 address uint16 length uint16 + omit bool converter fieldConverterFunc value interface{} } @@ -72,7 +78,7 @@ const ( ) const description = `Retrieve data from MODBUS slave devices` -const sampleConfig = ` +const sampleConfigStart = ` ## Connection Configuration ## ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or @@ -102,7 +108,6 @@ const sampleConfig = ` # data_bits = 8 # parity = "N" # stop_bits = 1 - # transmission_mode = "RTU" ## Trace the connection to the modbus 
device as debug messages ## Note: You have to enable telegraf's debug mode to see those messages! @@ -113,54 +118,12 @@ const sampleConfig = ` ## For Serial you can choose between "RTU" and "ASCII" # transmission_mode = "RTU" - ## Measurements - ## - - ## Digital Variables, Discrete Inputs and Coils - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## address - variable address - - discrete_inputs = [ - { name = "start", address = [0]}, - { name = "stop", address = [1]}, - { name = "reset", address = [2]}, - { name = "emergency_stop", address = [3]}, - ] - coils = [ - { name = "motor1_run", address = [0]}, - { name = "motor1_jog", address = [1]}, - { name = "motor1_stop", address = [2]}, - ] - - ## Analog Variables, Input Registers and Holding Registers - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## byte_order - the ordering of bytes - ## |---AB, ABCD - Big Endian - ## |---BA, DCBA - Little Endian - ## |---BADC - Mid-Big Endian - ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, - ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) - ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) - ## scale - the final numeric variable representation - ## address - variable address - - holding_registers = [ - { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, - { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, - { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, - { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, - { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, - { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, - ] - input_registers = [ - { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, - { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, - { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, - ] - + ## Define the configuration schema + ## |---register -- define fields per register type in the original style (only supports one slave ID) + ## |---request -- define fields on a requests base + configuration_type = "register" +` +const sampleConfigEnd = ` ## Enable workarounds required by some devices to work correctly # [inputs.modbus.workarounds] ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. 
@@ -173,7 +136,18 @@ const sampleConfig = ` // SampleConfig returns a basic configuration for the plugin func (m *Modbus) SampleConfig() string { - return sampleConfig + configs := []Configuration{} + cfgOriginal := m.ConfigurationOriginal + cfgPerRequest := m.ConfigurationPerRequest + configs = append(configs, &cfgOriginal, &cfgPerRequest) + + totalConfig := sampleConfigStart + for _, c := range configs { + totalConfig += c.SampleConfigPart() + "\n" + } + totalConfig += "\n" + totalConfig += sampleConfigEnd + return totalConfig } // Description returns a short description of what the plugin does @@ -191,14 +165,25 @@ func (m *Modbus) Init() error { return fmt.Errorf("retries cannot be negative") } + // Determine the configuration style + var cfg Configuration + switch m.ConfigurationType { + case "", "register": + cfg = &m.ConfigurationOriginal + case "request": + cfg = &m.ConfigurationPerRequest + default: + return fmt.Errorf("unknown configuration type %q", m.ConfigurationType) + } + // Check and process the configuration - if err := m.ConfigurationOriginal.Check(); err != nil { - return fmt.Errorf("original configuraton invalid: %v", err) + if err := cfg.Check(); err != nil { + return fmt.Errorf("configuraton invalid: %v", err) } - r, err := m.ConfigurationOriginal.Process() + r, err := cfg.Process() if err != nil { - return fmt.Errorf("cannot process original configuraton: %v", err) + return fmt.Errorf("cannot process configuraton: %v", err) } m.requests = r @@ -329,7 +314,6 @@ func (m *Modbus) initClient() error { return fmt.Errorf("invalid controller %q", m.Controller) } - m.handler.SetSlave(m.SlaveID) m.client = mb.NewClient(m.handler) m.isConnected = false @@ -350,7 +334,8 @@ func (m *Modbus) disconnect() error { } func (m *Modbus) gatherFields() error { - for _, requests := range m.requests { + for slaveID, requests := range m.requests { + m.handler.SetSlave(slaveID) if err := m.gatherRequestsCoil(requests.coil); err != nil { return err } @@ -477,6 +462,15 @@ func (m *Modbus) gatherRequestsInput(requests []request) error { func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, tags map[string]string, requests []request) { grouper := metric.NewSeriesGrouper() for _, request := range requests { + // Collect tags from global and per-request + rtags := map[string]string{} + for k, v := range tags { + rtags[k] = v + } + for k, v := range request.tags { + rtags[k] = v + } + for _, field := range request.fields { // In case no measurement was specified we use "modbus" as default measurement := "modbus" @@ -485,7 +479,7 @@ func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, ta } // Group the data by series - if err := grouper.Add(measurement, tags, timestamp, field.name, field.value); err != nil { + if err := grouper.Add(measurement, rtags, timestamp, field.name, field.value); err != nil { acc.AddError(fmt.Errorf("cannot add field %q for measurement %q: %v", field.name, measurement, err)) continue } diff --git a/plugins/inputs/modbus/modbus_openbsd.go b/plugins/inputs/modbus/modbus_openbsd.go new file mode 100644 index 0000000000000..c4df661dfbf23 --- /dev/null +++ b/plugins/inputs/modbus/modbus_openbsd.go @@ -0,0 +1,4 @@ +//go:build openbsd +// +build openbsd + +package modbus diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index b0b49b5711075..e35b8bdbe0146 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build 
!openbsd + package modbus import ( @@ -1097,3 +1100,763 @@ func TestRetryFailIllegal(t *testing.T) { require.Equal(t, "modbus: exception '1' (illegal function), function '129'", err.Error()) require.Equal(t, counter, 1) } + +func TestConfigurationRegister(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + ConfigurationType: "register", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "coil", + Address: []uint16{0}, + }, + } + modbus.DiscreteInputs = []fieldDefinition{ + { + Name: "discrete", + Address: []uint16{0}, + }, + } + modbus.HoldingRegisters = []fieldDefinition{ + { + Name: "holding", + Address: []uint16{0}, + DataType: "INT16", + ByteOrder: "AB", + Scale: 1.0, + }, + } + modbus.InputRegisters = []fieldDefinition{ + { + Name: "input", + Address: []uint16{0}, + DataType: "INT16", + ByteOrder: "AB", + Scale: 1.0, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, len(modbus.Coils)) + require.Len(t, modbus.requests[1].discrete, len(modbus.DiscreteInputs)) + require.Len(t, modbus.requests[1].holding, len(modbus.HoldingRegisters)) + require.Len(t, modbus.requests[1].input, len(modbus.InputRegisters)) +} + +func TestConfigurationPerRequest(t *testing.T) { + modbus := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + modbus.Requests = []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "coil-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-3", + Address: uint16(6), + }, + { + Name: "coil-4", + Address: uint16(7), + Omit: true, + }, + { + Name: "coil-5", + Address: uint16(8), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "discrete-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "holding-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "holding-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "input-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "input-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + 
require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, 2) + require.Len(t, modbus.requests[1].discrete, 1) + require.Len(t, modbus.requests[1].holding, 1) + require.Len(t, modbus.requests[1].input, 1) +} + +func TestConfigurationPerRequestWithTags(t *testing.T) { + modbus := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + modbus.Requests = []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "coil-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-3", + Address: uint16(6), + }, + { + Name: "coil-4", + Address: uint16(7), + Omit: true, + }, + { + Name: "coil-5", + Address: uint16(8), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "discrete-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "holding-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "holding-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "input-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "input-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, 2) + require.Len(t, modbus.requests[1].discrete, 1) + require.Len(t, modbus.requests[1].holding, 1) + require.Len(t, modbus.requests[1].input, 1) + + expectedTags := map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + } + require.Equal(t, expectedTags, modbus.requests[1].coil[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].coil[1].tags) + require.Equal(t, expectedTags, modbus.requests[1].discrete[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].holding[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].input[0].tags) +} + +func TestConfigurationPerRequestFail(t 
*testing.T) { + tests := []struct { + name string + requests []requestDefinition + errormsg string + }{ + { + name: "empty field name (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Address: uint16(15), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid byte-order (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "coil", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "duplicate fields (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"coil-0\" duplicated in measurement \"modbus\" (slave 1/\"coil\")", + }, + { + name: "duplicate fields multiple requests (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"coil-0\" duplicated in measurement \"foo\" (slave 1/\"coil\")", + }, + { + name: "invalid byte-order (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "discrete", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "duplicate fields (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"discrete-0\" duplicated in measurement \"modbus\" (slave 1/\"discrete\")", + }, + { + name: "duplicate fields multiple requests (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"discrete-0\" duplicated in measurement \"foo\" (slave 1/\"discrete\")", + }, + { + name: "invalid byte-order (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "holding", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "invalid field name (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Address: uint16(0), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid field input type (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + 
RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"holding-0\" failed: invalid input datatype \"\" for determining field length", + }, + { + name: "invalid field output type (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "UINT16", + OutputType: "UINT8", + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"holding-0\" failed: unknown output type \"UINT8\"", + }, + { + name: "duplicate fields (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + }, + { + Name: "holding-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"holding-0\" duplicated in measurement \"modbus\" (slave 1/\"holding\")", + }, + { + name: "duplicate fields multiple requests (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"holding-0\" duplicated in measurement \"foo\" (slave 1/\"holding\")", + }, + { + name: "invalid byte-order (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "input", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "invalid field name (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Address: uint16(0), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid field input type (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"input-0\" failed: invalid input datatype \"\" for determining field length", + }, + { + name: "invalid field output type (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "UINT16", + OutputType: "UINT8", + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"input-0\" failed: unknown output type \"UINT8\"", + }, + { + name: "duplicate fields (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + }, + { + Name: "input-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"input-0\" duplicated in measurement \"modbus\" (slave 1/\"input\")", + }, + { + name: "duplicate fields multiple requests (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: 
[]requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"input-0\" duplicated in measurement \"foo\" (slave 1/\"input\")", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + plugin.Requests = tt.requests + + err := plugin.Init() + require.Error(t, err) + require.Equal(t, tt.errormsg, err.Error()) + require.Empty(t, plugin.requests) + }) + } +} diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go index 125aebe2eb8c4..3d5a981432f26 100644 --- a/plugins/inputs/modbus/request.go +++ b/plugins/inputs/modbus/request.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import "sort" @@ -6,9 +9,25 @@ type request struct { address uint16 length uint16 fields []field + tags map[string]string } -func newRequestsFromFields(fields []field, slaveID byte, registerType string, maxBatchSize uint16) []request { +func newRequest(f field, tags map[string]string) request { + r := request{ + address: f.address, + length: f.length, + fields: []field{f}, + tags: map[string]string{}, + } + + // Copy the tags + for k, v := range tags { + r.tags[k] = v + } + return r +} + +func groupFieldsToRequests(fields []field, tags map[string]string, maxBatchSize uint16) []request { if len(fields) == 0 { return nil } @@ -26,31 +45,25 @@ func newRequestsFromFields(fields []field, slaveID byte, registerType string, ma // and the given maximum chunk sizes. var requests []request - current := request{ - address: fields[0].address, - length: fields[0].length, - fields: []field{fields[0]}, - } - + current := newRequest(fields[0], tags) for _, f := range fields[1:] { // Check if we need to interrupt the current chunk and require a new one needInterrupt := f.address != current.address+current.length // not consecutive needInterrupt = needInterrupt || f.length+current.length > maxBatchSize // too large if !needInterrupt { - // Still save to add the field to the current request + // Still safe to add the field to the current request current.length += f.length - current.fields = append(current.fields, f) // TODO: omit the field with a future flag + if !f.omit { + // Omit adding the field but use it for constructing the request. 
+ current.fields = append(current.fields, f) + } continue } // Finish the current request, add it to the list and construct a new one requests = append(requests, current) - current = request{ - address: f.address, - length: f.length, - fields: []field{f}, - } + current = newRequest(f, tags) } requests = append(requests, current) diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go index 556f7b423c13d..55acdfecf544b 100644 --- a/plugins/inputs/modbus/type_conversions.go +++ b/plugins/inputs/modbus/type_conversions.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import "fmt" diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go index 7766e1d0edafe..5931fc6895edc 100644 --- a/plugins/inputs/modbus/type_conversions16.go +++ b/plugins/inputs/modbus/type_conversions16.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import ( @@ -9,9 +12,9 @@ type convert16 func([]byte) uint16 func endianessConverter16(byteOrder string) (convert16, error) { switch byteOrder { - case "ABCD": // Big endian (Motorola) + case "ABCD", "CDAB": // Big endian (Motorola) return binary.BigEndian.Uint16, nil - case "DCBA": // Little endian (Intel) + case "DCBA", "BADC": // Little endian (Intel) return binary.LittleEndian.Uint16, nil } return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go index 1a0255ef3e8e0..80f7ee6a0c6c7 100644 --- a/plugins/inputs/modbus/type_conversions32.go +++ b/plugins/inputs/modbus/type_conversions32.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import ( diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go index f72dfdf3af66d..feef4112b7f2c 100644 --- a/plugins/inputs/modbus/type_conversions64.go +++ b/plugins/inputs/modbus/type_conversions64.go @@ -1,3 +1,6 @@ +//go:build !openbsd +// +build !openbsd + package modbus import ( diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 15a474e6bb66a..678d80c73184d 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -2,7 +2,7 @@ All MongoDB server versions from 2.6 and higher are supported. -### Configuration: +## Configuration ```toml [[inputs.mongodb]] @@ -11,7 +11,7 @@ All MongoDB server versions from 2.6 and higher are supported. ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status. ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -40,20 +40,22 @@ All MongoDB server versions from 2.6 and higher are supported. # insecure_skip_verify = false ``` -#### Permissions: +### Permissions If your MongoDB instance has access control enabled you will need to connect as a user with sufficient rights. With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. 
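+For example, with MongoDB 3.4 and higher the role could be granted to an
+existing monitoring user roughly like this (a minimal sketch; `"user"` stands
+for whichever account Telegraf connects as, and the command must be run against
+the database where that user was defined):
+
+```shell
+> db.grantRolesToUser("user", [{role: "clusterMonitor", db: "admin"}])
+```
+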
In version 3.2 you may also need these additional permissions: -``` + +```shell > db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}]) ``` If the user is missing required privileges you may see an error in the Telegraf logs similar to: -``` + +```shell Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` @@ -61,7 +63,7 @@ Some permission related errors are logged at debug level, you can check these messages by setting `debug = true` in the agent section of the configuration or by running Telegraf with the `--debug` argument. -### Metrics: +### Metrics - mongodb - tags: @@ -231,7 +233,7 @@ by running Telegraf with the `--debug` argument. - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -+ mongodb_db_stats +- mongodb_db_stats - tags: - db_name - hostname @@ -293,8 +295,9 @@ by running Telegraf with the `--debug` argument. - commands_time (integer) - commands_count (integer) -### Example Output: -``` +### Example Output + +```shell mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_to
tal_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_que
ries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 0366636200064..3417252ddeb59 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -44,7 +44,7 @@ var sampleConfig = ` ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + servers = ["mongodb://127.0.0.1:27017?connect=direct"] ## When true, collect cluster status ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 378268916054d..f7f891ec775bf 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -65,7 +66,7 @@ func TestAddNonReplStats(t *testing.T) { d.flush(&acc) for key := range defaultStats { - assert.True(t, 
acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -86,7 +87,7 @@ func TestAddReplStats(t *testing.T) { d.flush(&acc) for key := range mmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -120,14 +121,14 @@ func TestAddWiredTigerStats(t *testing.T) { d.flush(&acc) for key := range wiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key), key) } for key := range wiredTigerExtStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } - assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) + require.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -147,7 +148,7 @@ func TestAddShardStats(t *testing.T) { d.flush(&acc) for key := range defaultShardStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -170,7 +171,7 @@ func TestAddLatencyStats(t *testing.T) { d.flush(&acc) for key := range defaultLatencyStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -192,7 +193,7 @@ func TestAddAssertsStats(t *testing.T) { d.flush(&acc) for key := range defaultAssertsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -227,7 +228,7 @@ func TestAddCommandsStats(t *testing.T) { d.flush(&acc) for key := range defaultCommandsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -263,7 +264,7 @@ func TestAddTCMallocStats(t *testing.T) { d.flush(&acc) for key := range defaultTCMallocStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -283,7 +284,7 @@ func TestAddStorageStats(t *testing.T) { d.flush(&acc) for key := range defaultStorageStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -313,15 +314,15 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { for key := range shardHostStats { - assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } - assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) + require.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) hostsFound = append(hostsFound, host) } sort.Strings(hostsFound) sort.Strings(expectedHosts) - assert.Equal(t, hostsFound, expectedHosts) + require.Equal(t, hostsFound, expectedHosts) } func TestStateTag(t *testing.T) { @@ -527,7 +528,7 @@ func TestAddTopStats(t *testing.T) { for range topStatLines { for key := range topDataStats { - assert.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + require.True(t, acc.HasInt64Field("mongodb_top_stats", key)) } } } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index c8fd9f7c15284..d2313e4088f82 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -6,9 +6,9 @@ package mongodb import ( "testing" - 
"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGetDefaultTags(t *testing.T) { @@ -37,7 +37,7 @@ func TestAddDefaultStats(t *testing.T) { require.NoError(t, err) for key := range defaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + require.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ea69c8d424f7c..2490ca2c1777c 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -903,7 +903,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) (int64, int64) { +func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) { d := newVal - oldVal if d < 0 { d = newVal @@ -1311,10 +1311,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // I'm the master returnVal.ReplLag = 0 break - } else { - // I'm secondary - me = member } + + // I'm secondary + me = member } else if member.State == 1 { // Master found master = member diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 9f6ef04892ac9..908b82de1b911 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -2,10 +2,8 @@ package mongodb import ( "testing" - //"time" - //"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLatencyStats(t *testing.T) { @@ -55,12 +53,12 @@ func TestLatencyStats(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiffZero(t *testing.T) { @@ -124,12 +122,12 @@ func TestLatencyStatsDiffZero(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiff(t *testing.T) { @@ -193,10 +191,10 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(59177981552)) - assert.Equal(t, sl.ReadLatency, int64(2255946760057)) - assert.Equal(t, sl.WriteLatency, int64(494479456987)) - assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, sl.CommandLatency, int64(59177981552)) + require.Equal(t, sl.ReadLatency, int64(2255946760057)) + 
require.Equal(t, sl.WriteLatency, int64(494479456987)) + require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) } diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index aa4a08b31bbc8..053e745982a5b 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -12,7 +12,7 @@ Minimum Version of Monit tested with is 5.16. [monit]: https://mmonit.com/ [httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT -### Configuration +## Configuration ```toml [[inputs.monit]] @@ -34,7 +34,7 @@ Minimum Version of Monit tested with is 5.16. # insecure_skip_verify = false ``` -### Metrics +## Metrics - monit_filesystem - tags: @@ -57,7 +57,7 @@ Minimum Version of Monit tested with is 5.16. - inode_usage - inode_total -+ monit_directory +- monit_directory - tags: - address - version @@ -88,7 +88,7 @@ Minimum Version of Monit tested with is 5.16. - size - permissions -+ monit_process +- monit_process - tags: - address - version @@ -132,7 +132,7 @@ Minimum Version of Monit tested with is 5.16. - protocol - type -+ monit_system +- monit_system - tags: - address - version @@ -169,9 +169,9 @@ Minimum Version of Monit tested with is 5.16. - status_code - monitoring_status_code - monitoring_mode_code - - permissions + - permissions -+ monit_program +- monit_program - tags: - address - version @@ -199,7 +199,7 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -+ monit_program +- monit_program - tags: - address - version @@ -227,8 +227,9 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -### Example Output -``` +## Example Output + +```shell monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000 monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index 1cb1a4ba57da9..051e0b36982fe 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -6,23 +6,24 @@ import ( "net/http" "time" + "golang.org/x/net/html/charset" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "golang.org/x/net/html/charset" ) const ( - fileSystem string = "0" - directory = "1" - file = "2" - process = "3" - remoteHost = "4" - system = "5" - fifo = "6" - program = "7" - network = "8" + fileSystem = "0" + directory = "1" + file = "2" + process 
= "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" ) var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} @@ -244,108 +245,109 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - if resp.StatusCode == 200 { - var status Status - decoder := xml.NewDecoder(resp.Body) - decoder.CharsetReader = charset.NewReaderLabel - if err := decoder.Decode(&status); err != nil { - return fmt.Errorf("error parsing input: %v", err) - } + if resp.StatusCode != 200 { + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) + } - tags := map[string]string{ - "version": status.Server.Version, - "source": status.Server.LocalHostname, - "platform_name": status.Platform.Name, - } + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } + + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } - for _, service := range status.Services { - fields := make(map[string]interface{}) - tags["status"] = serviceStatus(service) - fields["status_code"] = service.Status - tags["pending_action"] = pendingAction(service) - fields["pending_action_code"] = service.PendingAction - tags["monitoring_status"] = monitoringStatus(service) - fields["monitoring_status_code"] = service.MonitoringStatus - tags["monitoring_mode"] = monitoringMode(service) - fields["monitoring_mode_code"] = service.MonitorMode - tags["service"] = service.Name - if service.Type == fileSystem { - fields["mode"] = service.Mode - fields["block_percent"] = service.Block.Percent - fields["block_usage"] = service.Block.Usage - fields["block_total"] = service.Block.Total - fields["inode_percent"] = service.Inode.Percent - fields["inode_usage"] = service.Inode.Usage - fields["inode_total"] = service.Inode.Total - acc.AddFields("monit_filesystem", fields, tags) - } else if service.Type == directory { - fields["mode"] = service.Mode - acc.AddFields("monit_directory", fields, tags) - } else if service.Type == file { - fields["size"] = service.Size - fields["mode"] = service.Mode - acc.AddFields("monit_file", fields, tags) - } else if service.Type == process { - fields["cpu_percent"] = service.CPU.Percent - fields["cpu_percent_total"] = service.CPU.PercentTotal - fields["mem_kb"] = service.Memory.Kilobyte - fields["mem_kb_total"] = service.Memory.KilobyteTotal - fields["mem_percent"] = service.Memory.Percent - fields["mem_percent_total"] = service.Memory.PercentTotal - fields["pid"] = service.Pid - fields["parent_pid"] = service.ParentPid - fields["threads"] = service.Threads - fields["children"] = service.Children - acc.AddFields("monit_process", fields, tags) - } else if service.Type == remoteHost { - fields["remote_hostname"] = service.Port.Hostname - fields["port_number"] = service.Port.PortNumber - fields["request"] = service.Port.Request - fields["response_time"] = service.Port.ResponseTime - fields["protocol"] = service.Port.Protocol - fields["type"] = service.Port.Type - acc.AddFields("monit_remote_host", fields, tags) - } else if service.Type == system { - fields["cpu_system"] = service.System.CPU.System - fields["cpu_user"] = service.System.CPU.User - fields["cpu_wait"] = service.System.CPU.Wait - fields["cpu_load_avg_1m"] = 
service.System.Load.Avg01 - fields["cpu_load_avg_5m"] = service.System.Load.Avg05 - fields["cpu_load_avg_15m"] = service.System.Load.Avg15 - fields["mem_kb"] = service.System.Memory.Kilobyte - fields["mem_percent"] = service.System.Memory.Percent - fields["swap_kb"] = service.System.Swap.Kilobyte - fields["swap_percent"] = service.System.Swap.Percent - acc.AddFields("monit_system", fields, tags) - } else if service.Type == fifo { - fields["mode"] = service.Mode - acc.AddFields("monit_fifo", fields, tags) - } else if service.Type == program { - fields["program_started"] = service.Program.Started * 10000000 - fields["program_status"] = service.Program.Status - acc.AddFields("monit_program", fields, tags) - } else if service.Type == network { - fields["link_state"] = service.Link.State - fields["link_speed"] = service.Link.Speed - fields["link_mode"] = linkMode(service) - fields["download_packets_now"] = service.Link.Download.Packets.Now - fields["download_packets_total"] = service.Link.Download.Packets.Total - fields["download_bytes_now"] = service.Link.Download.Bytes.Now - fields["download_bytes_total"] = service.Link.Download.Bytes.Total - fields["download_errors_now"] = service.Link.Download.Errors.Now - fields["download_errors_total"] = service.Link.Download.Errors.Total - fields["upload_packets_now"] = service.Link.Upload.Packets.Now - fields["upload_packets_total"] = service.Link.Upload.Packets.Total - fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now - fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total - fields["upload_errors_now"] = service.Link.Upload.Errors.Now - fields["upload_errors_total"] = service.Link.Upload.Errors.Total - acc.AddFields("monit_network", fields, tags) - } + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = 
service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) } - } else { - return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) } + return nil } diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index b3bbed79f68e1..ef47575e80b4c 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -8,10 +8,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type transportMock struct { @@ -632,7 +632,7 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { require.NoError(t, r.Init()) err := r.Gather(&acc) - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestInvalidXMLAndInvalidTypes(t *testing.T) { diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index a9e8236ee0cf5..19b57f79a1a32 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -3,12 +3,12 @@ The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics 
and creates metrics using one of the supported [input data formats][]. -### Configuration +## Configuration ```toml [[inputs.mqtt_consumer]] ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] @@ -73,6 +73,63 @@ and creates metrics using one of the supported [input data formats][]. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Value supported is int, float, unit + # [[inputs.mqtt_consumer.topic.types]] + # key = type +``` + +## About Topic Parsing + +The MQTT topic as a whole is stored as a tag, but this can be far too coarse +to be easily used when utilizing the data further down the line. This +change allows tag values to be extracted from the MQTT topic letting you +store the information provided in the topic in a meaningful way. An `_` denotes an +ignored entry in the topic path. Please see the following example. + +## Example Configuration for topic parsing + +```toml +[[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/+/cpu/23", + ] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "float" + + [[inputs.mqtt_consumer.topic_parsing]] + topic = "telegraf/one/cpu/23" + measurement = "_/_/measurement/_" + tags = "tag/_/_/_" + fields = "_/_/_/test" + [inputs.mqtt_consumer.topic_parsing.types] + test = "int" +``` + +Result: + +```shell +cpu,host=pop-os,tag=telegraf,topic=telegraf/one/cpu/23 value=45,test=23i 1637014942460689291 ``` ### Metrics @@ -80,5 +137,7 @@ and creates metrics using one of the supported [input data formats][]. 
- All measurements are tagged with the incoming topic, ie `topic=telegraf/host01/cpu` +- example when [[inputs.mqtt_consumer.topic_parsing]] is set + [mqtt]: https://mqtt.org [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 815f27a727abf..d869ccc7eb102 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strconv" "strings" "sync" "time" @@ -19,8 +20,7 @@ import ( var ( // 30 Seconds is the default used by paho.mqtt.golang - defaultConnectionTimeout = config.Duration(30 * time.Second) - + defaultConnectionTimeout = config.Duration(30 * time.Second) defaultMaxUndeliveredMessages = 1000 ) @@ -40,30 +40,36 @@ type Client interface { AddRoute(topic string, callback mqtt.MessageHandler) Disconnect(quiesce uint) } - type ClientFactory func(o *mqtt.ClientOptions) Client - +type TopicParsingConfig struct { + Topic string `toml:"topic"` + Measurement string `toml:"measurement"` + Tags string `toml:"tags"` + Fields string `toml:"fields"` + FieldTypes map[string]string `toml:"types"` + // cached split of user given information + MeasurementIndex int + SplitTags []string + SplitFields []string + SplitTopic []string +} type MQTTConsumer struct { - Servers []string `toml:"servers"` - Topics []string `toml:"topics"` - TopicTag *string `toml:"topic_tag"` - Username string `toml:"username"` - Password string `toml:"password"` - QoS int `toml:"qos"` - ConnectionTimeout config.Duration `toml:"connection_timeout"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - - parser parsers.Parser - + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + TopicParsing []TopicParsingConfig `toml:"topic_parsing"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout config.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + parser parsers.Parser // Legacy metric buffer support; deprecated in v0.10.3 - MetricBuffer int - + MetricBuffer int PersistentSession bool ClientID string `toml:"client_id"` tls.ClientConfig - - Log telegraf.Logger - + Log telegraf.Logger clientFactory ClientFactory client Client opts *mqtt.ClientOptions @@ -72,31 +78,28 @@ type MQTTConsumer struct { sem semaphore messages map[telegraf.TrackingID]bool messagesMutex sync.Mutex - topicTag string - - ctx context.Context - cancel context.CancelFunc + topicTagParse string + ctx context.Context + cancel context.CancelFunc } var sampleConfig = ` ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] servers = ["tcp://127.0.0.1:1883"] - ## Topics that will be subscribed to. topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - + # topic_fields = "_/_/_/temperature" ## The message topic will be stored in a tag specified by this value. If set ## to the empty string no topic tag will be created. 
# topic_tag = "topic" - ## QoS policy for messages ## 0 = at most once ## 1 = at least once @@ -105,10 +108,8 @@ var sampleConfig = ` ## When using a QoS of 1 or 2, you should enable persistent_session to allow ## resuming unacknowledged messages. # qos = 0 - ## Connection timeout for initial connection in seconds # connection_timeout = "30s" - ## Maximum messages to read from the broker that have not been written by an ## output. For best throughput set based on the number of metrics within ## each message and the size of the output's metric_batch_size. @@ -118,87 +119,103 @@ var sampleConfig = ` ## full batch is collected and the write is triggered immediately without ## waiting until the next flush_interval. # max_undelivered_messages = 1000 - ## Persistent session disables clearing of the client session on connection. ## In order for this option to work you must also set client_id to identify ## the client. To receive messages that arrived while the client is offline, ## also set the qos option to 1 or 2 and don't forget to also set the QoS when ## publishing. # persistent_session = false - ## If unset, a random client ID will be generated. # client_id = "" - ## Username and password to connect MQTT server. # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + ## [[inputs.mqtt_consumer.topic_parsing]] + ## topic = "" + ## measurement = "" + ## tags = "" + ## fields = "" + ## [inputs.mqtt_consumer.topic_parsing.types] + ## ` func (m *MQTTConsumer) SampleConfig() string { return sampleConfig } - func (m *MQTTConsumer) Description() string { return "Read metrics from MQTT topic(s)" } - func (m *MQTTConsumer) SetParser(parser parsers.Parser) { m.parser = parser } - func (m *MQTTConsumer) Init() error { m.state = Disconnected - if m.PersistentSession && m.ClientID == "" { return errors.New("persistent_session requires client_id") } - if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } - if time.Duration(m.ConnectionTimeout) < 1*time.Second { return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } - - m.topicTag = "topic" + m.topicTagParse = "topic" if m.TopicTag != nil { - m.topicTag = *m.TopicTag + m.topicTagParse = *m.TopicTag } - opts, err := m.createOpts() if err != nil { return err } - m.opts = opts m.messages = map[telegraf.TrackingID]bool{} + for i, p := range m.TopicParsing { + splitMeasurement := strings.Split(p.Measurement, "/") + for j := range splitMeasurement { + if splitMeasurement[j] != "_" { + m.TopicParsing[i].MeasurementIndex = j + break + } + } + m.TopicParsing[i].SplitTags = strings.Split(p.Tags, "/") + m.TopicParsing[i].SplitFields = strings.Split(p.Fields, "/") + m.TopicParsing[i].SplitTopic = strings.Split(p.Topic, "/") + + if len(splitMeasurement) != len(m.TopicParsing[i].SplitTopic) && len(splitMeasurement) != 1 { + return fmt.Errorf("config error topic parsing: measurement length does not equal topic length") + } + + if 
len(m.TopicParsing[i].SplitFields) != len(m.TopicParsing[i].SplitTopic) && p.Fields != "" { + return fmt.Errorf("config error topic parsing: fields length does not equal topic length") + } + + if len(m.TopicParsing[i].SplitTags) != len(m.TopicParsing[i].SplitTopic) && p.Tags != "" { + return fmt.Errorf("config error topic parsing: tags length does not equal topic length") + } + } + return nil } - func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.state = Disconnected - m.acc = acc.WithTracking(m.MaxUndeliveredMessages) m.sem = make(semaphore, m.MaxUndeliveredMessages) m.ctx, m.cancel = context.WithCancel(context.Background()) - m.client = m.clientFactory(m.opts) - // AddRoute sets up the function for handling messages. These need to be // added in case we find a persistent session containing subscriptions so we // know where to dispatch persisted and new messages to. In the alternate @@ -206,11 +223,9 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { for _, topic := range m.Topics { m.client.AddRoute(topic, m.recvMessage) } - m.state = Connecting return m.connect() } - func (m *MQTTConsumer) connect() error { token := m.client.Connect() if token.Wait() && token.Error() != nil { @@ -218,10 +233,8 @@ func (m *MQTTConsumer) connect() error { m.state = Disconnected return err } - m.Log.Infof("Connected %v", m.Servers) m.state = Connected - // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. type sessionPresent interface { @@ -231,28 +244,23 @@ func (m *MQTTConsumer) connect() error { m.Log.Debugf("Session found %v", m.Servers) return nil } - topics := make(map[string]byte) for _, topic := range m.Topics { topics[topic] = byte(m.QoS) } - subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", strings.Join(m.Topics[:], ","), subscribeToken.Error())) } - return nil } - func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected } - func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { for { select { @@ -278,26 +286,60 @@ func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { } } +// compareTopics is used to support the mqtt wild card `+` which allows for one topic of any value +func compareTopics(expected []string, incoming []string) bool { + if len(expected) != len(incoming) { + return false + } + + for i, expected := range expected { + if incoming[i] != expected && expected != "+" { + return false + } + } + + return true +} + func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Message) error { metrics, err := m.parser.Parse(msg.Payload()) if err != nil { return err } - if m.topicTag != "" { - topic := msg.Topic() - for _, metric := range metrics { - metric.AddTag(m.topicTag, topic) + for _, metric := range metrics { + if m.topicTagParse != "" { + metric.AddTag(m.topicTagParse, msg.Topic()) } - } + for _, p := range m.TopicParsing { + values := strings.Split(msg.Topic(), "/") + if !compareTopics(p.SplitTopic, values) { + continue + } + if p.Measurement != "" { + metric.SetName(values[p.MeasurementIndex]) + } + if p.Tags != "" { + err := parseMetric(p.SplitTags, values, p.FieldTypes, true, metric) + if err != nil { + return err + } + } + if p.Fields != "" { + err := 
parseMetric(p.SplitFields, values, p.FieldTypes, false, metric) + if err != nil { + return err + } + } + } + } id := acc.AddTrackingMetricGroup(metrics) m.messagesMutex.Lock() m.messages[id] = true m.messagesMutex.Unlock() return nil } - func (m *MQTTConsumer) Stop() { if m.state == Connected { m.Log.Debugf("Disconnecting %v", m.Servers) @@ -307,37 +349,29 @@ func (m *MQTTConsumer) Stop() { } m.cancel() } - func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) return m.connect() } - return nil } - func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - opts.ConnectTimeout = time.Duration(m.ConnectionTimeout) - if m.ClientID == "" { opts.SetClientID("Telegraf-Consumer-" + internal.RandomString(5)) } else { opts.SetClientID(m.ClientID) } - tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err } - if tlsCfg != nil { opts.SetTLSConfig(tlsCfg) } - user := m.Username if user != "" { opts.SetUsername(user) @@ -346,11 +380,9 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { if password != "" { opts.SetPassword(password) } - if len(m.Servers) == 0 { return opts, fmt.Errorf("could not get host informations") } - for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { @@ -361,17 +393,66 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { server = "ssl://" + server } } - opts.AddBroker(server) } opts.SetAutoReconnect(false) opts.SetKeepAlive(time.Second * 60) opts.SetCleanSession(!m.PersistentSession) opts.SetConnectionLostHandler(m.onConnectionLost) - return opts, nil } +// parseFields gets multiple fields from the topic based on the user configuration (TopicParsing.Fields) +func parseMetric(keys []string, values []string, types map[string]string, isTag bool, metric telegraf.Metric) error { + for i, k := range keys { + if k == "_" { + continue + } + + if isTag { + metric.AddTag(k, values[i]) + } else { + newType, err := typeConvert(types, values[i], k) + if err != nil { + return err + } + metric.AddField(k, newType) + } + } + return nil +} + +func typeConvert(types map[string]string, topicValue string, key string) (interface{}, error) { + var newType interface{} + var err error + // If the user configured inputs.mqtt_consumer.topic.types, check for the desired type + if desiredType, ok := types[key]; ok { + switch desiredType { + case "uint": + newType, err = strconv.ParseUint(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", topicValue, err) + } + case "int": + newType, err = strconv.ParseInt(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", topicValue, err) + } + case "float": + newType, err = strconv.ParseFloat(topicValue, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", topicValue, err) + } + default: + return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType) + } + } else { + newType = topicValue + } + + return newType, nil +} + func New(factory ClientFactory) *MQTTConsumer { return &MQTTConsumer{ Servers: []string{"tcp://127.0.0.1:1883"}, @@ -381,7 +462,6 @@ func New(factory ClientFactory) *MQTTConsumer { state: Disconnected, } } - func init() { inputs.Add("mqtt_consumer", func() 
telegraf.Input { return New(func(o *mqtt.ClientOptions) Client { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index a9b85c108ab65..2eb7d6ffabc26 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -1,6 +1,7 @@ package mqtt_consumer import ( + "fmt" "testing" "time" @@ -153,6 +154,7 @@ func TestPersistentClientIDFail(t *testing.T) { } type Message struct { + topic string } func (m *Message) Duplicate() bool { @@ -168,7 +170,7 @@ func (m *Message) Retained() bool { } func (m *Message) Topic() string { - return "telegraf" + return m.topic } func (m *Message) MessageID() uint16 { @@ -185,12 +187,16 @@ func (m *Message) Ack() { func TestTopicTag(t *testing.T) { tests := []struct { - name string - topicTag func() *string - expected []telegraf.Metric + name string + topic string + topicTag func() *string + expectedError error + topicParsing []TopicParsingConfig + expected []telegraf.Metric }{ { - name: "default topic when topic tag is unset for backwards compatibility", + name: "default topic when topic tag is unset for backwards compatibility", + topic: "telegraf", topicTag: func() *string { return nil }, @@ -208,7 +214,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "use topic tag when set", + name: "use topic tag when set", + topic: "telegraf", topicTag: func() *string { tag := "topic_tag" return &tag @@ -227,7 +234,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "no topic tag is added when topic tag is set to the empty string", + name: "no topic tag is added when topic tag is set to the empty string", + topic: "telegraf", topicTag: func() *string { tag := "" return &tag @@ -243,6 +251,167 @@ func TestTopicTag(t *testing.T) { ), }, }, + { + name: "topic parsing configured", + topic: "telegraf/123/test", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/123/test", + Measurement: "_/_/measurement", + Tags: "testTag/_/_", + Fields: "_/testNumber/_", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured with a mqtt wild card `+`", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/testNumber/_/testString", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured incorrectly", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + expectedError: fmt.Errorf("config error topic parsing: fields length does not equal topic length"), + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/_/testNumber:int/_/testString:string", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, 
+ }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured without fields", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured without measurement", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Tags: "testTag/_/_/_", + Fields: "_/testNumber/_/testString", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -265,21 +434,28 @@ func TestTopicTag(t *testing.T) { return client }) plugin.Log = testutil.Logger{} - plugin.Topics = []string{"telegraf"} + plugin.Topics = []string{tt.topic} plugin.TopicTag = tt.topicTag() + plugin.TopicParsing = tt.topicParsing parser, err := parsers.NewInfluxParser() require.NoError(t, err) plugin.SetParser(parser) err = plugin.Init() - require.NoError(t, err) + require.Equal(t, tt.expectedError, err) + if tt.expectedError != nil { + return + } var acc testutil.Accumulator err = plugin.Start(&acc) require.NoError(t, err) - handler(nil, &Message{}) + var m Message + m.topic = tt.topic + + handler(nil, &m) plugin.Stop() diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 2d71ac159cdd2..b460d59aa90ed 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -7,7 +7,8 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. > Note: If you wish to parse metrics from a single file formatted in one of the supported > [input data formats][], you should use the [file][] input plugin instead. -### Configuration +## Configuration + ```toml [[inputs.multifile]] ## Base directory where telegraf will look for files. @@ -34,32 +35,37 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. ``` Each file table can contain the following options: + * `file`: Path of the file to be parsed, relative to the `base_dir`. * `dest`: Name of the field/tag key, defaults to `$(basename file)`. * `conversion`: Data format used to parse the file contents: - * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. - * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - * `int`: Converts the value into an integer. - * `string`, `""`: No conversion. - * `bool`: Converts the value into a boolean. - * `tag`: File content is used as a tag. 
- -### Example Output + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. + * `int`: Converts the value into an integer. + * `string`, `""`: No conversion. + * `bool`: Converts the value into a boolean. + * `tag`: File content is used as a tag. + +## Example Output + This example shows a BME280 connected to a Raspberry Pi, using the sample config. -``` + +```sh multifile pressure=101.343285156,temperature=20.4,humidityrelative=48.9 1547202076000000000 ``` To reproduce this, connect a BMP280 to the board's GPIO pins and register the BME280 device driver -``` + +```sh cd /sys/bus/i2c/devices/i2c-1 echo bme280 0x76 > new_device ``` The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/iio:device0`: + * `in_humidityrelative_input`: `48900` * `in_pressure_input`: `101.343285156` * `in_temp_input`: `20400` diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go index b12f29f35c2cd..214cebd136f9c 100644 --- a/plugins/inputs/multifile/multifile_test.go +++ b/plugins/inputs/multifile/multifile_test.go @@ -5,9 +5,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestFileTypes(t *testing.T) { @@ -32,8 +32,8 @@ func TestFileTypes(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + require.Equal(t, map[string]interface{}{ "examplebool": true, "examplestring": "hello world", "exampleint": int64(123456), @@ -60,7 +60,7 @@ func FailEarly(failEarly bool, t *testing.T) error { err := m.Gather(&acc) if err == nil { - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "exampleint": int64(123456), }, acc.Metrics[0].Fields) } diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 0a96f9c9b1447..cd98b454408b0 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -18,7 +18,7 @@ This plugin gathers the statistic data from MySQL server * File events statistics * Table schema statistics -### Configuration +## Configuration ```toml [[inputs.mysql]] @@ -122,7 +122,7 @@ This plugin gathers the statistic data from MySQL server # insecure_skip_verify = false ``` -#### Metric Version +### Metric Version When `metric_version = 2`, a variety of field type issues are corrected as well as naming inconsistencies. If you have existing data on the original version @@ -132,6 +132,7 @@ InfluxDB due to the change of types. For this reason, you should keep the If preserving your old data is not required you may wish to drop conflicting measurements: + ```sql DROP SERIES from mysql DROP SERIES from mysql_variables @@ -143,6 +144,7 @@ Otherwise, migration can be performed using the following steps: 1. 
Duplicate your `mysql` plugin configuration and add a `name_suffix` and `metric_version = 2`, this will result in collection using both the old and new style concurrently: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -157,8 +159,8 @@ style concurrently: 2. Upgrade all affected Telegraf clients to version >=1.6. New measurements will be created with the `name_suffix`, for example:: - - `mysql_v2` - - `mysql_variables_v2` + * `mysql_v2` + * `mysql_variables_v2` 3. Update charts, alerts, and other supporting code to the new format. 4. You can now remove the old `mysql` plugin configuration and remove old @@ -169,6 +171,7 @@ historical data to the default name. Do this only after retiring the old measurement name. 1. Use the technique described above to write to multiple locations: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -180,8 +183,10 @@ measurement name. servers = ["tcp(127.0.0.1:3306)/"] ``` + 2. Create a TICKScript to copy the historical data: - ``` + + ```sql dbrp "telegraf"."autogen" batch @@ -195,17 +200,23 @@ measurement name. .retentionPolicy('autogen') .measurement('mysql') ``` + 3. Define a task for your script: + ```sh kapacitor define copy-measurement -tick copy-measurement.task ``` + 4. Run the task over the data you would like to migrate: + ```sh kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement ``` + 5. Verify copied data and repeat for other measurements. -### Metrics: +## Metrics + * Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUSES` * Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES` * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when @@ -214,141 +225,142 @@ then everything works differently, this metric does not work with multi-source replication, unless you set `gather_all_slave_channels = true`. For MariaDB, `mariadb_dialect = true` should be set to address the field names and commands differences. - * slave_[column name]() + * slave_[column name] * Binary logs - all metrics including size and count of all binary files. Requires to be turned on in configuration. - * binary_size_bytes(int, number) - * binary_files_count(int, number) + * binary_size_bytes(int, number) + * binary_files_count(int, number) * Process list - connection metrics from processlist for each user. It has the following tags - * connections(int, number) + * connections(int, number) * User Statistics - connection metrics from user statistics for each user. 
It has the following fields - * access_denied - * binlog_bytes_written - * busy_time - * bytes_received - * bytes_sent - * commit_transactions - * concurrent_connections - * connected_time - * cpu_time - * denied_connections - * empty_queries - * hostlost_connections - * other_commands - * rollback_transactions - * rows_fetched - * rows_updated - * select_commands - * server - * table_rows_read - * total_connections - * total_ssl_connections - * update_commands - * user + * access_denied + * binlog_bytes_written + * busy_time + * bytes_received + * bytes_sent + * commit_transactions + * concurrent_connections + * connected_time + * cpu_time + * denied_connections + * empty_queries + * hostlost_connections + * other_commands + * rollback_transactions + * rows_fetched + * rows_updated + * select_commands + * server + * table_rows_read + * total_connections + * total_ssl_connections + * update_commands + * user * Perf Table IO waits - total count and time of I/O waits event for each table and process. It has following fields: - * table_io_waits_total_fetch(float, number) - * table_io_waits_total_insert(float, number) - * table_io_waits_total_update(float, number) - * table_io_waits_total_delete(float, number) - * table_io_waits_seconds_total_fetch(float, milliseconds) - * table_io_waits_seconds_total_insert(float, milliseconds) - * table_io_waits_seconds_total_update(float, milliseconds) - * table_io_waits_seconds_total_delete(float, milliseconds) + * table_io_waits_total_fetch(float, number) + * table_io_waits_total_insert(float, number) + * table_io_waits_total_update(float, number) + * table_io_waits_total_delete(float, number) + * table_io_waits_seconds_total_fetch(float, milliseconds) + * table_io_waits_seconds_total_insert(float, milliseconds) + * table_io_waits_seconds_total_update(float, milliseconds) + * table_io_waits_seconds_total_delete(float, milliseconds) * Perf index IO waits - total count and time of I/O waits event for each index and process. It has following fields: - * index_io_waits_total_fetch(float, number) - * index_io_waits_seconds_total_fetch(float, milliseconds) - * index_io_waits_total_insert(float, number) - * index_io_waits_total_update(float, number) - * index_io_waits_total_delete(float, number) - * index_io_waits_seconds_total_insert(float, milliseconds) - * index_io_waits_seconds_total_update(float, milliseconds) - * index_io_waits_seconds_total_delete(float, milliseconds) + * index_io_waits_total_fetch(float, number) + * index_io_waits_seconds_total_fetch(float, milliseconds) + * index_io_waits_total_insert(float, number) + * index_io_waits_total_update(float, number) + * index_io_waits_total_delete(float, number) + * index_io_waits_seconds_total_insert(float, milliseconds) + * index_io_waits_seconds_total_update(float, milliseconds) + * index_io_waits_seconds_total_delete(float, milliseconds) * Info schema autoincrement statuses - autoincrement fields and max values for them. It has following fields: - * auto_increment_column(int, number) - * auto_increment_column_max(int, number) + * auto_increment_column(int, number) + * auto_increment_column_max(int, number) * InnoDB metrics - all metrics of information_schema.INNODB_METRICS with a status "enabled" * Perf table lock waits - gathers total number and time for SQL and external lock waits events for each table and operation. It has following fields. The unit of fields varies by the tags. 
- * read_normal(float, number/milliseconds) - * read_with_shared_locks(float, number/milliseconds) - * read_high_priority(float, number/milliseconds) - * read_no_insert(float, number/milliseconds) - * write_normal(float, number/milliseconds) - * write_allow_write(float, number/milliseconds) - * write_concurrent_insert(float, number/milliseconds) - * write_low_priority(float, number/milliseconds) - * read(float, number/milliseconds) - * write(float, number/milliseconds) + * read_normal(float, number/milliseconds) + * read_with_shared_locks(float, number/milliseconds) + * read_high_priority(float, number/milliseconds) + * read_no_insert(float, number/milliseconds) + * write_normal(float, number/milliseconds) + * write_allow_write(float, number/milliseconds) + * write_concurrent_insert(float, number/milliseconds) + * write_low_priority(float, number/milliseconds) + * read(float, number/milliseconds) + * write(float, number/milliseconds) * Perf events waits - gathers total time and number of event waits - * events_waits_total(float, number) - * events_waits_seconds_total(float, milliseconds) + * events_waits_total(float, number) + * events_waits_seconds_total(float, milliseconds) * Perf file events statuses - gathers file events statuses - * file_events_total(float,number) - * file_events_seconds_total(float, milliseconds) - * file_events_bytes_total(float, bytes) + * file_events_total(float,number) + * file_events_seconds_total(float, milliseconds) + * file_events_bytes_total(float, bytes) * Perf events statements - gathers attributes of each event - * events_statements_total(float, number) - * events_statements_seconds_total(float, millieconds) - * events_statements_errors_total(float, number) - * events_statements_warnings_total(float, number) - * events_statements_rows_affected_total(float, number) - * events_statements_rows_sent_total(float, number) - * events_statements_rows_examined_total(float, number) - * events_statements_tmp_tables_total(float, number) - * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totals(float, number) - * events_statements_sort_rows_total(float, number) - * events_statements_no_index_used_total(float, number) + * events_statements_total(float, number) + * events_statements_seconds_total(float, millieconds) + * events_statements_errors_total(float, number) + * events_statements_warnings_total(float, number) + * events_statements_rows_affected_total(float, number) + * events_statements_rows_sent_total(float, number) + * events_statements_rows_examined_total(float, number) + * events_statements_tmp_tables_total(float, number) + * events_statements_tmp_disk_tables_total(float, number) + * events_statements_sort_merge_passes_totals(float, number) + * events_statements_sort_rows_total(float, number) + * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema. 
It has following measurements - * info_schema_table_rows(float, number) - * info_schema_table_size_data_length(float, number) - * info_schema_table_size_index_length(float, number) - * info_schema_table_size_data_free(float, number) - * info_schema_table_version(float, number) + * info_schema_table_rows(float, number) + * info_schema_table_size_data_length(float, number) + * info_schema_table_size_index_length(float, number) + * info_schema_table_size_data_free(float, number) + * info_schema_table_version(float, number) ## Tags + * All measurements has following tags - * server (the host name from which the metrics are gathered) + * server (the host name from which the metrics are gathered) * Process list measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * User Statistics measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * Perf table IO waits measurement has following tags - * schema - * name (object name for event or process) + * schema + * name (object name for event or process) * Perf index IO waits has following tags - * schema - * name - * index + * schema + * name + * index * Info schema autoincrement statuses has following tags - * schema - * table - * column + * schema + * table + * column * Perf table lock waits has following tags - * schema - * table - * sql_lock_waits_total(fields including this tag have numeric unit) - * external_lock_waits_total(fields including this tag have numeric unit) - * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) - * external_lock_waits_seconds_total(fields including this tag have millisecond unit) + * schema + * table + * sql_lock_waits_total(fields including this tag have numeric unit) + * external_lock_waits_total(fields including this tag have numeric unit) + * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) + * external_lock_waits_seconds_total(fields including this tag have millisecond unit) * Perf events statements has following tags - * event_name + * event_name * Perf file events statuses has following tags - * event_name - * mode + * event_name + * mode * Perf file events statements has following tags - * schema - * digest - * digest_text + * schema + * digest + * digest_text * Table schema has following tags - * schema - * table - * component - * type - * engine - * row_format - * create_options + * schema + * table + * component + * type + * engine + * row_format + * create_options diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 6e81b3df2f757..3fbd4654ef2b4 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1,7 +1,6 @@ package mysql import ( - "bytes" "database/sql" "fmt" "strconv" @@ -10,6 +9,7 @@ import ( "time" "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -637,7 +637,12 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu value, err := m.parseGlobalVariables(key, val) if err != nil { - m.Log.Debugf("Error parsing global variable %q: %v", key, err) + errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } } else { fields[key] = value } @@ 
-657,11 +662,7 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { if m.MetricVersion < 2 { - v, ok := v1.ParseValue(value) - if ok { - return v, nil - } - return v, fmt.Errorf("could not parse value: %q", string(value)) + return v1.ParseValue(value) } return v2.ConvertGlobalVariables(key, value) } @@ -692,35 +693,58 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu // scanning keys and values separately // get columns names, and create an array with its length - cols, err := rows.Columns() + cols, err := rows.ColumnTypes() if err != nil { return err } - vals := make([]interface{}, len(cols)) + vals := make([]sql.RawBytes, len(cols)) + valPtrs := make([]interface{}, len(cols)) // fill the array with sql.Rawbytes for i := range vals { - vals[i] = &sql.RawBytes{} + vals[i] = sql.RawBytes{} + valPtrs[i] = &vals[i] } - if err = rows.Scan(vals...); err != nil { + if err = rows.Scan(valPtrs...); err != nil { return err } + // range over columns, and try to parse values for i, col := range cols { + colName := col.Name() + if m.MetricVersion >= 2 { - col = strings.ToLower(col) + colName = strings.ToLower(colName) } + colValue := vals[i] + if m.GatherAllSlaveChannels && - (strings.ToLower(col) == "channel_name" || strings.ToLower(col) == "connection_name") { + (strings.ToLower(colName) == "channel_name" || strings.ToLower(colName) == "connection_name") { // Since the default channel name is empty, we need this block channelName := "default" - if len(*vals[i].(*sql.RawBytes)) > 0 { - channelName = string(*vals[i].(*sql.RawBytes)) + if len(colValue) > 0 { + channelName = string(colValue) } tags["channel"] = channelName - } else if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { - fields["slave_"+col] = value + continue + } + + if colValue == nil || len(colValue) == 0 { + continue + } + + value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) + if err != nil { + errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } + continue } + + fields["slave_"+colName] = value } acc.AddFields("mysql", fields, tags) @@ -876,7 +900,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum key = strings.ToLower(key) value, err := v2.ConvertGlobalStatus(key, val) if err != nil { - m.Log.Debugf("Error parsing global status: %v", err) + acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) } else { fields[key] = value } @@ -905,6 +929,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return err } defer rows.Close() + var ( command string state string @@ -948,6 +973,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. 
if err != nil { return err } + defer connRows.Close() for connRows.Next() { var user string @@ -1343,10 +1369,16 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu if err := rows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { - fields[key] = value + value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") + if err != nil { + acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) + continue } + + fields[key] = value + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_innodb", fields, tags) @@ -1812,123 +1844,121 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula } for _, database := range dbList { - rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + err := m.gatherSchemaForDB(db, database, servtag, acc) if err != nil { return err } - defer rows.Close() - var ( - tableSchema string - tableName string - tableType string - engine string - version float64 - rowFormat string - tableRows float64 - dataLength float64 - indexLength float64 - dataFree float64 - createOptions string + } + return nil +} + +func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, acc telegraf.Accumulator) error { + rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + if err != nil { + return err + } + defer rows.Close() + + var ( + tableSchema string + tableName string + tableType string + engine string + version float64 + rowFormat string + tableRows float64 + dataLength float64 + indexLength float64 + dataFree float64 + createOptions string + ) + + for rows.Next() { + err = rows.Scan( + &tableSchema, + &tableName, + &tableType, + &engine, + &version, + &rowFormat, + &tableRows, + &dataLength, + &indexLength, + &dataFree, + &createOptions, ) - for rows.Next() { - err = rows.Scan( - &tableSchema, - &tableName, - &tableType, - &engine, - &version, - &rowFormat, - &tableRows, - &dataLength, - &indexLength, - &dataFree, - &createOptions, - ) - if err != nil { - return err - } - tags := map[string]string{"server": servtag} - tags["schema"] = tableSchema - tags["table"] = tableName + if err != nil { + return err + } + tags := map[string]string{"server": servtag} + tags["schema"] = tableSchema + tags["table"] = tableName - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_rows"), - map[string]interface{}{"value": tableRows}, tags) - - dlTags := copyTags(tags) - dlTags["component"] = "data_length" - acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), - map[string]interface{}{"value": dataLength}, dlTags) - - ilTags := copyTags(tags) - ilTags["component"] = "index_length" - acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), - map[string]interface{}{"value": indexLength}, ilTags) - - dfTags := copyTags(tags) - dfTags["component"] = "data_free" - acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), - map[string]interface{}{"value": dataFree}, dfTags) - } else { - acc.AddFields("mysql_table_schema", - map[string]interface{}{"rows": tableRows}, tags) + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) + + dlTags := copyTags(tags) + dlTags["component"] = "data_length" + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) + + ilTags := 
copyTags(tags) + ilTags["component"] = "index_length" + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) + + dfTags := copyTags(tags) + dfTags["component"] = "data_free" + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) + } else { + acc.AddFields("mysql_table_schema", + map[string]interface{}{"rows": tableRows}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_length": dataLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_length": dataLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"index_length": indexLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"index_length": indexLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_free": dataFree}, tags) - } + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_free": dataFree}, tags) + } - versionTags := copyTags(tags) - versionTags["type"] = tableType - versionTags["engine"] = engine - versionTags["row_format"] = rowFormat - versionTags["create_options"] = createOptions + versionTags := copyTags(tags) + versionTags["type"] = tableType + versionTags["engine"] = engine + versionTags["row_format"] = rowFormat + versionTags["create_options"] = createOptions - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_version"), - map[string]interface{}{"value": version}, versionTags) - } else { - acc.AddFields("mysql_table_schema_version", - map[string]interface{}{"table_version": version}, versionTags) - } + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) + } else { + acc.AddFields("mysql_table_schema_version", + map[string]interface{}{"table_version": version}, versionTags) } } return nil } -func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { +func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeName string) (interface{}, error) { if m.MetricVersion < 2 { return v1.ParseValue(value) } - return parseValue(value) -} - -// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 -func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { - return 1, true - } - - if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { - return 0, true - } - - if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { - return val, true - } - if val, err := strconv.ParseFloat(string(value), 64); err == nil { - return val, true - } - if len(string(value)) > 0 { - return string(value), true + switch databaseTypeName { + case "INT": + return v2.ParseInt(value) + case "BIGINT": + return v2.ParseUint(value) + case "VARCHAR": + return v2.ParseString(value) + default: + m.Log.Debugf("unknown database type name %q in parseValueByDatabaseTypeName", databaseTypeName) + return v2.ParseValue(value) } - return nil, false } // findThreadState can be used to find thread state by command and plain state diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index 0cdcd4b1cd345..868c86f18b9cb 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -1,13 +1,12 @@ package mysql import ( - "database/sql" "fmt" "testing" - 
"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMysqlDefaultsToLocalIntegration(t *testing.T) { @@ -23,7 +22,7 @@ func TestMysqlDefaultsToLocalIntegration(t *testing.T) { err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) } func TestMysqlMultipleInstancesIntegration(t *testing.T) { @@ -43,9 +42,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { var acc, acc2 testutil.Accumulator err := m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) // acc should have global variables - assert.True(t, acc.HasMeasurement("mysql_variables")) + require.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ Servers: []string{testServer}, @@ -53,9 +52,9 @@ func TestMysqlMultipleInstancesIntegration(t *testing.T) { } err = m2.Gather(&acc2) require.NoError(t, err) - assert.True(t, acc2.HasMeasurement("mysql")) + require.True(t, acc2.HasMeasurement("mysql")) // acc2 should not have global variables - assert.False(t, acc2.HasMeasurement("mysql_variables")) + require.False(t, acc2.HasMeasurement("mysql_variables")) } func TestMysqlMultipleInits(t *testing.T) { @@ -65,16 +64,16 @@ func TestMysqlMultipleInits(t *testing.T) { m2 := &Mysql{} m.InitMysql() - assert.True(t, m.initDone) - assert.False(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.False(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) m2.InitMysql() - assert.True(t, m.initDone) - assert.True(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.True(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) } func TestMysqlGetDSNTag(t *testing.T) { @@ -178,31 +177,7 @@ func TestMysqlDNSAddTimeout(t *testing.T) { } } } -func TestParseValue(t *testing.T) { - testCases := []struct { - rawByte sql.RawBytes - output interface{} - boolValue bool - }{ - {sql.RawBytes("123"), int64(123), true}, - {sql.RawBytes("abc"), "abc", true}, - {sql.RawBytes("10.1"), 10.1, true}, - {sql.RawBytes("ON"), 1, true}, - {sql.RawBytes("OFF"), 0, true}, - {sql.RawBytes("NO"), 0, true}, - {sql.RawBytes("YES"), 1, true}, - {sql.RawBytes("No"), 0, true}, - {sql.RawBytes("Yes"), 1, true}, - {sql.RawBytes("-794"), int64(-794), true}, - {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), true}, - {sql.RawBytes(""), nil, false}, - } - for _, cases := range testCases { - if got, ok := parseValue(cases.rawByte); got != cases.output && ok != cases.boolValue { - t.Errorf("for %s wanted %t, got %t", string(cases.rawByte), cases.output, got) - } - } -} + func TestNewNamespace(t *testing.T) { testCases := []struct { words []string diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 374782f9cb29a..7f4e1a7dcacae 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -182,14 +182,14 @@ var Mappings = []*Mapping{ }, } -func ParseValue(value sql.RawBytes) (float64, bool) { +func ParseValue(value sql.RawBytes) (float64, error) { if bytes.Equal(value, 
[]byte("Yes")) || bytes.Equal(value, []byte("ON")) { - return 1, true + return 1, nil } if bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { - return 0, true + return 0, nil } n, err := strconv.ParseFloat(string(value), 64) - return n, err == nil + return n, err } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index d5b73ec7f4c1e..b446890c9baec 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -25,6 +25,10 @@ func ParseUint(value sql.RawBytes) (interface{}, error) { return strconv.ParseUint(string(value), 10, 64) } +func ParseFloat(value sql.RawBytes) (interface{}, error) { + return strconv.ParseFloat(string(value), 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -86,11 +90,15 @@ var GlobalStatusConversions = map[string]ConversionFunc{ "innodb_data_pending_fsyncs": ParseUint, "ssl_ctx_verify_depth": ParseUint, "ssl_verify_depth": ParseUint, + + // see https://galeracluster.com/library/documentation/galera-status-variables.html + "wsrep_local_index": ParseUint, + "wsrep_local_send_queue_avg": ParseFloat, } -// see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html -// see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html var GlobalVariableConversions = map[string]ConversionFunc{ + // see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html + // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html "delay_key_write": ParseString, // ON, OFF, ALL "enforce_gtid_consistency": ParseString, // ON, OFF, WARN "event_scheduler": ParseString, // YES, NO, DISABLED diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 43133eeb39c1b..95083a1e5016f 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -2,6 +2,7 @@ package v2 import ( "database/sql" + "strings" "testing" "github.com/stretchr/testify/require" @@ -84,3 +85,43 @@ func TestCovertGlobalVariables(t *testing.T) { }) } } + +func TestParseValue(t *testing.T) { + testCases := []struct { + rawByte sql.RawBytes + output interface{} + err string + }{ + {sql.RawBytes("123"), int64(123), ""}, + {sql.RawBytes("abc"), "abc", ""}, + {sql.RawBytes("10.1"), 10.1, ""}, + {sql.RawBytes("ON"), 1, ""}, + {sql.RawBytes("OFF"), 0, ""}, + {sql.RawBytes("NO"), 0, ""}, + {sql.RawBytes("YES"), 1, ""}, + {sql.RawBytes("No"), 0, ""}, + {sql.RawBytes("Yes"), 1, ""}, + {sql.RawBytes("-794"), int64(-794), ""}, + {sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32 + {sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32 + {sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64 + {sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64 + {sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64 + {sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes(""), nil, "unconvertible value"}, + } + for _, cases := range testCases { + got, err := ParseValue(cases.rawByte) + + if err != nil && cases.err == "" { + t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error()) + } else if err 
!= nil && !strings.HasPrefix(err.Error(), cases.err) { + t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error()) + } else if err == nil && cases.err != "" { + t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err) + } else if got != cases.output { + t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got) + } + } +} diff --git a/plugins/inputs/nats/README.md b/plugins/inputs/nats/README.md index 362ee17b2aa65..59262bfd6cb52 100644 --- a/plugins/inputs/nats/README.md +++ b/plugins/inputs/nats/README.md @@ -3,7 +3,7 @@ The [NATS](http://www.nats.io/about/) monitoring plugin gathers metrics from the NATS [monitoring http server](https://www.nats.io/documentation/server/gnatsd-monitoring/). -### Configuration +## Configuration ```toml [[inputs.nats]] @@ -14,7 +14,7 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats # response_timeout = "5s" ``` -### Metrics: +## Metrics - nats - tags @@ -35,8 +35,8 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats - out_msgs (integer, count) - in_bytes (integer, bytes) -### Example Output: +## Example Output -``` +```shell nats,server=http://localhost:8222 uptime=117158348682i,mem=6647808i,subscriptions=0i,out_bytes=0i,connections=0i,in_msgs=0i,total_connections=0i,cores=2i,cpu=0,slow_consumers=0i,routes=0i,remotes=0i,out_msgs=0i,in_bytes=0i 1517015107000000000 ``` diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 7144355096b4e..c9e99824d4de5 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -11,10 +11,11 @@ import ( "path" "time" + gnatsd "github.com/nats-io/nats-server/v2/server" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { @@ -41,16 +42,16 @@ func (n *Nats) Description() string { } func (n *Nats) Gather(acc telegraf.Accumulator) error { - url, err := url.Parse(n.Server) + address, err := url.Parse(n.Server) if err != nil { return err } - url.Path = path.Join(url.Path, "varz") + address.Path = path.Join(address.Path, "varz") if n.client == nil { n.client = n.createHTTPClient() } - resp, err := n.client.Get(url.String()) + resp, err := n.client.Get(address.String()) if err != nil { return err } diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index ae40d9185100a..9e46bf4ebcc99 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -6,7 +6,7 @@ creates metrics using one of the supported [input data formats][]. A [Queue Group][queue group] is used when subscribing to subjects so multiple instances of telegraf can read from a NATS cluster in parallel. -### Configuration: +## Configuration ```toml [[inputs.nats_consumer]] diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md index 6fd28a16a6d21..df0de4574eca4 100644 --- a/plugins/inputs/neptune_apex/README.md +++ b/plugins/inputs/neptune_apex/README.md @@ -6,8 +6,7 @@ in the telegraf.conf configuration file. The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-time data from the Apex's status.xml page. 
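The new `TestParseValue` table in `plugins/inputs/mysql/v2/convert_test.go` above implies a specific fallback order for untyped values: signed integer, then unsigned integer, then float, then ON/OFF/YES/NO booleans, then the raw string, with an error only for empty input. Below is a minimal sketch of that order written purely from the test expectations; it is not a copy of the plugin's `v2.ParseValue`.

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// parseValue sketches the fallback order implied by the TestParseValue cases:
// int64, then uint64, then float64, then ON/OFF/YES/NO, then the raw string,
// and an error for empty input. Written from the test table, not plugin code.
func parseValue(value []byte) (interface{}, error) {
	if v, err := strconv.ParseInt(string(value), 10, 64); err == nil {
		return v, nil
	}
	if v, err := strconv.ParseUint(string(value), 10, 64); err == nil {
		return v, nil
	}
	if v, err := strconv.ParseFloat(string(value), 64); err == nil {
		return v, nil
	}
	if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) {
		return int64(1), nil
	}
	if bytes.EqualFold(value, []byte("NO")) || bytes.EqualFold(value, []byte("OFF")) {
		return int64(0), nil
	}
	if len(value) > 0 {
		return string(value), nil
	}
	return nil, fmt.Errorf("unconvertible value: %q", string(value))
}

func main() {
	for _, in := range []string{"123", "18446744073709551615", "18446744073709551616", "ON", "abc", ""} {
		v, err := parseValue([]byte(in))
		fmt.Printf("%q -> %#v (err: %v)\n", in, v, err)
	}
}
```

This ordering explains the boundary cases in the table: `2147483648` still fits in an int64, `9223372036854775808` overflows into a uint64, and `18446744073709551616` falls through to a float64.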
- -### Configuration +## Configuration ```toml [[inputs.neptune_apex]] @@ -25,7 +24,7 @@ The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-t ``` -### Metrics +## Metrics The Neptune Apex controller family allows an aquarium hobbyist to monitor and control their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified @@ -62,38 +61,42 @@ programming. These tags are clearly marked in the list below and should be consi - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none" - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none" - serial (string, serial number) - - time: - - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with + - time: + - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. - -### Sample Queries - +## Sample Queries Get the max, mean, and min for the temperature in the last hour: + ```sql SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s) ``` -### Troubleshooting +## Troubleshooting + +### sendRequest failure -#### sendRequest failure This indicates a problem communicating with the local Apex controller. If on Mac/Linux, try curl: + ```sh -$ curl apex.local/cgi-bin/status.xml +curl apex.local/cgi-bin/status.xml ``` + to isolate the problem. -#### parseXML errors +### parseXML errors + Ensure the XML being returned is valid. If you get valid XML back, open a bug request. -#### Missing fields/data +### Missing fields/data + The neptune_apex plugin is strict on its input to prevent any conversion errors. If you have fields in the status.xml output that are not converted to a metric, open a feature request and paste your whole status.xml -### Example Output +## Example Output -``` +```text neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 @@ -138,7 +141,7 @@ neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex ``` -### Contributing +## Contributing This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index c2bb05384d7c8..a8934bd01ee94 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -245,7 +245,7 @@ func findProbe(probe string, probes []probe) int { // returns a time.Time struct. 
func parseTime(val string, tz float64) (time.Time, error) { // Magic time constant from https://golang.org/pkg/time/#Parse - const TimeLayout = "01/02/2006 15:04:05 -0700" + const timeLayout = "01/02/2006 15:04:05 -0700" // Timezone offset needs to be explicit sign := '+' @@ -256,7 +256,7 @@ func parseTime(val string, tz float64) (time.Time, error) { // Build a time string with the timezone in a format Go can parse. tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) ts := fmt.Sprintf("%s %s", val, tzs) - t, err := time.Parse(TimeLayout, ts) + t, err := time.Parse(timeLayout, ts) if err != nil { return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) } diff --git a/plugins/inputs/net/NETSTAT_README.md b/plugins/inputs/net/NETSTAT_README.md index d0f39f5e400e6..95a9ad6124d2e 100644 --- a/plugins/inputs/net/NETSTAT_README.md +++ b/plugins/inputs/net/NETSTAT_README.md @@ -2,7 +2,7 @@ This plugin collects TCP connections state and UDP socket counts by using `lsof`. -### Configuration: +## Configuration ``` toml # Collect TCP connections state and UDP socket counts @@ -10,7 +10,7 @@ This plugin collects TCP connections state and UDP socket counts by using `lsof` # no configuration ``` -# Measurements: +## Measurements Supported TCP Connection states are follows. @@ -27,12 +27,14 @@ Supported TCP Connection states are follows. - closing - none -### TCP Connection State measurements: +## TCP Connection State measurements Meta: + - units: counts Measurement names: + - tcp_established - tcp_syn_sent - tcp_syn_recv @@ -48,10 +50,12 @@ Measurement names: If there are no connection on the state, the metric is not counted. -### UDP socket counts measurements: +## UDP socket counts measurements Meta: + - units: counts Measurement names: + - udp_socket diff --git a/plugins/inputs/net/NET_README.md b/plugins/inputs/net/NET_README.md index d2571d29e9ede..243293b93532d 100644 --- a/plugins/inputs/net/NET_README.md +++ b/plugins/inputs/net/NET_README.md @@ -2,7 +2,7 @@ This plugin gathers metrics about network interface and protocol usage (Linux only). -### Configuration: +## Configuration ```toml # Gather metrics about network interfaces @@ -21,7 +21,7 @@ This plugin gathers metrics about network interface and protocol usage (Linux on ## ``` -### Measurements & Fields: +## Measurements & Fields The fields from this plugin are gathered in the _net_ measurement. @@ -42,14 +42,14 @@ Under freebsd/openbsd and darwin the plugin uses netstat. Additionally, for the time being _only under Linux_, the plugin gathers system wide stats for different network protocols using /proc/net/snmp (tcp, udp, icmp, etc.). Explanation of the different metrics exposed by snmp is out of the scope of this document. The best way to find information would be tracing the constants in the Linux kernel source [here](https://elixir.bootlin.com/linux/latest/source/net/ipv4/proc.c) and their usage. If /proc/net/snmp cannot be read for some reason, telegraf ignores the error silently. -### Tags: +## Tags * Net measurements have the following tags: - - interface (the interface from which metrics are gathered) + * interface (the interface from which metrics are gathered) Under Linux the system wide protocol metrics have the interface=all tag. -### Sample Queries: +## Sample Queries You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. 
The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values. @@ -57,15 +57,15 @@ You can use the following query to get the upload/download traffic rate per seco SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0); ``` -### Example Output: +## Example Output -``` +```shell # All platforms $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 ``` -``` +```shell # Linux $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 3c4c3c7ef8d84..9ef3b6fb0d91c 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -6,7 +6,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/net" + "github.com/shirou/gopsutil/v3/net" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 2c492408beef2..e64a7ebf605cc 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -3,7 +3,7 @@ The input plugin test UDP/TCP connections response time and can optional verify text in the response. -### Configuration: +## Configuration ```toml # Collect response time of a TCP or UDP connection @@ -33,7 +33,7 @@ verify text in the response. # fielddrop = ["result_type", "string_found"] ``` -### Metrics: +## Metrics - net_response - tags: @@ -47,9 +47,9 @@ verify text in the response. 
- result_type (string) **DEPRECATED in 1.7; use result tag** - string_found (boolean) **DEPRECATED in 1.4; use result tag** -### Example Output: +## Example Output -``` +```shell net_response,port=8086,protocol=tcp,result=success,server=localhost response_time=0.000092948,result_code=0i,result_type="success" 1525820185000000000 net_response,port=8080,protocol=tcp,result=connection_failed,server=localhost result_code=2i,result_type="connection_failed" 1525820088000000000 net_response,port=8080,protocol=udp,result=read_failed,server=localhost result_code=3i,result_type="read_failed",string_found=false 1525820088000000000 diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index a7fcec4353c81..043a3c44760ed 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -17,10 +17,10 @@ type ResultType uint64 const ( Success ResultType = 0 - Timeout = 1 - ConnectionFailed = 2 - ReadFailed = 3 - StringMismatch = 4 + Timeout ResultType = 1 + ConnectionFailed ResultType = 2 + ReadFailed ResultType = 3 + StringMismatch ResultType = 4 ) // NetResponse struct @@ -120,8 +120,8 @@ func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, er setResult(ReadFailed, fields, tags, n.Expect) } else { // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(data) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -186,8 +186,8 @@ func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, er } // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(buf)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(string(buf)) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -232,22 +232,25 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} var returnTags map[string]string + // Gather data - if n.Protocol == "tcp" { + switch n.Protocol { + case "tcp": returnTags, fields, err = n.TCPGather() if err != nil { return err } tags["protocol"] = "tcp" - } else if n.Protocol == "udp" { + case "udp": returnTags, fields, err = n.UDPGather() if err != nil { return err } tags["protocol"] = "udp" - } else { + default: return errors.New("bad protocol") } + // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md index 1ed1a08424bbb..b28ceb32cc754 100644 --- a/plugins/inputs/nfsclient/README.md +++ b/plugins/inputs/nfsclient/README.md @@ -5,7 +5,7 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed **NOTE** Many of the metrics, even if tagged with a mount point, are really _per-server_. Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and `nfs01:/vol/foo/baz`, there will be two near identical entries in /proc/self/mountstats. This is a limitation of the metrics exposed by the kernel, not the telegraf plugin. 
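One detail worth calling out from the net_response changes above: in the original constant block only `Success` was declared as `ResultType`; because the later constants had their own `= N` expressions they did not inherit that type and remained plain untyped integers, which is what the revised declarations fix. A small, self-contained illustration of the difference follows; it is not taken from the plugin.

```go
package main

import "fmt"

// ResultType mirrors the named type used by the net_response plugin; the rest
// of this file only illustrates Go constant typing and is not plugin code.
type ResultType uint64

const (
	Success ResultType = 0
	Timeout            = 1 // has its own expression, so it is an untyped int, not a ResultType
)

func main() {
	var a interface{} = Success
	var b interface{} = Timeout
	fmt.Printf("%T\n", a) // main.ResultType
	fmt.Printf("%T\n", b) // int -- the untyped constant defaults to int here
}
```

With an explicit `ResultType` on every constant, values such as `Timeout` carry the named type everywhere, including when stored in an `interface{}`.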
-### Configuration +## Configuration ```toml [[inputs.nfsclient]] @@ -35,7 +35,9 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed # include_operations = [] # exclude_operations = [] ``` -#### Configuration Options + +### Configuration Options + - **fullstat** bool: Collect per-operation type metrics. Defaults to false. - **include_mounts** list(string): gather metrics for only these mounts. Default is to watch all mounts. - **exclude_mounts** list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. @@ -44,121 +46,119 @@ If `fullstat` is set, a great deal of additional metrics are collected, detailed *N.B.* the `include_mounts` and `exclude_mounts` arguments are both applied to the local mount location (e.g. /mnt/NFS), not the server export (e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. -#### References +### References + 1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) 2. [net/sunrpc/stats.c - Linux source code](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/stats.c) 3. [What is in /proc/self/mountstats for NFS mounts: an introduction](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex) 4. [The xprt: data for NFS mounts in /proc/self/mountstats](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsXprt) +## Metrics - -### Metrics - -#### Fields +### Fields - nfsstat - - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) - - ops (integer, count) - The number of operations of this type executed. - - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) - - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. - - rtt (integer, miliseconds) - The round-trip time for operations. + - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent *and* received, including overhead *and* payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) + - ops (integer, count) - The number of operations of this type executed. + - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) + - exe (integer, miliseconds) - The number of miliseconds it took to process the operations. + - rtt (integer, miliseconds) - The round-trip time for operations. In addition enabling `fullstat` will make many more metrics available. -#### Tags +### Tags - All measurements have the following tags: - - mountpoint - The local mountpoint, for instance: "/var/www" - - serverexport - The full server export, for instance: "nfsserver.example.org:/export" + - mountpoint - The local mountpoint, for instance: "/var/www" + - serverexport - The full server export, for instance: "nfsserver.example.org:/export" - Measurements nfsstat and nfs_ops will also include: - - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. A complete list of operations supported is visible in `/proc/self/mountstats`. - - + - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. 
A complete list of operations supported is visible in `/proc/self/mountstats`. -### Additional metrics +## Additional metrics When `fullstat` is true, additional measurements are collected. Tags are the same as above. -#### NFS Operations +### NFS Operations Most descriptions come from Reference [[3](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex)] and `nfs_iostat.h`. Field order and names are the same as in `/proc/self/mountstats` and the Kernel source. Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes occasionally. - nfs_bytes - - fields: - - normalreadbytes (int, bytes): Bytes read from the server via `read()` - - normalwritebytes (int, bytes): Bytes written to the server via `write()` - - directreadbytes (int, bytes): Bytes read with O_DIRECT set - - directwritebytes (int, bytes): Bytes written with O_DIRECT set - - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) - - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) - - readpages (int, count): Number of pages read - - writepages (int, count): Number of pages written + - fields: + - normalreadbytes (int, bytes): Bytes read from the server via `read()` + - normalwritebytes (int, bytes): Bytes written to the server via `write()` + - directreadbytes (int, bytes): Bytes read with O_DIRECT set + - directwritebytes (int, bytes): Bytes written with O_DIRECT set + - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) + - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) + - readpages (int, count): Number of pages read + - writepages (int, count): Number of pages written - nfs_events (Per-event metrics) - - fields: - - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. - - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. - - datainvalidates (int, count): How many times an inode had its cached data thrown out. - - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. - - vfsopen (int, count): How many times files or directories have been `open()`'d. - - vfslookup (int, count): How many name lookups in directories there have been. - - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") - - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. - - vfsreadpage (int, count): Number of pages read. - - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). - - vfswritepage (int, count): Number of pages written. - - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) - - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") - - vfssetattr (int, count): How many times we've set attributes on inodes. - - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. - - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. - - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). - - vfsrelease (int, count): Number of calls to `close()`. - - congestionwait (int, count): Believe unused by the Linux kernel, but it is part of the NFS spec. 
- - setattrtrunc (int, count): How many times files have had their size truncated. - - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. - - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) - - shortreads (int, count): Number of times the NFS server returned less data than requested. - - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. - - delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused) - - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. - - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. + - fields: + - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. + - datainvalidates (int, count): How many times an inode had its cached data thrown out. + - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. + - vfsopen (int, count): How many times files or directories have been `open()`'d. + - vfslookup (int, count): How many name lookups in directories there have been. + - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") + - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. + - vfsreadpage (int, count): Number of pages read. + - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage (int, count): Number of pages written. + - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr (int, count): How many times we've set attributes on inodes. + - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. + - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. + - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease (int, count): Number of calls to `close()`. + - congestionwait (int, count): Believe unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc (int, count): How many times files have had their size truncated. + - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads (int, count): Number of times the NFS server returned less data than requested. + - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. + - delay (int, count): Occurances of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. + - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. - nfs_xprt_tcp - - fields: - - bind_count (int, count): Number of _completely new_ mounts to this server (sometimes 0?) 
- - connect_count (int, count): How many times the client has connected to the server in question - - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. - - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. - - rpcsends (int, count): How many RPC requests this mount has sent to the server. - - rpcreceives (int, count): How many RPC replies this mount has received from the server. - - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. - - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) - - backlogutil (int, count): Cumulative backlog count + - fields: + - bind_count (int, count): Number of_completely new_ mounts to this server (sometimes 0?) + - connect_count (int, count): How many times the client has connected to the server in question + - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. + - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends (int, count): How many RPC requests this mount has sent to the server. + - rpcreceives (int, count): How many RPC replies this mount has received from the server. + - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. + - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil (int, count): Cumulative backlog count - nfs_xprt_udp - - fields: - - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] + - fields: + - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] - nfs_ops - - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): - - ops (int, count): Total operations of this type. - - trans (int, count): Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). - - timeouts (int, count): Number of major timeouts. - - bytes_sent (int, count): Bytes received, including headers (should also be close to on-wire size). - - bytes_recv (int, count): Bytes sent, including headers (should be close to on-wire size). - - queue_time (int, milliseconds): Cumulative time a request waited in the queue before sending this OP type. - - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. - - total_time (int, milliseconds): Cumulative time a request waited in the queue before sending. - - errors (int, count): Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 - - -### Example Output + - fields (In all cases, the `operations` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): + - ops (int, count): Total operations of this type. + - trans (int, count): Total transmissions of this type, including retransmissions: `OP_ops - OP_trans = total_retransmissions` (lower is better). + - timeouts (int, count): Number of major timeouts. + - bytes_sent (int, count): Bytes received, including headers (should also be close to on-wire size). 
+ - bytes_recv (int, count): Bytes sent, including headers (should be close to on-wire size). + - queue_time (int, milliseconds): Cumulative time a request waited in the queue before sending this OP type. + - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. + - total_time (int, milliseconds): Cumulative time a request waited in the queue before sending. + - errors (int, count): Total number operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + +## Example Output + For basic metrics showing server-wise read and write data. -``` + +```shell nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 @@ -168,7 +168,7 @@ For `fullstat=true` metrics, which includes additional measurements for `nfs_byt Additionally, per-OP metrics are collected, with examples for READ, LOOKUP, and NULL shown. Please refer to `/proc/self/mountstats` for a list of supported NFS operations, as it changes as it changes periodically. -``` +```shell nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 @@ -177,5 +177,3 @@ nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0 nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 ``` - - diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index bc4916507ef25..4859aa74c96f9 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -1,6 +1,6 @@ # Nginx Input Plugin -### Configuration: +## Configuration ```toml # Read Nginx's basic status information (ngx_http_stub_status_module) @@ -19,26 +19,27 @@ response_timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields - Measurement - - accepts - - active - - handled - - reading - - requests - - waiting - - writing + - accepts + - active + - handled + - reading + - requests + - waiting + - writing -### Tags: +## Tags - 
All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx]] ## An array of Nginx stub_status URI to gather stats. @@ -46,12 +47,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx --test ``` It produces: -``` + +```shell * Plugin: nginx, Collection 1 > nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331 ``` diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index db30304dcc15a..5a947e7e202e0 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const nginxSampleResponse = ` @@ -33,7 +33,7 @@ func TestNginxTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/nginx_plus/README.md b/plugins/inputs/nginx_plus/README.md index cb0713ed848ff..5afb82d2f7c7e 100644 --- a/plugins/inputs/nginx_plus/README.md +++ b/plugins/inputs/nginx_plus/README.md @@ -5,7 +5,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use Structures for Nginx Plus have been built based on history of [status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html) -### Configuration: +## Configuration ```toml # Read Nginx Plus' advanced status information @@ -14,7 +14,7 @@ Structures for Nginx Plus have been built based on history of urls = ["http://localhost/status"] ``` -### Measurements & Fields: +## Measurements & Fields - nginx_plus_processes - respawned @@ -59,8 +59,7 @@ Structures for Nginx Plus have been built based on history of - fails - downtime - -### Tags: +## Tags - nginx_plus_processes, nginx_plus_connections, nginx_plus_ssl, nginx_plus_requests - server @@ -78,9 +77,10 @@ Structures for Nginx Plus have been built based on history of - port - upstream_address -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus]] ## An array of Nginx Plus status URIs to gather stats. @@ -88,12 +88,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus -test ``` It produces: -``` + +```text * Plugin: inputs.nginx_plus, Collection 1 > nginx_plus_processes,server=localhost,port=12021,host=word.local respawned=0i 1505782513000000000 > nginx_plus_connections,server=localhost,port=12021,host=word.local accepted=5535735212i,dropped=10140186i,active=9541i,idle=67540i 1505782513000000000 diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md index 57cb127b5dd12..3d8d9bf07a6fc 100644 --- a/plugins/inputs/nginx_plus_api/README.md +++ b/plugins/inputs/nginx_plus_api/README.md @@ -2,7 +2,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). 
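An aside, not part of this change: the `ngx_http_stub_status_module` page that the plain nginx input (documented earlier in this diff) scrapes is a small fixed-format text response, and the fields listed there (accepts, active, handled, reading, requests, waiting, writing) map onto it directly. Below is a minimal, self-contained Go sketch of that mapping, assuming the standard three-line stub_status layout and reusing the numbers from the example output above; it is an illustration only, not the plugin's actual parser.

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// stubStatus mirrors the fields the nginx input reports from stub_status.
type stubStatus struct {
	Active, Accepts, Handled, Requests, Reading, Writing, Waiting int64
}

// parseStubStatus parses the classic three-line stub_status text (assumed layout).
func parseStubStatus(body string) (stubStatus, error) {
	var s stubStatus
	sc := bufio.NewScanner(strings.NewReader(body))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		switch {
		case strings.HasPrefix(line, "Active connections:"):
			_, _ = fmt.Sscanf(line, "Active connections: %d", &s.Active)
		case strings.HasPrefix(line, "Reading:"):
			_, _ = fmt.Sscanf(line, "Reading: %d Writing: %d Waiting: %d", &s.Reading, &s.Writing, &s.Waiting)
		default:
			// The bare counter line under the "server accepts handled requests" header.
			if f := strings.Fields(line); len(f) == 3 {
				s.Accepts, _ = strconv.ParseInt(f[0], 10, 64)
				s.Handled, _ = strconv.ParseInt(f[1], 10, 64)
				s.Requests, _ = strconv.ParseInt(f[2], 10, 64)
			}
		}
	}
	return s, sc.Err()
}

func main() {
	body := "Active connections: 2 \nserver accepts handled requests\n 605 605 12132 \nReading: 0 Writing: 1 Waiting: 1 \n"
	s, _ := parseStubStatus(body)
	fmt.Printf("%+v\n", s) // {Active:2 Accepts:605 Handled:605 Requests:12132 Reading:0 Writing:1 Waiting:1}
}
```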
-### Configuration: +## Configuration ```toml # Read Nginx Plus API advanced status information @@ -13,7 +13,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use # api_version = 3 ``` -### Migration from Nginx Plus (Status) input plugin +## Migration from Nginx Plus (Status) input plugin | Nginx Plus | Nginx Plus API | |---------------------------------|--------------------------------------| @@ -29,7 +29,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use | nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | | nginx.stream.zone | nginx_plus_api_stream_server_zones | -### Measurements by API version +## Measurements by API version | Measurement | API version (api_version) | |--------------------------------------|---------------------------| @@ -47,7 +47,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use | nginx_plus_api_http_location_zones | >= 5 | | nginx_plus_api_resolver_zones | >= 5 | -### Measurements & Fields: +## Measurements & Fields - nginx_plus_api_processes - respawned @@ -171,7 +171,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - timedout - unknown -### Tags: +## Tags - nginx_plus_api_processes, nginx_plus_api_connections, nginx_plus_api_ssl, nginx_plus_api_http_requests - source @@ -198,9 +198,10 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - source - port -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus_api]] ## An array of Nginx Plus API URIs to gather stats. @@ -208,12 +209,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus_api -test ``` It produces: -``` + +```text > nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000 > nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000 > nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000 diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 5cd7e76aec439..81f747d86d825 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -49,11 +49,11 @@ func addError(acc telegraf.Accumulator, err error) { } func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) - resp, err := n.client.Get(url) + address := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) + resp, err := n.client.Get(address) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", address, err) } defer resp.Body.Close() @@ -64,7 +64,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { // features are either optional, or only available in some versions return nil, errNotFound default: - return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", address, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] @@ -77,7 +77,7 @@ func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { return 
body, nil default: - return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + return nil, fmt.Errorf("%s returned unexpected content type %s", address, contentType) } } diff --git a/plugins/inputs/nginx_sts/README.md b/plugins/inputs/nginx_sts/README.md index 935bc9af83c62..7d23fd029dfb5 100644 --- a/plugins/inputs/nginx_sts/README.md +++ b/plugins/inputs/nginx_sts/README.md @@ -1,7 +1,7 @@ # Nginx Stream STS Input Plugin This plugin gathers Nginx status using external virtual host traffic status -module - https://github.com/vozlt/nginx-module-sts. This is an Nginx module +module - . This is an Nginx module that provides access to stream host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. For module configuration details please see its @@ -9,7 +9,7 @@ monitoring of Nginx plus. For module configuration details please see its Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[inputs.nginx_sts]] @@ -27,7 +27,7 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - nginx_sts_connections - tags: @@ -42,7 +42,7 @@ Telegraf minimum version: Telegraf 1.15.0 - handled - requests -+ nginx_sts_server +- nginx_sts_server - tags: - source - port @@ -77,7 +77,7 @@ Telegraf minimum version: Telegraf 1.15.0 - session_msec_counter - session_msec -+ nginx_sts_upstream +- nginx_sts_upstream - tags: - source - port @@ -106,9 +106,9 @@ Telegraf minimum version: Telegraf 1.15.0 - backup - down -### Example Output: +## Example Output -``` +```shell nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=1.2.3.4:8080 upstream_connect_msec_counter=0i,out_bytes=0i,down=false,connects=0i,session_msec=0i,upstream_session_msec=0i,upstream_session_msec_counter=0i,upstream_connect_msec=0i,upstream_firstbyte_msec_counter=0i,response_3xx_count=0i,session_msec_counter=0i,weight=1i,max_fails=1i,backup=false,upstream_firstbyte_msec=0i,in_bytes=0i,response_1xx_count=0i,response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,fail_timeout=10i 1584699180000000000 nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=9.8.7.6:8080 upstream_firstbyte_msec_counter=0i,response_2xx_count=0i,down=false,upstream_session_msec_counter=0i,out_bytes=0i,response_5xx_count=0i,weight=1i,max_fails=1i,fail_timeout=10i,connects=0i,session_msec_counter=0i,upstream_session_msec=0i,in_bytes=0i,response_1xx_count=0i,response_3xx_count=0i,response_4xx_count=0i,session_msec=0i,upstream_connect_msec=0i,upstream_connect_msec_counter=0i,upstream_firstbyte_msec=0i,backup=false 1584699180000000000 nginx_sts_server,host=localhost,port=80,source=127.0.0.1,zone=* response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,session_msec_counter=0i,in_bytes=0i,out_bytes=0i,session_msec=0i,response_1xx_count=0i,response_3xx_count=0i,connects=0i 1584699180000000000 diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md index 58bee07be931d..6e5974ebf801e 100644 --- a/plugins/inputs/nginx_upstream_check/README.md +++ b/plugins/inputs/nginx_upstream_check/README.md @@ -1,6 +1,6 @@ # Nginx Upstream Check Input Plugin -Read the status output of the nginx_upstream_check (https://github.com/yaoweibin/nginx_upstream_check_module). +Read the status output of the nginx_upstream_check (). 
This module can periodically check the servers in the Nginx's upstream with configured request and interval to determine if the server is still available. If checks are failed the server is marked as "down" and will not receive any requests until the check will pass and a server will be marked as "up" again. @@ -8,7 +8,7 @@ until the check will pass and a server will be marked as "up" again. The status page displays the current status of all upstreams and servers as well as number of the failed and successful checks. This information can be exported in JSON format and parsed by this input. -### Configuration: +## Configuration ```toml ## An URL where Nginx Upstream check module is enabled @@ -39,36 +39,38 @@ checks. This information can be exported in JSON format and parsed by this input # insecure_skip_verify = false ``` -### Measurements & Fields: +## Measurements & Fields - Measurement - - fall (The number of failed server check attempts, counter) - - rise (The number of successful server check attempts, counter) - - status (The reporter server status as a string) - - status_code (The server status code. 1 - up, 2 - down, 0 - other) + - fall (The number of failed server check attempts, counter) + - rise (The number of successful server check attempts, counter) + - status (The reporter server status as a string) + - status_code (The server status code. 1 - up, 2 - down, 0 - other) The "status_code" field most likely will be the most useful one because it allows you to determine the current state of every server and, possible, add some monitoring to watch over it. InfluxDB can use string values and the "status" field can be used instead, but for most other monitoring solutions the integer code will be appropriate. -### Tags: +## Tags - All measurements have the following tags: - - name (The hostname or IP of the upstream server) - - port (The alternative check port, 0 if the default one is used) - - type (The check type, http/tcp) - - upstream (The name of the upstream block in the Nginx configuration) - - url (The status url used by telegraf) + - name (The hostname or IP of the upstream server) + - port (The alternative check port, 0 if the default one is used) + - type (The check type, http/tcp) + - upstream (The name of the upstream block in the Nginx configuration) + - url (The status url used by telegraf) -### Example Output: +## Example Output When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test ``` It produces: -``` + +```text * Plugin: nginx_upstream_check, Collection 1 > nginx_upstream_check,host=node1,name=192.168.0.1:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=0i,rise=100i,status="up",status_code=1i 1529088524000000000 > nginx_upstream_check,host=node2,name=192.168.0.2:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=100i,rise=0i,status="down",status_code=2i 1529088524000000000 diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 42e0cab62d53e..8ad8cc91e8a9e 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -121,7 +121,7 @@ func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { } // gatherJSONData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) error { +func (check 
*NginxUpstreamCheck) gatherJSONData(address string, value interface{}) error { var method string if check.Method != "" { method = check.Method @@ -129,7 +129,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e method = "GET" } - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest(method, address, nil) if err != nil { return err } @@ -153,7 +153,7 @@ func (check *NginxUpstreamCheck) gatherJSONData(url string, value interface{}) e if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -187,10 +187,10 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error return nil } -func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { +func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJSONData(url, checkData) + err := check.gatherJSONData(address, checkData) if err != nil { return err } @@ -201,7 +201,7 @@ func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegr "type": server.Type, "name": server.Name, "port": strconv.Itoa(int(server.Port)), - "url": url, + "url": address, } fields := map[string]interface{}{ diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md index fe9e7fd6ea62f..117b0ca24cb17 100644 --- a/plugins/inputs/nginx_vts/README.md +++ b/plugins/inputs/nginx_vts/README.md @@ -1,9 +1,9 @@ # Nginx Virtual Host Traffic (VTS) Input Plugin -This plugin gathers Nginx status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. +This plugin gathers Nginx status using external virtual host traffic status module - . This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. For module configuration details please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis). -### Configuration: +## Configuration ```toml # Read nginx status information using nginx-module-vts module @@ -12,7 +12,7 @@ For module configuration details please see its [documentation](https://github.c urls = ["http://localhost/status"] ``` -### Measurements & Fields: +## Measurements & Fields - nginx_vts_connections - active @@ -70,8 +70,7 @@ For module configuration details please see its [documentation](https://github.c - hit - scarce - -### Tags: +## Tags - nginx_vts_connections - source @@ -95,10 +94,10 @@ For module configuration details please see its [documentation](https://github.c - port - zone - -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_vts]] ## An array of Nginx status URIs to gather stats. 
@@ -106,12 +105,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_vts -test ``` It produces: -``` + +```shell nginx_vts_connections,source=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 nginx_vts_server,zone=example.com,port=80,host=localhost,source=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 nginx_vts_server,host=localhost,source=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 diff --git a/plugins/inputs/nomad/README.md b/plugins/inputs/nomad/README.md new file mode 100644 index 0000000000000..ea011e07e149a --- /dev/null +++ b/plugins/inputs/nomad/README.md @@ -0,0 +1,28 @@ +# Hashicorp Nomad Input Plugin + +The Nomad plugin collects metrics from every Nomad agent of the cluster. Telegraf may be present on every node and connect to the agent locally; in that case the URL should be something like `http://127.0.0.1:4646`. + +> Tested on Nomad 1.1.6 + +## Configuration + +```toml +[[inputs.nomad]] + ## URL for the Nomad agent + # url = "http://127.0.0.1:4646" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile +``` + +## Metrics + +Both Nomad servers and agents collect various metrics.
For every details, please have a look at Nomad following documentation: + +- [https://www.nomadproject.io/docs/operations/metrics](https://www.nomadproject.io/docs/operations/metrics) +- [https://www.nomadproject.io/docs/operations/telemetry](https://www.nomadproject.io/docs/operations/telemetry) diff --git a/plugins/inputs/nomad/nomad.go b/plugins/inputs/nomad/nomad.go new file mode 100644 index 0000000000000..4498b16db81d2 --- /dev/null +++ b/plugins/inputs/nomad/nomad.go @@ -0,0 +1,177 @@ +package nomad + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Nomad configuration object +type Nomad struct { + URL string `toml:"url"` + + ResponseTimeout config.Duration `toml:"response_timeout"` + + tls.ClientConfig + + roundTripper http.RoundTripper +} + +const timeLayout = "2006-01-02 15:04:05 -0700 MST" + +var sampleConfig = ` + ## URL for the Nomad agent + # url = "http://127.0.0.1:4646" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile +` + +func init() { + inputs.Add("nomad", func() telegraf.Input { + return &Nomad{ + ResponseTimeout: config.Duration(5 * time.Second), + } + }) +} + +// SampleConfig returns a sample config +func (n *Nomad) SampleConfig() string { + return sampleConfig +} + +// Description returns a description of the plugin +func (n *Nomad) Description() string { + return "Read metrics from the Nomad API" +} + +func (n *Nomad) Init() error { + if n.URL == "" { + n.URL = "http://127.0.0.1:4646" + } + + tlsCfg, err := n.ClientConfig.TLSConfig() + if err != nil { + return fmt.Errorf("setting up TLS configuration failed: %v", err) + } + + n.roundTripper = &http.Transport{ + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: tlsCfg, + ResponseHeaderTimeout: time.Duration(n.ResponseTimeout), + } + + return nil +} + +// Gather, collects metrics from Nomad endpoint +func (n *Nomad) Gather(acc telegraf.Accumulator) error { + summaryMetrics := &MetricsSummary{} + err := n.loadJSON(n.URL+"/v1/metrics", summaryMetrics) + if err != nil { + return err + } + + err = buildNomadMetrics(acc, summaryMetrics) + if err != nil { + return err + } + + return nil +} + +func (n *Nomad) loadJSON(url string, v interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + resp, err := n.roundTripper.RoundTrip(req) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return fmt.Errorf("error parsing json response: %s", err) + } + + return nil +} + +// buildNomadMetrics, it builds all the metrics and adds them to the accumulator) +func buildNomadMetrics(acc telegraf.Accumulator, summaryMetrics *MetricsSummary) error { + t, err := time.Parse(timeLayout, summaryMetrics.Timestamp) + if err != nil { + return fmt.Errorf("error parsing time: %s", err) + } + + for _, counters := range summaryMetrics.Counters { + tags := counters.DisplayLabels + + fields := map[string]interface{}{ + "count": counters.Count, + "rate": counters.Rate, + "sum": counters.Sum, + "sumsq": 
counters.SumSq, + "min": counters.Min, + "max": counters.Max, + "mean": counters.Mean, + } + acc.AddCounter(counters.Name, fields, tags, t) + } + + for _, gauges := range summaryMetrics.Gauges { + tags := gauges.DisplayLabels + + fields := map[string]interface{}{ + "value": gauges.Value, + } + + acc.AddGauge(gauges.Name, fields, tags, t) + } + + for _, points := range summaryMetrics.Points { + tags := make(map[string]string) + + fields := map[string]interface{}{ + "value": points.Points, + } + + acc.AddFields(points.Name, fields, tags, t) + } + + for _, samples := range summaryMetrics.Samples { + tags := samples.DisplayLabels + + fields := map[string]interface{}{ + "count": samples.Count, + "rate": samples.Rate, + "sum": samples.Sum, + "stddev": samples.Stddev, + "sumsq": samples.SumSq, + "min": samples.Min, + "max": samples.Max, + "mean": samples.Mean, + } + acc.AddCounter(samples.Name, fields, tags, t) + } + + return nil +} diff --git a/plugins/inputs/nomad/nomad_metrics.go b/plugins/inputs/nomad/nomad_metrics.go new file mode 100644 index 0000000000000..72445df0b83ae --- /dev/null +++ b/plugins/inputs/nomad/nomad_metrics.go @@ -0,0 +1,53 @@ +package nomad + +import ( + "time" +) + +type MetricsSummary struct { + Timestamp string `json:"timestamp"` + Gauges []GaugeValue `json:"gauges"` + Points []PointValue `json:"points"` + Counters []SampledValue `json:"counters"` + Samples []SampledValue `json:"samples"` +} + +type GaugeValue struct { + Name string `json:"name"` + Hash string `json:"-"` + Value float32 `json:"value"` + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string `json:"name"` + Points []float32 `json:"points"` +} + +type SampledValue struct { + Name string `json:"name"` + Hash string `json:"-"` + *AggregateSample + Mean float64 `json:"mean"` + Stddev float64 `json:"stddev"` + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type AggregateSample struct { + Count int `json:"count"` + Rate float64 `json:"rate"` + Sum float64 `json:"sum"` + SumSq float64 `json:"-"` + Min float64 `json:"min"` + Max float64 `json:"max"` + LastUpdated time.Time `json:"-"` +} + +type Label struct { + Name string `json:"name"` + Value string `json:"value"` +} diff --git a/plugins/inputs/nomad/nomad_test.go b/plugins/inputs/nomad/nomad_test.go new file mode 100644 index 0000000000000..49e39e3b7f531 --- /dev/null +++ b/plugins/inputs/nomad/nomad_test.go @@ -0,0 +1,104 @@ +package nomad + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestNomadStats(t *testing.T) { + var applyTests = []struct { + name string + expected []telegraf.Metric + }{ + { + name: "Metrics", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nomad.nomad.rpc.query", + map[string]string{ + "host": "node1", + }, + map[string]interface{}{ + "count": int(7), + "max": float64(1), + "min": float64(1), + "mean": float64(1), + "rate": float64(0.7), + "sum": float64(7), + "sumsq": float64(0), + }, + time.Unix(1636843140, 0), + 1, + ), + testutil.MustMetric( + "nomad.client.allocated.cpu", + map[string]string{ + "node_scheduling_eligibility": "eligible", + "host": "node1", + "node_id": "2bbff078-8473-a9de-6c5e-42b4e053e12f", + "datacenter": "dc1", + "node_class": "none", + "node_status": "ready", + }, + map[string]interface{}{ + "value": float32(500), + 
}, + time.Unix(1636843140, 0), + 2, + ), + testutil.MustMetric( + "nomad.memberlist.gossip", + map[string]string{ + "host": "node1", + }, + map[string]interface{}{ + "count": int(20), + "max": float64(0.03747599944472313), + "mean": float64(0.013159099989570678), + "min": float64(0.003459000028669834), + "rate": float64(0.026318199979141355), + "stddev": float64(0.009523742715522742), + "sum": float64(0.26318199979141355), + "sumsq": float64(0), + }, + time.Unix(1636843140, 0), + 1, + ), + }, + }, + } + + for _, tt := range applyTests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v1/metrics" { + w.WriteHeader(http.StatusOK) + responseKeyMetrics, _ := ioutil.ReadFile("testdata/response_key_metrics.json") + _, err := fmt.Fprintln(w, string(responseKeyMetrics)) + require.NoError(t, err) + } + })) + defer ts.Close() + + plugin := &Nomad{ + URL: ts.URL, + } + err := plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/nomad/testdata/response_key_metrics.json b/plugins/inputs/nomad/testdata/response_key_metrics.json new file mode 100644 index 0000000000000..4e9879bdd7afa --- /dev/null +++ b/plugins/inputs/nomad/testdata/response_key_metrics.json @@ -0,0 +1,48 @@ +{ + "Counters": [ + { + "Count": 7, + "Labels": { + "host": "node1" + }, + "Max": 1, + "Mean": 1, + "Min": 1, + "Name": "nomad.nomad.rpc.query", + "Rate": 0.7, + "Stddev": 0, + "Sum": 7 + } + ], + "Gauges": [ + { + "Labels": { + "node_scheduling_eligibility": "eligible", + "host": "node1", + "node_id": "2bbff078-8473-a9de-6c5e-42b4e053e12f", + "datacenter": "dc1", + "node_class": "none", + "node_status": "ready" + }, + "Name": "nomad.client.allocated.cpu", + "Value": 500 + } + ], + "Points": [], + "Samples": [ + { + "Count": 20, + "Labels": { + "host": "node1" + }, + "Max": 0.03747599944472313, + "Mean": 0.013159099989570678, + "Min": 0.003459000028669834, + "Name": "nomad.memberlist.gossip", + "Rate": 0.026318199979141355, + "Stddev": 0.009523742715522742, + "Sum": 0.26318199979141355 + } + ], + "Timestamp": "2021-11-13 22:39:00 +0000 UTC" +} diff --git a/plugins/inputs/nsd/README.md b/plugins/inputs/nsd/README.md index 2d7f8833c2db8..51c45b1f4fb66 100644 --- a/plugins/inputs/nsd/README.md +++ b/plugins/inputs/nsd/README.md @@ -4,7 +4,7 @@ This plugin gathers stats from [NSD](https://www.nlnetlabs.nl/projects/nsd/about) - an authoritative DNS name server. -### Configuration: +## Configuration ```toml # A plugin to collect stats from the NSD DNS resolver @@ -26,7 +26,7 @@ server. # timeout = "1s" ``` -#### Permissions: +### Permissions It's important to note that this plugin references nsd-control, which may require additional permissions to execute successfully. Depending on the @@ -34,6 +34,7 @@ user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. 
**Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -46,12 +47,14 @@ telegraf : telegraf nsd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.nsd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -62,11 +65,11 @@ Defaults!NSDCONTROLCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Metrics: +## Metrics This is the full list of stats provided by nsd-control. In the output, the dots in the nsd-control stat name are replaced by underscores (see -https://www.nlnetlabs.nl/documentation/nsd/nsd-control/ for details). + for details). - nsd - fields: diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index f75f700eaa2f9..6c8998129cf90 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -61,20 +61,20 @@ func (s *NSD) SampleConfig() string { } // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server string, configFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if server != "" { + host, port, err := net.SplitHostPort(server) if err == nil { - Server = host + "@" + port + server = host + "@" + port } - cmdArgs = append([]string{"-s", Server}, cmdArgs...) + cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) } cmd := exec.Command(cmdName, cmdArgs...) 
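An aside on the nsd.go hunk above, not part of the diff: `nsdRunner` turns the configured `host:port` server address into the `host@port` form that `nsd-control -s` expects before assembling the command arguments. A minimal Go sketch of that conversion, with a hypothetical control address:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	server := "127.0.0.1:8952" // hypothetical nsd-control address; any "host:port" works
	if host, port, err := net.SplitHostPort(server); err == nil {
		server = host + "@" + port // nsd-control expects host@port for -s
	}
	cmdArgs := append([]string{"-s", server}, "stats_noreset")
	fmt.Println(cmdArgs) // [-s 127.0.0.1@8952 stats_noreset]
}
```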
@@ -119,7 +119,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { fieldValue, err := strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v", + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) continue } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index d64cad7dcea63..74f4a14cf96fa 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,16 +3,13 @@ package nsd import ( "bytes" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,13 +23,13 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("nsd")) - assert.True(t, acc.HasMeasurement("nsd_servers")) + require.True(t, acc.HasMeasurement("nsd")) + require.True(t, acc.HasMeasurement("nsd_servers")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 99, acc.NFields()) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 99, acc.NFields()) acc.AssertContainsFields(t, "nsd", parsedFullOutput) acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) diff --git a/plugins/inputs/nsq/README.md b/plugins/inputs/nsq/README.md index 00c1089afe309..78ba28d49b11f 100644 --- a/plugins/inputs/nsq/README.md +++ b/plugins/inputs/nsq/README.md @@ -1,6 +1,6 @@ # NSQ Input Plugin -### Configuration: +## Configuration ```toml # Description diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index d1e7194bbd7e0..b10bfbf6f7b68 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -3,7 +3,7 @@ The [NSQ][nsq] consumer plugin reads from NSQD and creates metrics using one of the supported [input data formats][]. 
-### Configuration: +## Configuration ```toml # Read metrics from NSQD topic(s) diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index d5086862bbf7e..4c6d944746440 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -11,10 +11,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" "github.com/nsqio/go-nsq" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) // This test is modeled after the kafka consumer integration test @@ -36,7 +37,7 @@ func TestReadsMetricsFromNSQ(t *testing.T) { } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") - newMockNSQD(script, addr.String()) + newMockNSQD(t, script, addr.String()) consumer := &NSQConsumer{ Log: testutil.Logger{}, @@ -76,6 +77,8 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { ticker := time.NewTicker(5 * time.Millisecond) defer ticker.Stop() counter := 0 + + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: @@ -89,16 +92,15 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { } } -func newMockNSQD(script []instruction, addr string) *mockNSQD { +func newMockNSQD(t *testing.T, script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) - } + require.NoError(t, err, "listen (%s) failed", n.tcpAddr.String()) + n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) @@ -139,6 +141,7 @@ func (n *mockNSQD) handle(conn net.Conn) { buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { + //nolint:revive // log.Fatalf called intentionally log.Fatalf("ERROR: failed to read protocol version - %s", err) } @@ -171,14 +174,14 @@ func (n *mockNSQD) handle(conn net.Conn) { l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } case bytes.Equal(params[0], []byte("RDY")): diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md index c0ebc2654f5b8..0e2fa217300c7 100644 --- a/plugins/inputs/nstat/README.md +++ b/plugins/inputs/nstat/README.md @@ -2,10 +2,11 @@ Plugin collects network metrics from `/proc/net/netstat`, `/proc/net/snmp` and `/proc/net/snmp6` files -### Configuration +## Configuration The plugin firstly tries to read file paths from config values if it is empty, then it reads from env variables. + * `PROC_NET_NETSTAT` * `PROC_NET_SNMP` * `PROC_NET_SNMP6` @@ -15,331 +16,335 @@ then it tries to read the proc root from env - `PROC_ROOT`, and sets `/proc` as a root path if `PROC_ROOT` is also empty. Then appends default file paths: + * `/net/netstat` * `/net/snmp` * `/net/snmp6` So if nothing is given, no paths in config and in env vars, the plugin takes the default paths. 
+ * `/proc/net/netstat` * `/proc/net/snmp` * `/proc/net/snmp6` The sample config file + ```toml [[inputs.nstat]] ## file paths ## e.g: /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 - # proc_net_netstat = "" - # proc_net_snmp = "" - # proc_net_snmp6 = "" + # proc_net_netstat = "" + # proc_net_snmp = "" + # proc_net_snmp6 = "" ## dump metrics with 0 values too - # dump_zeros = true + # dump_zeros = true ``` In case that `proc_net_snmp6` path doesn't exist (e.g. IPv6 is not enabled) no error would be raised. -### Measurements & Fields +## Measurements & Fields + +* nstat + * Icmp6InCsumErrors + * Icmp6InDestUnreachs + * Icmp6InEchoReplies + * Icmp6InEchos + * Icmp6InErrors + * Icmp6InGroupMembQueries + * Icmp6InGroupMembReductions + * Icmp6InGroupMembResponses + * Icmp6InMLDv2Reports + * Icmp6InMsgs + * Icmp6InNeighborAdvertisements + * Icmp6InNeighborSolicits + * Icmp6InParmProblems + * Icmp6InPktTooBigs + * Icmp6InRedirects + * Icmp6InRouterAdvertisements + * Icmp6InRouterSolicits + * Icmp6InTimeExcds + * Icmp6OutDestUnreachs + * Icmp6OutEchoReplies + * Icmp6OutEchos + * Icmp6OutErrors + * Icmp6OutGroupMembQueries + * Icmp6OutGroupMembReductions + * Icmp6OutGroupMembResponses + * Icmp6OutMLDv2Reports + * Icmp6OutMsgs + * Icmp6OutNeighborAdvertisements + * Icmp6OutNeighborSolicits + * Icmp6OutParmProblems + * Icmp6OutPktTooBigs + * Icmp6OutRedirects + * Icmp6OutRouterAdvertisements + * Icmp6OutRouterSolicits + * Icmp6OutTimeExcds + * Icmp6OutType133 + * Icmp6OutType135 + * Icmp6OutType143 + * IcmpInAddrMaskReps + * IcmpInAddrMasks + * IcmpInCsumErrors + * IcmpInDestUnreachs + * IcmpInEchoReps + * IcmpInEchos + * IcmpInErrors + * IcmpInMsgs + * IcmpInParmProbs + * IcmpInRedirects + * IcmpInSrcQuenchs + * IcmpInTimeExcds + * IcmpInTimestampReps + * IcmpInTimestamps + * IcmpMsgInType3 + * IcmpMsgOutType3 + * IcmpOutAddrMaskReps + * IcmpOutAddrMasks + * IcmpOutDestUnreachs + * IcmpOutEchoReps + * IcmpOutEchos + * IcmpOutErrors + * IcmpOutMsgs + * IcmpOutParmProbs + * IcmpOutRedirects + * IcmpOutSrcQuenchs + * IcmpOutTimeExcds + * IcmpOutTimestampReps + * IcmpOutTimestamps + * Ip6FragCreates + * Ip6FragFails + * Ip6FragOKs + * Ip6InAddrErrors + * Ip6InBcastOctets + * Ip6InCEPkts + * Ip6InDelivers + * Ip6InDiscards + * Ip6InECT0Pkts + * Ip6InECT1Pkts + * Ip6InHdrErrors + * Ip6InMcastOctets + * Ip6InMcastPkts + * Ip6InNoECTPkts + * Ip6InNoRoutes + * Ip6InOctets + * Ip6InReceives + * Ip6InTooBigErrors + * Ip6InTruncatedPkts + * Ip6InUnknownProtos + * Ip6OutBcastOctets + * Ip6OutDiscards + * Ip6OutForwDatagrams + * Ip6OutMcastOctets + * Ip6OutMcastPkts + * Ip6OutNoRoutes + * Ip6OutOctets + * Ip6OutRequests + * Ip6ReasmFails + * Ip6ReasmOKs + * Ip6ReasmReqds + * Ip6ReasmTimeout + * IpDefaultTTL + * IpExtInBcastOctets + * IpExtInBcastPkts + * IpExtInCEPkts + * IpExtInCsumErrors + * IpExtInECT0Pkts + * IpExtInECT1Pkts + * IpExtInMcastOctets + * IpExtInMcastPkts + * IpExtInNoECTPkts + * IpExtInNoRoutes + * IpExtInOctets + * IpExtInTruncatedPkts + * IpExtOutBcastOctets + * IpExtOutBcastPkts + * IpExtOutMcastOctets + * IpExtOutMcastPkts + * IpExtOutOctets + * IpForwDatagrams + * IpForwarding + * IpFragCreates + * IpFragFails + * IpFragOKs + * IpInAddrErrors + * IpInDelivers + * IpInDiscards + * IpInHdrErrors + * IpInReceives + * IpInUnknownProtos + * IpOutDiscards + * IpOutNoRoutes + * IpOutRequests + * IpReasmFails + * IpReasmOKs + * IpReasmReqds + * IpReasmTimeout + * TcpActiveOpens + * TcpAttemptFails + * TcpCurrEstab + * TcpEstabResets + * TcpExtArpFilter + * TcpExtBusyPollRxPackets + * 
TcpExtDelayedACKLocked + * TcpExtDelayedACKLost + * TcpExtDelayedACKs + * TcpExtEmbryonicRsts + * TcpExtIPReversePathFilter + * TcpExtListenDrops + * TcpExtListenOverflows + * TcpExtLockDroppedIcmps + * TcpExtOfoPruned + * TcpExtOutOfWindowIcmps + * TcpExtPAWSActive + * TcpExtPAWSEstab + * TcpExtPAWSPassive + * TcpExtPruneCalled + * TcpExtRcvPruned + * TcpExtSyncookiesFailed + * TcpExtSyncookiesRecv + * TcpExtSyncookiesSent + * TcpExtTCPACKSkippedChallenge + * TcpExtTCPACKSkippedFinWait2 + * TcpExtTCPACKSkippedPAWS + * TcpExtTCPACKSkippedSeq + * TcpExtTCPACKSkippedSynRecv + * TcpExtTCPACKSkippedTimeWait + * TcpExtTCPAbortFailed + * TcpExtTCPAbortOnClose + * TcpExtTCPAbortOnData + * TcpExtTCPAbortOnLinger + * TcpExtTCPAbortOnMemory + * TcpExtTCPAbortOnTimeout + * TcpExtTCPAutoCorking + * TcpExtTCPBacklogDrop + * TcpExtTCPChallengeACK + * TcpExtTCPDSACKIgnoredNoUndo + * TcpExtTCPDSACKIgnoredOld + * TcpExtTCPDSACKOfoRecv + * TcpExtTCPDSACKOfoSent + * TcpExtTCPDSACKOldSent + * TcpExtTCPDSACKRecv + * TcpExtTCPDSACKUndo + * TcpExtTCPDeferAcceptDrop + * TcpExtTCPDirectCopyFromBacklog + * TcpExtTCPDirectCopyFromPrequeue + * TcpExtTCPFACKReorder + * TcpExtTCPFastOpenActive + * TcpExtTCPFastOpenActiveFail + * TcpExtTCPFastOpenCookieReqd + * TcpExtTCPFastOpenListenOverflow + * TcpExtTCPFastOpenPassive + * TcpExtTCPFastOpenPassiveFail + * TcpExtTCPFastRetrans + * TcpExtTCPForwardRetrans + * TcpExtTCPFromZeroWindowAdv + * TcpExtTCPFullUndo + * TcpExtTCPHPAcks + * TcpExtTCPHPHits + * TcpExtTCPHPHitsToUser + * TcpExtTCPHystartDelayCwnd + * TcpExtTCPHystartDelayDetect + * TcpExtTCPHystartTrainCwnd + * TcpExtTCPHystartTrainDetect + * TcpExtTCPKeepAlive + * TcpExtTCPLossFailures + * TcpExtTCPLossProbeRecovery + * TcpExtTCPLossProbes + * TcpExtTCPLossUndo + * TcpExtTCPLostRetransmit + * TcpExtTCPMD5NotFound + * TcpExtTCPMD5Unexpected + * TcpExtTCPMTUPFail + * TcpExtTCPMTUPSuccess + * TcpExtTCPMemoryPressures + * TcpExtTCPMinTTLDrop + * TcpExtTCPOFODrop + * TcpExtTCPOFOMerge + * TcpExtTCPOFOQueue + * TcpExtTCPOrigDataSent + * TcpExtTCPPartialUndo + * TcpExtTCPPrequeueDropped + * TcpExtTCPPrequeued + * TcpExtTCPPureAcks + * TcpExtTCPRcvCoalesce + * TcpExtTCPRcvCollapsed + * TcpExtTCPRenoFailures + * TcpExtTCPRenoRecovery + * TcpExtTCPRenoRecoveryFail + * TcpExtTCPRenoReorder + * TcpExtTCPReqQFullDoCookies + * TcpExtTCPReqQFullDrop + * TcpExtTCPRetransFail + * TcpExtTCPSACKDiscard + * TcpExtTCPSACKReneging + * TcpExtTCPSACKReorder + * TcpExtTCPSYNChallenge + * TcpExtTCPSackFailures + * TcpExtTCPSackMerged + * TcpExtTCPSackRecovery + * TcpExtTCPSackRecoveryFail + * TcpExtTCPSackShiftFallback + * TcpExtTCPSackShifted + * TcpExtTCPSchedulerFailed + * TcpExtTCPSlowStartRetrans + * TcpExtTCPSpuriousRTOs + * TcpExtTCPSpuriousRtxHostQueues + * TcpExtTCPSynRetrans + * TcpExtTCPTSReorder + * TcpExtTCPTimeWaitOverflow + * TcpExtTCPTimeouts + * TcpExtTCPToZeroWindowAdv + * TcpExtTCPWantZeroWindowAdv + * TcpExtTCPWinProbe + * TcpExtTW + * TcpExtTWKilled + * TcpExtTWRecycled + * TcpInCsumErrors + * TcpInErrs + * TcpInSegs + * TcpMaxConn + * TcpOutRsts + * TcpOutSegs + * TcpPassiveOpens + * TcpRetransSegs + * TcpRtoAlgorithm + * TcpRtoMax + * TcpRtoMin + * Udp6IgnoredMulti + * Udp6InCsumErrors + * Udp6InDatagrams + * Udp6InErrors + * Udp6NoPorts + * Udp6OutDatagrams + * Udp6RcvbufErrors + * Udp6SndbufErrors + * UdpIgnoredMulti + * UdpInCsumErrors + * UdpInDatagrams + * UdpInErrors + * UdpLite6InCsumErrors + * UdpLite6InDatagrams + * UdpLite6InErrors + * UdpLite6NoPorts + * UdpLite6OutDatagrams + * UdpLite6RcvbufErrors + * 
UdpLite6SndbufErrors + * UdpLiteIgnoredMulti + * UdpLiteInCsumErrors + * UdpLiteInDatagrams + * UdpLiteInErrors + * UdpLiteNoPorts + * UdpLiteOutDatagrams + * UdpLiteRcvbufErrors + * UdpLiteSndbufErrors + * UdpNoPorts + * UdpOutDatagrams + * UdpRcvbufErrors + * UdpSndbufErrors -- nstat - - Icmp6InCsumErrors - - Icmp6InDestUnreachs - - Icmp6InEchoReplies - - Icmp6InEchos - - Icmp6InErrors - - Icmp6InGroupMembQueries - - Icmp6InGroupMembReductions - - Icmp6InGroupMembResponses - - Icmp6InMLDv2Reports - - Icmp6InMsgs - - Icmp6InNeighborAdvertisements - - Icmp6InNeighborSolicits - - Icmp6InParmProblems - - Icmp6InPktTooBigs - - Icmp6InRedirects - - Icmp6InRouterAdvertisements - - Icmp6InRouterSolicits - - Icmp6InTimeExcds - - Icmp6OutDestUnreachs - - Icmp6OutEchoReplies - - Icmp6OutEchos - - Icmp6OutErrors - - Icmp6OutGroupMembQueries - - Icmp6OutGroupMembReductions - - Icmp6OutGroupMembResponses - - Icmp6OutMLDv2Reports - - Icmp6OutMsgs - - Icmp6OutNeighborAdvertisements - - Icmp6OutNeighborSolicits - - Icmp6OutParmProblems - - Icmp6OutPktTooBigs - - Icmp6OutRedirects - - Icmp6OutRouterAdvertisements - - Icmp6OutRouterSolicits - - Icmp6OutTimeExcds - - Icmp6OutType133 - - Icmp6OutType135 - - Icmp6OutType143 - - IcmpInAddrMaskReps - - IcmpInAddrMasks - - IcmpInCsumErrors - - IcmpInDestUnreachs - - IcmpInEchoReps - - IcmpInEchos - - IcmpInErrors - - IcmpInMsgs - - IcmpInParmProbs - - IcmpInRedirects - - IcmpInSrcQuenchs - - IcmpInTimeExcds - - IcmpInTimestampReps - - IcmpInTimestamps - - IcmpMsgInType3 - - IcmpMsgOutType3 - - IcmpOutAddrMaskReps - - IcmpOutAddrMasks - - IcmpOutDestUnreachs - - IcmpOutEchoReps - - IcmpOutEchos - - IcmpOutErrors - - IcmpOutMsgs - - IcmpOutParmProbs - - IcmpOutRedirects - - IcmpOutSrcQuenchs - - IcmpOutTimeExcds - - IcmpOutTimestampReps - - IcmpOutTimestamps - - Ip6FragCreates - - Ip6FragFails - - Ip6FragOKs - - Ip6InAddrErrors - - Ip6InBcastOctets - - Ip6InCEPkts - - Ip6InDelivers - - Ip6InDiscards - - Ip6InECT0Pkts - - Ip6InECT1Pkts - - Ip6InHdrErrors - - Ip6InMcastOctets - - Ip6InMcastPkts - - Ip6InNoECTPkts - - Ip6InNoRoutes - - Ip6InOctets - - Ip6InReceives - - Ip6InTooBigErrors - - Ip6InTruncatedPkts - - Ip6InUnknownProtos - - Ip6OutBcastOctets - - Ip6OutDiscards - - Ip6OutForwDatagrams - - Ip6OutMcastOctets - - Ip6OutMcastPkts - - Ip6OutNoRoutes - - Ip6OutOctets - - Ip6OutRequests - - Ip6ReasmFails - - Ip6ReasmOKs - - Ip6ReasmReqds - - Ip6ReasmTimeout - - IpDefaultTTL - - IpExtInBcastOctets - - IpExtInBcastPkts - - IpExtInCEPkts - - IpExtInCsumErrors - - IpExtInECT0Pkts - - IpExtInECT1Pkts - - IpExtInMcastOctets - - IpExtInMcastPkts - - IpExtInNoECTPkts - - IpExtInNoRoutes - - IpExtInOctets - - IpExtInTruncatedPkts - - IpExtOutBcastOctets - - IpExtOutBcastPkts - - IpExtOutMcastOctets - - IpExtOutMcastPkts - - IpExtOutOctets - - IpForwDatagrams - - IpForwarding - - IpFragCreates - - IpFragFails - - IpFragOKs - - IpInAddrErrors - - IpInDelivers - - IpInDiscards - - IpInHdrErrors - - IpInReceives - - IpInUnknownProtos - - IpOutDiscards - - IpOutNoRoutes - - IpOutRequests - - IpReasmFails - - IpReasmOKs - - IpReasmReqds - - IpReasmTimeout - - TcpActiveOpens - - TcpAttemptFails - - TcpCurrEstab - - TcpEstabResets - - TcpExtArpFilter - - TcpExtBusyPollRxPackets - - TcpExtDelayedACKLocked - - TcpExtDelayedACKLost - - TcpExtDelayedACKs - - TcpExtEmbryonicRsts - - TcpExtIPReversePathFilter - - TcpExtListenDrops - - TcpExtListenOverflows - - TcpExtLockDroppedIcmps - - TcpExtOfoPruned - - TcpExtOutOfWindowIcmps - - TcpExtPAWSActive - - TcpExtPAWSEstab - - 
TcpExtPAWSPassive - - TcpExtPruneCalled - - TcpExtRcvPruned - - TcpExtSyncookiesFailed - - TcpExtSyncookiesRecv - - TcpExtSyncookiesSent - - TcpExtTCPACKSkippedChallenge - - TcpExtTCPACKSkippedFinWait2 - - TcpExtTCPACKSkippedPAWS - - TcpExtTCPACKSkippedSeq - - TcpExtTCPACKSkippedSynRecv - - TcpExtTCPACKSkippedTimeWait - - TcpExtTCPAbortFailed - - TcpExtTCPAbortOnClose - - TcpExtTCPAbortOnData - - TcpExtTCPAbortOnLinger - - TcpExtTCPAbortOnMemory - - TcpExtTCPAbortOnTimeout - - TcpExtTCPAutoCorking - - TcpExtTCPBacklogDrop - - TcpExtTCPChallengeACK - - TcpExtTCPDSACKIgnoredNoUndo - - TcpExtTCPDSACKIgnoredOld - - TcpExtTCPDSACKOfoRecv - - TcpExtTCPDSACKOfoSent - - TcpExtTCPDSACKOldSent - - TcpExtTCPDSACKRecv - - TcpExtTCPDSACKUndo - - TcpExtTCPDeferAcceptDrop - - TcpExtTCPDirectCopyFromBacklog - - TcpExtTCPDirectCopyFromPrequeue - - TcpExtTCPFACKReorder - - TcpExtTCPFastOpenActive - - TcpExtTCPFastOpenActiveFail - - TcpExtTCPFastOpenCookieReqd - - TcpExtTCPFastOpenListenOverflow - - TcpExtTCPFastOpenPassive - - TcpExtTCPFastOpenPassiveFail - - TcpExtTCPFastRetrans - - TcpExtTCPForwardRetrans - - TcpExtTCPFromZeroWindowAdv - - TcpExtTCPFullUndo - - TcpExtTCPHPAcks - - TcpExtTCPHPHits - - TcpExtTCPHPHitsToUser - - TcpExtTCPHystartDelayCwnd - - TcpExtTCPHystartDelayDetect - - TcpExtTCPHystartTrainCwnd - - TcpExtTCPHystartTrainDetect - - TcpExtTCPKeepAlive - - TcpExtTCPLossFailures - - TcpExtTCPLossProbeRecovery - - TcpExtTCPLossProbes - - TcpExtTCPLossUndo - - TcpExtTCPLostRetransmit - - TcpExtTCPMD5NotFound - - TcpExtTCPMD5Unexpected - - TcpExtTCPMTUPFail - - TcpExtTCPMTUPSuccess - - TcpExtTCPMemoryPressures - - TcpExtTCPMinTTLDrop - - TcpExtTCPOFODrop - - TcpExtTCPOFOMerge - - TcpExtTCPOFOQueue - - TcpExtTCPOrigDataSent - - TcpExtTCPPartialUndo - - TcpExtTCPPrequeueDropped - - TcpExtTCPPrequeued - - TcpExtTCPPureAcks - - TcpExtTCPRcvCoalesce - - TcpExtTCPRcvCollapsed - - TcpExtTCPRenoFailures - - TcpExtTCPRenoRecovery - - TcpExtTCPRenoRecoveryFail - - TcpExtTCPRenoReorder - - TcpExtTCPReqQFullDoCookies - - TcpExtTCPReqQFullDrop - - TcpExtTCPRetransFail - - TcpExtTCPSACKDiscard - - TcpExtTCPSACKReneging - - TcpExtTCPSACKReorder - - TcpExtTCPSYNChallenge - - TcpExtTCPSackFailures - - TcpExtTCPSackMerged - - TcpExtTCPSackRecovery - - TcpExtTCPSackRecoveryFail - - TcpExtTCPSackShiftFallback - - TcpExtTCPSackShifted - - TcpExtTCPSchedulerFailed - - TcpExtTCPSlowStartRetrans - - TcpExtTCPSpuriousRTOs - - TcpExtTCPSpuriousRtxHostQueues - - TcpExtTCPSynRetrans - - TcpExtTCPTSReorder - - TcpExtTCPTimeWaitOverflow - - TcpExtTCPTimeouts - - TcpExtTCPToZeroWindowAdv - - TcpExtTCPWantZeroWindowAdv - - TcpExtTCPWinProbe - - TcpExtTW - - TcpExtTWKilled - - TcpExtTWRecycled - - TcpInCsumErrors - - TcpInErrs - - TcpInSegs - - TcpMaxConn - - TcpOutRsts - - TcpOutSegs - - TcpPassiveOpens - - TcpRetransSegs - - TcpRtoAlgorithm - - TcpRtoMax - - TcpRtoMin - - Udp6IgnoredMulti - - Udp6InCsumErrors - - Udp6InDatagrams - - Udp6InErrors - - Udp6NoPorts - - Udp6OutDatagrams - - Udp6RcvbufErrors - - Udp6SndbufErrors - - UdpIgnoredMulti - - UdpInCsumErrors - - UdpInDatagrams - - UdpInErrors - - UdpLite6InCsumErrors - - UdpLite6InDatagrams - - UdpLite6InErrors - - UdpLite6NoPorts - - UdpLite6OutDatagrams - - UdpLite6RcvbufErrors - - UdpLite6SndbufErrors - - UdpLiteIgnoredMulti - - UdpLiteInCsumErrors - - UdpLiteInDatagrams - - UdpLiteInErrors - - UdpLiteNoPorts - - UdpLiteOutDatagrams - - UdpLiteRcvbufErrors - - UdpLiteSndbufErrors - - UdpNoPorts - - UdpOutDatagrams - - UdpRcvbufErrors - - UdpSndbufErrors +## Tags -### 
Tags -- All measurements have the following tags - - host (host of the system) - - name (the type of the metric: snmp, snmp6 or netstat) +* All measurements have the following tags + * host (host of the system) + * name (the type of the metric: snmp, snmp6 or netstat) diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 4408b8f728579..b5ada855479c9 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -138,10 +138,10 @@ func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { if bytes.Equal(fields[i+1], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(fields[i])] = int64(0) - continue } + + entries[string(fields[i])] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) @@ -176,10 +176,10 @@ func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { if bytes.Equal(metrics[j], zeroByte) { if !ns.DumpZeros { continue - } else { - entries[string(append(prefix, headers[j]...))] = int64(0) - continue } + + entries[string(append(prefix, headers[j]...))] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(metrics[j]), 10, 64) diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md index e691200ddd682..41684cc40550c 100644 --- a/plugins/inputs/ntpq/README.md +++ b/plugins/inputs/ntpq/README.md @@ -24,7 +24,7 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration: +## Configuration ```toml # Get standard NTP query metrics, requires ntpq executable @@ -33,27 +33,27 @@ server (RMS of difference of multiple time samples, milliseconds); dns_lookup = true ``` -### Measurements & Fields: +## Measurements & Fields - ntpq - - delay (float, milliseconds) - - jitter (float, milliseconds) - - offset (float, milliseconds) - - poll (int, seconds) - - reach (int) - - when (int, seconds) + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - reach (int) + - when (int, seconds) -### Tags: +## Tags - All measurements have the following tags: - - refid - - remote - - type - - stratum + - refid + - remote + - type + - stratum -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter ntpq --test * Plugin: ntpq, Collection 1 > ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134 diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index a952783a344a6..6b924fc52298a 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -50,7 +50,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Due to problems with a parsing, we have to use regexp expression in order // to remove string that starts from '(' and ends with space // see: https://github.com/influxdata/telegraf/issues/2386 - reg, err := regexp.Compile("\\s+\\([\\S]*") + reg, err := regexp.Compile(`\s+\([\S]*`) if err != nil { return err } diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index b0db77e45784f..54d4e10e717ac 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + 
"github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -20,7 +20,7 @@ func TestSingleNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -49,7 +49,7 @@ func TestBadIntNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -77,7 +77,7 @@ func TestBadFloatNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(2), @@ -105,7 +105,7 @@ func TestDaysNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(172800), @@ -134,7 +134,7 @@ func TestHoursNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(7200), @@ -163,7 +163,7 @@ func TestMinutesNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(120), @@ -192,7 +192,7 @@ func TestBadWhenNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(256), @@ -222,7 +222,7 @@ func TestParserNTPQ(t *testing.T) { n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(64), @@ -285,7 +285,7 @@ func TestMultiNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "delay": float64(54.033), @@ -329,7 +329,7 @@ func TestBadHeaderNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -357,7 +357,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -378,13 +378,13 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { func TestFailedNTPQ(t *testing.T) { tt := tester{ ret: []byte(singleNTPQ), - err: fmt.Errorf("Test failure"), + err: fmt.Errorf("test failure"), } n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) } // It is possible for the output of ntqp to be missing the refid column. 
This diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index c889e016fc464..2ca257c2790ba 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -2,26 +2,33 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other. -### Configuration +## Configuration ```toml # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling # timeout = "5s" ``` -#### Windows +### Linux + +On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` + +### Windows On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe` You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` -### Metrics +## Metrics + - measurement: `nvidia_smi` - tags - `name` (type of GPU e.g. `GeForce GTX 1070 Ti`) @@ -55,7 +62,7 @@ You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program - `driver_version` (string) - `cuda_version` (string) -### Sample Query +## Sample Query The below query could be used to alert on the average temperature of the your GPUs over the last minute @@ -63,30 +70,34 @@ The below query could be used to alert on the average temperature of the your GP SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host" ``` -### Troubleshooting +## Troubleshooting Check the full output by running `nvidia-smi` binary manually. Linux: + ```sh sudo -u telegraf -- /usr/bin/nvidia-smi -q -x ``` Windows: -``` + +```sh "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x ``` Please include the output of this command if opening an GitHub issue. -### Example Output -``` +## Example Output + +```text nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 ``` -### Limitations +## Limitations + Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. 
This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 3e4fb03f04221..68f25ba428611 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -31,7 +31,9 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" + ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), + ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling @@ -39,12 +41,21 @@ func (smi *NvidiaSMI) SampleConfig() string { ` } -// Gather implements the telegraf interface -func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { +func (smi *NvidiaSMI) Init() error { if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { - return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) + binPath, err := exec.LookPath("nvidia-smi") + // fail-fast + if err != nil { + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + } + smi.BinPath = binPath } + return nil +} + +// Gather implements the telegraf interface +func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { data, err := smi.pollSMI() if err != nil { return err diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index f28981f7482ae..edd9b77c99921 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -5,7 +5,7 @@ The `opcua` plugin retrieves data from OPC UA client devices. Telegraf minimum version: Telegraf 1.16 Plugin minimum tested version: 1.16 -### Configuration: +## Configuration ```toml [[inputs.opcua]] @@ -91,23 +91,28 @@ Plugin minimum tested version: 1.16 #] ``` -### Node Configuration +## Node Configuration + An OPC UA node ID may resemble: "n=3;s=Temperature". In this example: + - n=3 is indicating the `namespace` is 3 - s=Temperature is indicting that the `identifier_type` is a string and `identifier` value is 'Temperature' - This example temperature node has a value of 79.0 To gather data from this node enter the following line into the 'nodes' property above: -``` + +```shell {field_name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` This node configuration produces a metric like this: -``` + +```text opcua,id=n\=3;s\=Temperature temp=79.0,quality="OK (0x0)" 1597820490000000000 ``` -### Group Configuration +## Group Configuration + Groups can set default values for the namespace, identifier type, and tags settings. The default values apply to all the nodes in the group. If a default is set, a node may omit the setting altogether. @@ -119,7 +124,8 @@ a tag with the same name is set in both places, the tag value from the node is used. 
This example group configuration has two groups with two nodes each: -``` + +```toml [[inputs.opcua.group]] name="group1_metric_name" namespace="3" @@ -141,7 +147,8 @@ This example group configuration has two groups with two nodes each: ``` It produces metrics like these: -``` + +```text group1_metric_name,group1_tag=val1,id=ns\=3;i\=1001,node1_tag=val2 name=0,Quality="OK (0x0)" 1606893246000000000 group1_metric_name,group1_tag=val1,id=ns\=3;i\=1002,node1_tag=val3 name=-1.389117,Quality="OK (0x0)" 1606893246000000000 group2_metric_name,group2_tag=val3,id=ns\=3;i\=1003,node2_tag=val4 Quality="OK (0x0)",saw=-1.6 1606893246000000000 diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go index d59adc453ba8b..14315e5fe0e2d 100644 --- a/plugins/inputs/opcua/opcua_client.go +++ b/plugins/inputs/opcua/opcua_client.go @@ -10,6 +10,7 @@ import ( "github.com/gopcua/opcua" "github.com/gopcua/opcua/ua" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" @@ -51,7 +52,6 @@ type OpcUA struct { opts []opcua.Option } -// OPCTag type type NodeSettings struct { FieldName string `toml:"name"` Namespace string `toml:"namespace"` @@ -242,14 +242,14 @@ func (o *OpcUA) validateEndpoint() error { //search security policy type switch o.SecurityPolicy { case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": - break + // Valid security policy type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) } //search security mode type switch o.SecurityMode { case "None", "Sign", "SignAndEncrypt", "auto": - break + // Valid security mode type - do nothing. default: return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) } @@ -384,7 +384,7 @@ func (o *OpcUA) validateOPCTags() error { //search identifier type switch node.tag.IdentifierType { case "s", "i", "g", "b": - break + // Valid identifier type - do nothing. 
default: return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) } @@ -468,14 +468,14 @@ func (o *OpcUA) setupOptions() error { if o.Certificate == "" && o.PrivateKey == "" { if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, 365*24*time.Hour) if err != nil { return err } } } - o.opts, err = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) + o.opts, err = o.generateClientOpts(endpoints) return err } diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go index 4c7805578b114..27bfc1ecf4342 100644 --- a/plugins/inputs/opcua/opcua_client_test.go +++ b/plugins/inputs/opcua/opcua_client_test.go @@ -6,11 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type OPCTags struct { @@ -137,30 +136,30 @@ nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] func TestTagsSliceToMap(t *testing.T) { m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) - assert.NoError(t, err) - assert.Len(t, m, 2) - assert.Equal(t, m["foo"], "bar") - assert.Equal(t, m["baz"], "bat") + require.NoError(t, err) + require.Len(t, m, 2) + require.Equal(t, m["foo"], "bar") + require.Equal(t, m["baz"], "bat") } func TestTagsSliceToMap_twoStrings(t *testing.T) { var err error _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) - assert.Error(t, err) + require.Error(t, err) _, err = tagsSliceToMap([][]string{{"foo"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_dupeKey(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) - assert.Error(t, err) + require.Error(t, err) } func TestTagsSliceToMap_empty(t *testing.T) { _, err := tagsSliceToMap([][]string{{"foo", ""}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty value"), err) _, err = tagsSliceToMap([][]string{{"", "bar"}}) - assert.Equal(t, fmt.Errorf("tag 1 has empty name"), err) + require.Equal(t, fmt.Errorf("tag 1 has empty name"), err) } func TestValidateOPCTags(t *testing.T) { diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index e1304fa304fc6..0afe07115e197 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -9,7 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "log" "math/big" "net" "net/url" @@ -146,7 +145,7 @@ func pemBlockForKey(priv interface{}) (*pem.Block, error) { } //revive:disable-next-line -func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) ([]opcua.Option, error) { +func (o *OpcUA) generateClientOpts(endpoints []*ua.EndpointDescription) ([]opcua.Option, error) { opts := []opcua.Option{} appuri := "urn:telegraf:gopcua:client" appname := "Telegraf" @@ -154,13 +153,16 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, // 
ApplicationURI is automatically read from the cert so is not required if a cert if provided opts = append(opts, opcua.ApplicationURI(appuri)) opts = append(opts, opcua.ApplicationName(appname)) + opts = append(opts, opcua.RequestTimeout(time.Duration(o.RequestTimeout))) - opts = append(opts, opcua.RequestTimeout(requestTimeout)) - + certFile := o.Certificate + keyFile := o.PrivateKey + policy := o.SecurityPolicy + mode := o.SecurityMode var err error if certFile == "" && keyFile == "" { if policy != "None" || mode != "None" { - certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, 365*24*time.Hour) if err != nil { return nil, err } @@ -172,7 +174,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, debug.Printf("Loading cert/key from %s/%s", certFile, keyFile) c, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { - log.Printf("Failed to load certificate: %s", err) + o.Log.Warnf("Failed to load certificate: %s", err) } else { pk, ok := c.PrivateKey.(*rsa.PrivateKey) if !ok { @@ -198,7 +200,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } // Select the most appropriate authentication mode from server capabilities and user input - authMode, authOption, err := generateAuth(auth, cert, username, password) + authMode, authOption, err := o.generateAuth(o.AuthMethod, cert, o.Username, o.Password) if err != nil { return nil, err } @@ -276,7 +278,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, return opts, nil } -func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { +func (o *OpcUA) generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { var err error var authMode ua.UserTokenType @@ -313,7 +315,7 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua authOption = opcua.AuthIssuedToken([]byte(nil)) default: - log.Printf("unknown auth-mode, defaulting to Anonymous") + o.Log.Warnf("unknown auth-mode, defaulting to Anonymous") authMode = ua.UserTokenTypeAnonymous authOption = opcua.AuthAnonymous() } diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md index fcb175bd430f8..9b2dd44214d14 100644 --- a/plugins/inputs/openldap/README.md +++ b/plugins/inputs/openldap/README.md @@ -2,7 +2,7 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend. -### Configuration: +## Configuration To use this plugin you must enable the [slapd monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. 
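For reference, enabling the monitoring backend usually comes down to loading the `back_monitor` module, declaring the `monitor` database, and granting the account Telegraf binds with read access to the `cn=Monitor` subtree. The slapd.conf-style sketch below is only illustrative: the module name, paths, and the bind DN (`cn=telegraf,dc=example,dc=com`) are assumptions that will differ per deployment, so follow the linked OpenLDAP documentation for the authoritative steps.

```text
# slapd.conf (sketch): expose cn=Monitor to a read-only account
moduleload  back_monitor.la

database    monitor
access to dn.subtree="cn=Monitor"
    by dn.exact="cn=telegraf,dc=example,dc=com" read
    by * none
```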
@@ -31,11 +31,11 @@ To use this plugin you must enable the [slapd monitoring](https://www.openldap.o reverse_metric_names = true ``` -### Measurements & Fields: +## Measurements & Fields All **monitorCounter**, **monitoredInfo**, **monitorOpInitiated**, and **monitorOpCompleted** attributes are gathered based on this LDAP query: -``` +```sh (|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject)) ``` @@ -46,52 +46,52 @@ Metrics for the **monitorOp*** attributes have **_initiated** and **_completed** An OpenLDAP 2.4 server will provide these metrics: - openldap - - connections_current - - connections_max_file_descriptors - - connections_total - - operations_abandon_completed - - operations_abandon_initiated - - operations_add_completed - - operations_add_initiated - - operations_bind_completed - - operations_bind_initiated - - operations_compare_completed - - operations_compare_initiated - - operations_delete_completed - - operations_delete_initiated - - operations_extended_completed - - operations_extended_initiated - - operations_modify_completed - - operations_modify_initiated - - operations_modrdn_completed - - operations_modrdn_initiated - - operations_search_completed - - operations_search_initiated - - operations_unbind_completed - - operations_unbind_initiated - - statistics_bytes - - statistics_entries - - statistics_pdu - - statistics_referrals - - threads_active - - threads_backload - - threads_max - - threads_max_pending - - threads_open - - threads_pending - - threads_starting - - time_uptime - - waiters_read - - waiters_write - -### Tags: + - connections_current + - connections_max_file_descriptors + - connections_total + - operations_abandon_completed + - operations_abandon_initiated + - operations_add_completed + - operations_add_initiated + - operations_bind_completed + - operations_bind_initiated + - operations_compare_completed + - operations_compare_initiated + - operations_delete_completed + - operations_delete_initiated + - operations_extended_completed + - operations_extended_initiated + - operations_modify_completed + - operations_modify_initiated + - operations_modrdn_completed + - operations_modrdn_initiated + - operations_search_completed + - operations_search_initiated + - operations_unbind_completed + - operations_unbind_initiated + - statistics_bytes + - statistics_entries + - statistics_pdu + - statistics_referrals + - threads_active + - threads_backload + - threads_max + - threads_max_pending + - threads_open + - threads_pending + - threads_starting + - time_uptime + - waiters_read + - waiters_write + +## Tags - server= # value from config - port= # value from config -### Example Output: +## Example Output -``` +```shell $ telegraf -config telegraf.conf -input-filter openldap -test --debug * Plugin: inputs.openldap, Collection 1 > openldap,server=localhost,port=389,host=niska.ait.psu.edu 
operations_bind_initiated=10i,operations_unbind_initiated=6i,operations_modrdn_completed=0i,operations_delete_initiated=0i,operations_add_completed=2i,operations_delete_completed=0i,operations_abandon_completed=0i,statistics_entries=1516i,threads_open=2i,threads_active=1i,waiters_read=1i,operations_modify_completed=0i,operations_extended_initiated=4i,threads_pending=0i,operations_search_initiated=36i,operations_compare_initiated=0i,connections_max_file_descriptors=4096i,operations_modify_initiated=0i,operations_modrdn_initiated=0i,threads_max=16i,time_uptime=6017i,connections_total=1037i,connections_current=1i,operations_add_initiated=2i,statistics_bytes=162071i,operations_unbind_completed=6i,operations_abandon_initiated=0i,statistics_pdu=1566i,threads_max_pending=0i,threads_backload=1i,waiters_write=0i,operations_bind_completed=10i,operations_search_completed=35i,operations_compare_completed=0i,operations_extended_completed=4i,statistics_referrals=0i,threads_starting=0i 1516912070000000000 diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index f3f7b47cf597c..7a3f766718c52 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,10 +5,11 @@ import ( "strconv" "strings" + "gopkg.in/ldap.v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/ldap.v3" ) type Openldap struct { @@ -110,13 +111,15 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - if o.TLS == "ldaps" { + + switch o.TLS { + case "ldaps": l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig) if err != nil { acc.AddError(err) return nil } - } else if o.TLS == "starttls" { + case "starttls": l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port)) if err != nil { acc.AddError(err) @@ -127,7 +130,7 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - } else { + default: acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index b3e171b22e9db..ac9e810f0b49e 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,10 +4,10 @@ import ( "strconv" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/ldap.v3" + + "github.com/influxdata/telegraf/testutil" ) func TestOpenldapMockResult(t *testing.T) { @@ -45,9 +45,9 @@ func TestOpenldapNoConnectionIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { @@ -108,9 +108,9 @@ func TestOpenldapInvalidSSLIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test 
that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } func TestOpenldapBindIntegration(t *testing.T) { @@ -132,11 +132,11 @@ func TestOpenldapBindIntegration(t *testing.T) { } func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { - assert.Empty(t, acc.Errors, "accumulator had no errors") - assert.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") - assert.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") - assert.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") - assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") + require.Empty(t, acc.Errors, "accumulator had no errors") + require.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") + require.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") + require.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") + require.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } func TestOpenldapReverseMetricsIntegration(t *testing.T) { @@ -155,5 +155,5 @@ func TestOpenldapReverseMetricsIntegration(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") + require.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") } diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md index 877c3a46092b1..f1b418e7849e2 100644 --- a/plugins/inputs/openntpd/README.md +++ b/plugins/inputs/openntpd/README.md @@ -20,7 +20,7 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration +## Configuration ```toml [[inputs.openntpd]] @@ -34,7 +34,7 @@ server (RMS of difference of multiple time samples, milliseconds); # timeout = "5ms" ``` -### Metrics +## Metrics - ntpctl - tags: @@ -49,7 +49,7 @@ server (RMS of difference of multiple time samples, milliseconds); - wt (int) - tl (int) -### Permissions +## Permissions It's important to note that this plugin references ntpctl, which may require additional permissions to execute successfully. @@ -57,6 +57,7 @@ Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -69,12 +70,14 @@ telegraf : telegraf ntpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.openntpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following lines: @@ -85,9 +88,9 @@ Defaults!NTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. 
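Whichever route you take, a quick way to confirm the permissions are sufficient is to run the same kind of query Telegraf issues, as the `telegraf` user (the exact `ntpctl` invocation here is illustrative and may differ slightly from what the plugin runs):

```bash
sudo -u telegraf -- ntpctl -s peers
```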
-### Example Output +## Example Output -``` +```shell openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i, offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000 ``` diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index f26419a71101e..ffca02b31a908 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,16 +3,13 @@ package openntpd import ( "bytes" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) -var TestTimeout = config.Duration(time.Second) - func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil @@ -26,11 +23,11 @@ func TestParseSimpleOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -57,11 +54,11 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -89,11 +86,11 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -117,11 +114,11 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -159,11 +156,11 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -187,11 +184,11 @@ func TestParseFullOutput(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(20)) + require.NoError(t, err) + require.True(t, 
acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(20)) - assert.Equal(t, acc.NFields(), 113) + require.Equal(t, acc.NFields(), 113) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index 5bbd4be89658a..ba360e45bf6fa 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -2,7 +2,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server-side SMTP protocol](https://www.opensmtpd.org/) -### Configuration: +## Configuration ```toml [[inputs.opensmtpd]] @@ -16,7 +16,7 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- #timeout = "1s" ``` -### Measurements & Fields: +## Measurements & Fields This is the full list of stats provided by smtpctl and potentially collected by telegram depending of your smtpctl configuration. @@ -59,12 +59,13 @@ depending of your smtpctl configuration. smtp_session_local uptime -### Permissions: +## Permissions It's important to note that this plugin references smtpctl, which may require additional permissions to execute successfully. Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -77,12 +78,14 @@ telegraf : telegraf opensmtpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.opensmtpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -93,9 +96,9 @@ Defaults!SMTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. 
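As a sanity check, you can run the statistics query yourself as the `telegraf` user and confirm it succeeds; the invocation below mirrors what the plugin collects but is only an illustrative example:

```bash
sudo -u telegraf -- smtpctl show stats
```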
-### Example Output: +## Example Output -``` +```shell telegraf --config etc/telegraf.conf --input-filter opensmtpd --test * Plugin: inputs.opensmtpd, Collection 1 > opensmtpd,host=localhost scheduler_delivery_tempfail=822,mta_host=10,mta_task_running=4,queue_bounce=13017,scheduler_delivery_permfail=51022,mta_relay=7,queue_evpcache_size=2,scheduler_envelope_expired=26,bounce_message=0,mta_domain=7,queue_evpcache_update_hit=848,smtp_session_local=12294,bounce_envelope=0,queue_evpcache_load_hit=4389703,scheduler_ramqueue_update=0,mta_route=3,scheduler_delivery_ok=2149489,smtp_session_inet4=2131997,control_session=1,scheduler_envelope_incoming=0,uptime=10346728,scheduler_ramqueue_envelope=2,smtp_session=0,bounce_session=0,mta_envelope=2,mta_session=6,mta_task=2,scheduler_ramqueue_message=2,mta_connector=7,mta_source=1,scheduler_envelope=2,scheduler_envelope_inflight=2 1510220300000000000 diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index fb3afa82e0171..3b625be51cef2 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -4,9 +4,10 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { @@ -22,11 +23,11 @@ func TestFilterSomeStats(t *testing.T) { } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("opensmtpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("opensmtpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 36) + require.Equal(t, acc.NFields(), 36) acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput) } diff --git a/plugins/inputs/openstack/README.md b/plugins/inputs/openstack/README.md new file mode 100644 index 0000000000000..c67d36333363a --- /dev/null +++ b/plugins/inputs/openstack/README.md @@ -0,0 +1,362 @@ + +# OpenStack Input Plugin + +Collects the metrics from following services of OpenStack: + +* CINDER(Block Storage) +* GLANCE(Image service) +* HEAT(Orchestration) +* KEYSTONE(Identity service) +* NEUTRON(Networking) +* NOVA(Compute Service) + +At present this plugin requires the following APIs: + +* blockstorage v2 +* compute v2 +* identity v3 +* networking v2 +* orchestration v1 + +## Configuration and Recommendations + +### Recommendations + +Due to the large number of unique tags that this plugin generates, in order to keep the cardinality down it is **highly recommended** to use [modifiers](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#modifiers) like `tagexclude` to discard unwanted tags. + +For deployments with only a small number of VMs and hosts, a small polling interval (e.g. seconds-minutes) is acceptable. For larger deployments, polling a large number of systems will impact performance. Use the `interval` option to change how often the plugin is run: + +`interval`: How often a metric is gathered. Setting this value at the plugin level overrides the global agent interval setting. + +Also, consider polling OpenStack services at different intervals depending on your requirements. This will help with load and cardinality as well. + +```toml +[[inputs.openstack]] + interval = 5m + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... 
+ enabled_services = ["nova_services"] + .... + +[[inputs.openstack]] + interval = 30m + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... + enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + .... +``` + +### Configuration + +```toml + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights. + username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## output secrets (such as adminPass(for server) and UserID(for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(s) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from Openstack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data received from Openstack. + ## If false format is unix nanoseconds.
+ # human_readable_timestamps = false + + ## Measure Openstack call duration + # measure_openstack_requests = false +``` + +### Measurements, Tags & Fields + +* openstack_aggregate + * name + * aggregate_host [string] + * aggregate_hosts [integer] + * created_at [string] + * deleted [boolean] + * deleted_at [string] + * id [integer] + * updated_at [string] +* openstack_flavor + * is_public + * name + * disk [integer] + * ephemeral [integer] + * id [string] + * ram [integer] + * rxtx_factor [float] + * swap [integer] + * vcpus [integer] +* openstack_hypervisor + * cpu_arch + * cpu_feature_tsc + * cpu_feature_tsc-deadline + * cpu_feature_tsc_adjust + * cpu_feature_tsx-ctrl + * cpu_feature_vme + * cpu_feature_vmx + * cpu_feature_x2apic + * cpu_feature_xgetbv1 + * cpu_feature_xsave + * cpu_model + * cpu_vendor + * hypervisor_hostname + * hypervisor_type + * hypervisor_version + * service_host + * service_id + * state + * status + * cpu_topology_cores [integer] + * cpu_topology_sockets [integer] + * cpu_topology_threads [integer] + * current_workload [integer] + * disk_available_least [integer] + * free_disk_gb [integer] + * free_ram_mb [integer] + * host_ip [string] + * id [string] + * local_gb [integer] + * local_gb_used [integer] + * memory_mb [integer] + * memory_mb_used [integer] + * running_vms [integer] + * vcpus [integer] + * vcpus_used [integer] +* openstack_identity + * description + * domain_id + * name + * parent_id + * enabled boolean + * id string + * is_domain boolean + * projects integer +* openstack_network + * name + * openstack_tags_xyz + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * availability_zone_hints [string] + * created_at [string] + * id [string] + * shared [boolean] + * subnet_id [string] + * subnets [integer] + * updated_at [string] +* openstack_newtron_agent + * agent_host + * agent_type + * availability_zone + * binary + * topic + * admin_state_up [boolean] + * alive [boolean] + * created_at [string] + * heartbeat_timestamp [string] + * id [string] + * resources_synced [boolean] + * started_at [string] +* openstack_nova_service + * host_machine + * name + * state + * status + * zone + * disabled_reason [string] + * forced_down [boolean] + * id [string] + * updated_at [string] +* openstack_port + * device_id + * device_owner + * name + * network_id + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * allowed_address_pairs [integer] + * fixed_ips [integer] + * id [string] + * ip_address [string] + * mac_address [string] + * security_groups [string] + * subnet_id [string] +* openstack_request_duration + * agents [integer] + * aggregates [integer] + * flavors [integer] + * hypervisors [integer] + * networks [integer] + * nova_services [integer] + * ports [integer] + * projects [integer] + * servers [integer] + * stacks [integer] + * storage_pools [integer] + * subnets [integer] + * volumes [integer] +* openstack_server + * flavor + * host_id + * host_name + * image + * key_name + * name + * project + * status + * tenant_id + * user_id + * accessIPv4 [string] + * accessIPv6 [string] + * addresses [integer] + * adminPass [string] + * created [string] + * disk_gb [integer] + * fault_code [integer] + * fault_created [string] + * fault_details [string] + * fault_message [string] + * id [string] + * progress [integer] + * ram_mb [integer] + * security_groups [integer] + * updated [string] + * vcpus [integer] + * volume_id [string] + * volumes_attached [integer] +* openstack_server_diagnostics + * disk_name + * no_of_disks + * 
no_of_ports + * port_name + * server_id + * cpu0_time [float] + * cpu1_time [float] + * cpu2_time [float] + * cpu3_time [float] + * cpu4_time [float] + * cpu5_time [float] + * cpu6_time [float] + * cpu7_time [float] + * disk_errors [float] + * disk_read [float] + * disk_read_req [float] + * disk_write [float] + * disk_write_req [float] + * memory [float] + * memory-actual [float] + * memory-rss [float] + * memory-swap_in [float] + * port_rx [float] + * port_rx_drop [float] + * port_rx_errors [float] + * port_rx_packets [float] + * port_tx [float] + * port_tx_drop [float] + * port_tx_errors [float] + * port_tx_packets [float] +* openstack_service + * name + * service_enabled [boolean] + * service_id [string] +* openstack_storage_pool + * driver_version + * name + * storage_protocol + * vendor_name + * volume_backend_name + * free_capacity_gb [float] + * total_capacity_gb [float] +* openstack_subnet + * cidr + * gateway_ip + * ip_version + * name + * network_id + * openstack_tags_subnet_type_PRV + * project_id + * tenant_id + * allocation_pools [string] + * dhcp_enabled [boolean] + * dns_nameservers [string] + * id [string] +* openstack_volume + * attachment_attachment_id + * attachment_device + * attachment_host_name + * availability_zone + * bootable + * description + * name + * status + * user_id + * volume_type + * attachment_attached_at [string] + * attachment_server_id [string] + * created_at [string] + * encrypted [boolean] + * id [string] + * multiattach [boolean] + * size [integer] + * total_attachments [integer] + * updated_at [string] + +### Example Output + +```text +> openstack_newtron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 +> openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000 +> openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000 +> openstack_hypervisor,cpu_arch=x86_64,cpu_feature_3dnowprefetch=true,cpu_feature_abm=true,cpu_feature_acpi=true,cpu_feature_adx=true,cpu_feature_aes=true,cpu_feature_apic=true,cpu_feature_xtpr=true,cpu_model=C-Server,cpu_vendor=xyz,host=telegraf_host,hypervisor_hostname=vim3,hypervisor_type=QEMU,hypervisor_version=4002000,service_host=vim3,service_id=192,state=up,status=enabled cpu_topology_cores=28i,cpu_topology_sockets=1i,cpu_topology_threads=2i,current_workload=0i,disk_available_least=2596i,free_disk_gb=2744i,free_ram_mb=374092i,host_ip="xx:xx:xx:x::xxx",id="12",local_gb=3366i,local_gb_used=622i,memory_mb=515404i,memory_mb_used=141312i,running_vms=15i,vcpus=0i,vcpus_used=72i 1634197618000000000 +> openstack_network,host=telegraf_host,name=Network\ 2,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,status=active,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx admin_state_up=true,availability_zone_hints="",created_at="2021-07-29T15:58:25Z",id="f5af5e71-e890-4245-a377-d4d86273c319",shared=false,subnet_id="2f7341c6-074d-42aa-9abc-71c662d9b336",subnets=1i,updated_at="2021-09-02T16:46:48Z" 1634197618000000000 +> 
openstack_nova_service,host=telegraf_host,host_machine=vim3,name=nova-compute,state=up,status=enabled,zone=nova disabled_reason="",forced_down=false,id="192",updated_at="2021-10-14T07:46:52Z" 1634197619000000000 +> openstack_port,device_id=a043b8b3-2831-462a-bba8-19088f3db45a,device_owner=compute:nova,host=telegraf_host,name=offload-port1,network_id=6b40d744-9a48-43f2-a4c8-2e0ccb45ac96,project_id=71f9bc44621234f8af99a3949258fc7b,status=ACTIVE,tenant_id=71f9bc44621234f8af99a3949258fc7b admin_state_up=true,allowed_address_pairs=0i,fixed_ips=1i,id="fb64626a-07e1-4d78-a70d-900e989537cc",ip_address="1.1.1.5",mac_address="xx:xx:xx:xx:xx:xx",security_groups="",subnet_id="eafa1eca-b318-4746-a55a-682478466689" 1634197620000000000 +> openstack_identity,domain_id=default,host=telegraf_host,name=service,parent_id=default enabled=true,id="a0877dd2ed1d4b5f952f5689bc04b0cb",is_domain=false,projects=7i 1634197621000000000 +> openstack_server,flavor=0d438971-56cf-4f86-801f-7b04b29384cb,host=telegraf_host,host_id=c0fe05b14261d35cf8748a3f5aae1234b88c2fd62b69fe24ca4a27e9,host_name=vim1,image=b295f1f3-1w23-470c-8734-197676eedd16,name=test-VM7,project=admin,status=active,tenant_id=80ac889731f540498fb1dc78e4bcd5ed,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx accessIPv4="",accessIPv6="",addresses=1i,adminPass="",created="2021-09-07T14:40:11Z",disk_gb=8i,fault_code=0i,fault_created="0001-01-01T00:00:00Z",fault_details="",fault_message="",id="db92ee0d-459b-458e-9fe3-2be5ec7c87e1",progress=0i,ram_mb=16384i,security_groups=1i,updated="2021-09-07T14:40:19Z",vcpus=4i,volumes_attached=0i 1634197656000000000 +> openstack_service,host=telegraf_host,name=identity service_enabled=true,service_id="ad605eff92444a158d0f78768f2c4668" 1634197656000000000 +> openstack_storage_pool,driver_version=1.0.0,host=telegraf_host,name=storage_bloack_1,storage_protocol=nfs,vendor_name=xyz,volume_backend_name=abc free_capacity_gb=4847.54,total_capacity_gb=4864 1634197658000000000 +> openstack_subnet,cidr=10.10.20.10/28,gateway_ip=10.10.20.17,host=telegraf_host,ip_version=4,name=IPv4_Subnet_2,network_id=73c6e1d3-f522-4a3f-8e3c-762a0c06d68b,openstack_tags_lab=True,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx allocation_pools="10.10.20.11-10.10.20.30",dhcp_enabled=true,dns_nameservers="",id="db69fbb2-9ca1-4370-8c78-82a27951c94b" 1634197660000000000 +> openstack_volume,attachment_attachment_id=c83ca0d6-c467-44a0-ac1f-f87d769c0c65,attachment_device=/dev/vda,attachment_host_name=vim1,availability_zone=nova,bootable=true,host=telegraf_host,status=in-use,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,volume_type=storage_bloack_1 attachment_attached_at="2021-01-12T21:02:04Z",attachment_server_id="c0c6b4af-0d26-4a0b-a6b4-4ea41fa3bb4a",created_at="2021-01-12T21:01:47Z",encrypted=false,id="d4204f1b-b1ae-1233-b25c-a57d91d2846e",multiattach=false,size=80i,total_attachments=1i,updated_at="2021-01-12T21:02:04Z" 1634197660000000000 +> openstack_request_duration,host=telegraf_host networks=703214354i 1634197660000000000 +> openstack_server_diagnostics,disk_name=vda,host=telegraf_host,no_of_disks=1,no_of_ports=2,port_name=vhu1234566c-9c,server_id=fdddb58c-bbb9-1234-894b-7ae140178909 
cpu0_time=4924220000000,cpu1_time=218809610000000,cpu2_time=218624300000000,cpu3_time=220505700000000,disk_errors=-1,disk_read=619156992,disk_read_req=35423,disk_write=8432728064,disk_write_req=882445,memory=8388608,memory-actual=8388608,memory-rss=37276,memory-swap_in=0,port_rx=410516469288,port_rx_drop=13373626,port_rx_errors=-1,port_rx_packets=52140392,port_tx=417312195654,port_tx_drop=0,port_tx_errors=0,port_tx_packets=321385978 1634197660000000000 +``` diff --git a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go new file mode 100644 index 0000000000000..eac0116e98fd4 --- /dev/null +++ b/plugins/inputs/openstack/openstack.go @@ -0,0 +1,958 @@ +// Package openstack implements an OpenStack input plugin for Telegraf +// +// The OpenStack input plug is a simple two phase metric collector. In the first +// pass a set of gatherers are run against the API to cache collections of resources. +// In the second phase the gathered resources are combined and emitted as metrics. +// +// No aggregation is performed by the input plugin, instead queries to InfluxDB should +// be used to gather global totals of things such as tag frequency. +package openstack + +import ( + "context" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diagnostics" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + nova_services "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/agents" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + typePort = regexp.MustCompile(`_rx$|_rx_drop$|_rx_errors$|_rx_packets$|_tx$|_tx_drop$|_tx_errors$|_tx_packets$`) + typeCPU = regexp.MustCompile(`cpu[0-9]{1,2}_time$`) + typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`) +) + +// volume is a structure used to unmarshal raw JSON from the API into. +type volume struct { + volumes.Volume + volumetenants.VolumeTenantExt +} + +// OpenStack is the main structure associated with a collection instance. 
+type OpenStack struct { + // Configuration variables + IdentityEndpoint string `toml:"authentication_endpoint"` + Domain string `toml:"domain"` + Project string `toml:"project"` + Username string `toml:"username"` + Password string `toml:"password"` + EnabledServices []string `toml:"enabled_services"` + ServerDiagnotics bool `toml:"server_diagnotics"` + OutputSecrets bool `toml:"output_secrets"` + TagPrefix string `toml:"tag_prefix"` + TagValue string `toml:"tag_value"` + HumanReadableTS bool `toml:"human_readable_timestamps"` + MeasureRequest bool `toml:"measure_openstack_requests"` + Log telegraf.Logger `toml:"-"` + httpconfig.HTTPClientConfig + + // Locally cached clients + identity *gophercloud.ServiceClient + compute *gophercloud.ServiceClient + volume *gophercloud.ServiceClient + network *gophercloud.ServiceClient + stack *gophercloud.ServiceClient + + // Locally cached resources + openstackFlavors map[string]flavors.Flavor + openstackHypervisors []hypervisors.Hypervisor + diag map[string]interface{} + openstackProjects map[string]projects.Project + openstackServices map[string]services.Service +} + +// containsService indicates whether a particular service is enabled +func (o *OpenStack) containsService(t string) bool { + for _, service := range o.openstackServices { + if service.Type == t { + return true + } + } + + return false +} + +// convertTimeFormat, to convert time format based on HumanReadableTS +func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { + if o.HumanReadableTS { + return t.Format("2006-01-02T15:04:05.999999999Z07:00") + } + return t.UnixNano() +} + +// Description returns a description string of the input plugin and implements +// the Input interface. +func (o *OpenStack) Description() string { + return "Collects performance metrics from OpenStack services" +} + +// sampleConfig is a sample configuration file entry. +var sampleConfig = ` + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights. + username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## output secrets (such as adminPass(for server) and UserID(for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(s) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from Openstack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data recieved from Openstack. + ## If false format is unix nanoseconds. 
+ # human_readable_timestamps = false + + ## Measure OpenStack call duration + # measure_openstack_requests = false +` + +// SampleConfig returns a sample configuration file for auto-generation and +// implements the Input interface. +func (o *OpenStack) SampleConfig() string { + return sampleConfig +} + +// Init performs any necessary initialization +func (o *OpenStack) Init() error { + if len(o.EnabledServices) == 0 { + o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"} + } + if o.Username == "" || o.Password == "" { + return fmt.Errorf("username or password can not be empty string") + } + if o.TagValue == "" { + return fmt.Errorf("tag_value option can not be empty string") + } + sort.Strings(o.EnabledServices) + o.openstackFlavors = map[string]flavors.Flavor{} + o.openstackHypervisors = []hypervisors.Hypervisor{} + o.diag = map[string]interface{}{} + o.openstackProjects = map[string]projects.Project{} + o.openstackServices = map[string]services.Service{} + + // Authenticate against Keystone and get a token provider + authOption := gophercloud.AuthOptions{ + IdentityEndpoint: o.IdentityEndpoint, + DomainName: o.Domain, + TenantName: o.Project, + Username: o.Username, + Password: o.Password, + } + provider, err := openstack.NewClient(authOption.IdentityEndpoint) + if err != nil { + return fmt.Errorf("unable to create client for OpenStack endpoint %v", err) + } + + ctx := context.Background() + client, err := o.HTTPClientConfig.CreateClient(ctx, o.Log) + if err != nil { + return err + } + + provider.HTTPClient = *client + + if err := openstack.Authenticate(provider, authOption); err != nil { + return fmt.Errorf("unable to authenticate OpenStack user %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.identity, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V3 identity client %v", err) + } + + if err := o.gatherServices(); err != nil { + return fmt.Errorf("failed to get resource openstack services %v", err) + } + + if o.compute, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 compute client %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.network, err = openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 network client %v", err) + } + + // The Orchestration service is optional + if o.containsService("orchestration") { + if o.stack, err = openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V1 stack client %v", err) + } + } + + // The Cinder volume storage service is optional + if o.containsService("volumev2") { + if o.volume, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 volume client %v", err) + } + } + + return nil +} + +// Gather gathers resources from the OpenStack API and accumulates metrics. This +// implements the Input interface. +func (o *OpenStack) Gather(acc telegraf.Accumulator) error { + // Gather resources. Note service harvesting must come first as the other + // gatherers are dependent on this information.
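+ // Each key below matches a value accepted by the `enabled_services` option; "services" is handled + // separately because the service catalog is already cached by Init and only needs accumulating. + // A failing gatherer only adds an error to the accumulator, so the remaining services are still + // collected. With `measure_openstack_requests` enabled the per-service call time is emitted as + // well, e.g. (illustrative values): openstack_request_duration volumes=1074701i 1634197660000000000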
+ gatherers := map[string]func(telegraf.Accumulator) error{ + "projects": o.gatherProjects, + "hypervisors": o.gatherHypervisors, + "flavors": o.gatherFlavors, + "servers": o.gatherServers, + "volumes": o.gatherVolumes, + "storage_pools": o.gatherStoragePools, + "subnets": o.gatherSubnets, + "ports": o.gatherPorts, + "networks": o.gatherNetworks, + "aggregates": o.gatherAggregates, + "nova_services": o.gatherNovaServices, + "agents": o.gatherAgents, + "stacks": o.gatherStacks, + } + + callDuration := map[string]interface{}{} + for _, service := range o.EnabledServices { + // As Services are already gathered in Init(), using this to accumulate them. + if service == "services" { + o.accumulateServices(acc) + continue + } + start := time.Now() + gatherer := gatherers[service] + if err := gatherer(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource %q %v", service, err)) + } + callDuration[service] = time.Since(start).Nanoseconds() + } + + if o.MeasureRequest { + for service, duration := range callDuration { + acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, map[string]string{}) + } + } + + if o.ServerDiagnotics { + if !choice.Contains("servers", o.EnabledServices) { + if err := o.gatherServers(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource server diagnostics %v", err)) + return nil + } + } + o.accumulateServerDiagnostics(acc) + } + + return nil +} + +// gatherServices collects services from the OpenStack API. +func (o *OpenStack) gatherServices() error { + page, err := services.List(o.identity, &services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list services %v", err) + } + extractedServices, err := services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract services %v", err) + } + for _, service := range extractedServices { + o.openstackServices[service.ID] = service + } + + return nil +} + +// gatherStacks collects and accumulates stacks data from the OpenStack API. +func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error { + page, err := stacks.List(o.stack, &stacks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list stacks %v", err) + } + extractedStacks, err := stacks.ExtractStacks(page) + if err != nil { + return fmt.Errorf("unable to extract stacks %v", err) + } + for _, stack := range extractedStacks { + tags := map[string]string{ + "description": stack.Description, + "name": stack.Name, + } + for _, stackTag := range stack.Tags { + tags[o.TagPrefix+stackTag] = o.TagValue + } + fields := map[string]interface{}{ + "status": strings.ToLower(stack.Status), + "id": stack.ID, + "status_reason": stack.StatusReason, + "creation_time": o.convertTimeFormat(stack.CreationTime), + "updated_time": o.convertTimeFormat(stack.UpdatedTime), + } + acc.AddFields("openstack_stack", fields, tags) + } + + return nil +} + +// gatherNovaServices collects and accumulates nova_services data from the OpenStack API. 
+func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error { + page, err := nova_services.List(o.compute, &nova_services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list nova_services %v", err) + } + novaServices, err := nova_services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract nova_services %v", err) + } + for _, novaService := range novaServices { + tags := map[string]string{ + "name": novaService.Binary, + "host_machine": novaService.Host, + "state": novaService.State, + "status": strings.ToLower(novaService.Status), + "zone": novaService.Zone, + } + fields := map[string]interface{}{ + "id": novaService.ID, + "disabled_reason": novaService.DisabledReason, + "forced_down": novaService.ForcedDown, + "updated_at": o.convertTimeFormat(novaService.UpdatedAt), + } + acc.AddFields("openstack_nova_service", fields, tags) + } + + return nil +} + +// gatherSubnets collects and accumulates subnets data from the OpenStack API. +func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error { + page, err := subnets.List(o.network, &subnets.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list subnets %v", err) + } + extractedSubnets, err := subnets.ExtractSubnets(page) + if err != nil { + return fmt.Errorf("unable to extract subnets %v", err) + } + for _, subnet := range extractedSubnets { + var allocationPools []string + for _, pool := range subnet.AllocationPools { + allocationPools = append(allocationPools, pool.Start+"-"+pool.End) + } + tags := map[string]string{ + "network_id": subnet.NetworkID, + "name": subnet.Name, + "description": subnet.Description, + "ip_version": strconv.Itoa(subnet.IPVersion), + "cidr": subnet.CIDR, + "gateway_ip": subnet.GatewayIP, + "tenant_id": subnet.TenantID, + "project_id": subnet.ProjectID, + "ipv6_address_mode": subnet.IPv6AddressMode, + "ipv6_ra_mode": subnet.IPv6RAMode, + "subnet_pool_id": subnet.SubnetPoolID, + } + for _, subnetTag := range subnet.Tags { + tags[o.TagPrefix+subnetTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": subnet.ID, + "dhcp_enabled": subnet.EnableDHCP, + "dns_nameservers": strings.Join(subnet.DNSNameservers[:], ","), + "allocation_pools": strings.Join(allocationPools[:], ","), + } + acc.AddFields("openstack_subnet", fields, tags) + } + return nil +} + +// gatherPorts collects and accumulates ports data from the OpenStack API. 
+func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error { + page, err := ports.List(o.network, &ports.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list ports %v", err) + } + extractedPorts, err := ports.ExtractPorts(page) + if err != nil { + return fmt.Errorf("unable to extract ports %v", err) + } + for _, port := range extractedPorts { + tags := map[string]string{ + "network_id": port.NetworkID, + "name": port.Name, + "description": port.Description, + "status": strings.ToLower(port.Status), + "tenant_id": port.TenantID, + "project_id": port.ProjectID, + "device_owner": port.DeviceOwner, + "device_id": port.DeviceID, + } + for _, portTag := range port.Tags { + tags[o.TagPrefix+portTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": port.ID, + "mac_address": port.MACAddress, + "admin_state_up": port.AdminStateUp, + "fixed_ips": len(port.FixedIPs), + "allowed_address_pairs": len(port.AllowedAddressPairs), + "security_groups": strings.Join(port.SecurityGroups[:], ","), + } + if len(port.FixedIPs) > 0 { + for _, ip := range port.FixedIPs { + fields["subnet_id"] = ip.SubnetID + fields["ip_address"] = ip.IPAddress + acc.AddFields("openstack_port", fields, tags) + } + } else { + acc.AddFields("openstack_port", fields, tags) + } + } + return nil +} + +// gatherNetworks collects and accumulates networks data from the OpenStack API. +func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error { + page, err := networks.List(o.network, &networks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list networks %v", err) + } + extractedNetworks, err := networks.ExtractNetworks(page) + if err != nil { + return fmt.Errorf("unable to extract networks %v", err) + } + for _, network := range extractedNetworks { + tags := map[string]string{ + "name": network.Name, + "description": network.Description, + "status": strings.ToLower(network.Status), + "tenant_id": network.TenantID, + "project_id": network.ProjectID, + } + for _, networkTag := range network.Tags { + tags[o.TagPrefix+networkTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": network.ID, + "admin_state_up": network.AdminStateUp, + "subnets": len(network.Subnets), + "shared": network.Shared, + "availability_zone_hints": strings.Join(network.AvailabilityZoneHints[:], ","), + "updated_at": o.convertTimeFormat(network.UpdatedAt), + "created_at": o.convertTimeFormat(network.CreatedAt), + } + if len(network.Subnets) > 0 { + for _, subnet := range network.Subnets { + fields["subnet_id"] = subnet + acc.AddFields("openstack_network", fields, tags) + } + } else { + acc.AddFields("openstack_network", fields, tags) + } + } + return nil +} + +// gatherAgents collects and accumulates agents data from the OpenStack API. 
+func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error { + page, err := agents.List(o.network, &agents.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list neutron agents %v", err) + } + extractedAgents, err := agents.ExtractAgents(page) + if err != nil { + return fmt.Errorf("unable to extract neutron agents %v", err) + } + for _, agent := range extractedAgents { + tags := map[string]string{ + "agent_type": agent.AgentType, + "availability_zone": agent.AvailabilityZone, + "binary": agent.Binary, + "description": agent.Description, + "agent_host": agent.Host, + "topic": agent.Topic, + } + fields := map[string]interface{}{ + "id": agent.ID, + "admin_state_up": agent.AdminStateUp, + "alive": agent.Alive, + "resources_synced": agent.ResourcesSynced, + "created_at": o.convertTimeFormat(agent.CreatedAt), + "started_at": o.convertTimeFormat(agent.StartedAt), + "heartbeat_timestamp": o.convertTimeFormat(agent.HeartbeatTimestamp), + } + acc.AddFields("openstack_neutron_agent", fields, tags) + } + return nil +} + +// gatherAggregates collects and accumulates aggregates data from the OpenStack API. +func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error { + page, err := aggregates.List(o.compute).AllPages() + if err != nil { + return fmt.Errorf("unable to list aggregates %v", err) + } + extractedAggregates, err := aggregates.ExtractAggregates(page) + if err != nil { + return fmt.Errorf("unable to extract aggregates %v", err) + } + for _, aggregate := range extractedAggregates { + tags := map[string]string{ + "availability_zone": aggregate.AvailabilityZone, + "name": aggregate.Name, + } + fields := map[string]interface{}{ + "id": aggregate.ID, + "aggregate_hosts": len(aggregate.Hosts), + "deleted": aggregate.Deleted, + "created_at": o.convertTimeFormat(aggregate.CreatedAt), + "updated_at": o.convertTimeFormat(aggregate.UpdatedAt), + "deleted_at": o.convertTimeFormat(aggregate.DeletedAt), + } + if len(aggregate.Hosts) > 0 { + for _, host := range aggregate.Hosts { + fields["aggregate_host"] = host + acc.AddFields("openstack_aggregate", fields, tags) + } + } else { + acc.AddFields("openstack_aggregate", fields, tags) + } + } + return nil +} + +// gatherProjects collects and accumulates projects data from the OpenStack API. +func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error { + page, err := projects.List(o.identity, &projects.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list projects %v", err) + } + extractedProjects, err := projects.ExtractProjects(page) + if err != nil { + return fmt.Errorf("unable to extract projects %v", err) + } + for _, project := range extractedProjects { + o.openstackProjects[project.ID] = project + tags := map[string]string{ + "description": project.Description, + "domain_id": project.DomainID, + "name": project.Name, + "parent_id": project.ParentID, + } + for _, projectTag := range project.Tags { + tags[o.TagPrefix+projectTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": project.ID, + "is_domain": project.IsDomain, + "enabled": project.Enabled, + "projects": len(extractedProjects), + } + acc.AddFields("openstack_identity", fields, tags) + } + return nil +} + +// gatherHypervisors collects and accumulates hypervisors data from the OpenStack API.
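+// The extracted hypervisors are also cached in o.openstackHypervisors so that gatherServers can list +// servers per hypervisor host; hypervisor metrics themselves are only emitted when "hypervisors" is +// part of enabled_services.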
+func (o *OpenStack) gatherHypervisors(acc telegraf.Accumulator) error { + page, err := hypervisors.List(o.compute).AllPages() + if err != nil { + return fmt.Errorf("unable to list hypervisors %v", err) + } + extractedHypervisors, err := hypervisors.ExtractHypervisors(page) + if err != nil { + return fmt.Errorf("unable to extract hypervisors %v", err) + } + o.openstackHypervisors = extractedHypervisors + if choice.Contains("hypervisors", o.EnabledServices) { + for _, hypervisor := range extractedHypervisors { + tags := map[string]string{ + "cpu_vendor": hypervisor.CPUInfo.Vendor, + "cpu_arch": hypervisor.CPUInfo.Arch, + "cpu_model": hypervisor.CPUInfo.Model, + "status": strings.ToLower(hypervisor.Status), + "state": hypervisor.State, + "hypervisor_hostname": hypervisor.HypervisorHostname, + "hypervisor_type": hypervisor.HypervisorType, + "hypervisor_version": strconv.Itoa(hypervisor.HypervisorVersion), + "service_host": hypervisor.Service.Host, + "service_id": hypervisor.Service.ID, + "service_disabled_reason": hypervisor.Service.DisabledReason, + } + for _, cpuFeature := range hypervisor.CPUInfo.Features { + tags["cpu_feature_"+cpuFeature] = "true" + } + fields := map[string]interface{}{ + "id": hypervisor.ID, + "host_ip": hypervisor.HostIP, + "cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets, + "cpu_topology_cores": hypervisor.CPUInfo.Topology.Cores, + "cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads, + "current_workload": hypervisor.CurrentWorkload, + "disk_available_least": hypervisor.DiskAvailableLeast, + "free_disk_gb": hypervisor.FreeDiskGB, + "free_ram_mb": hypervisor.FreeRamMB, + "local_gb": hypervisor.LocalGB, + "local_gb_used": hypervisor.LocalGBUsed, + "memory_mb": hypervisor.MemoryMB, + "memory_mb_used": hypervisor.MemoryMBUsed, + "running_vms": hypervisor.RunningVMs, + "vcpus": hypervisor.VCPUs, + "vcpus_used": hypervisor.VCPUsUsed, + } + acc.AddFields("openstack_hypervisor", fields, tags) + } + } + return nil +} + +// gatherFlavors collects and accumulates flavors data from the OpenStack API. +func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error { + page, err := flavors.ListDetail(o.compute, &flavors.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list flavors %v", err) + } + extractedflavors, err := flavors.ExtractFlavors(page) + if err != nil { + return fmt.Errorf("unable to extract flavors %v", err) + } + for _, flavor := range extractedflavors { + o.openstackFlavors[flavor.ID] = flavor + tags := map[string]string{ + "name": flavor.Name, + "is_public": strconv.FormatBool(flavor.IsPublic), + } + fields := map[string]interface{}{ + "id": flavor.ID, + "disk": flavor.Disk, + "ram": flavor.RAM, + "rxtx_factor": flavor.RxTxFactor, + "swap": flavor.Swap, + "vcpus": flavor.VCPUs, + "ephemeral": flavor.Ephemeral, + } + acc.AddFields("openstack_flavor", fields, tags) + } + return nil +} + +// gatherVolumes collects and accumulates volumes data from the OpenStack API. 
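+// Attached volumes produce one point per attachment (carrying the attachment_* tags and fields), while +// unattached volumes are emitted once; the user_id tag is only added when output_secrets is enabled.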
+func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error { + page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list volumes %v", err) + } + v := []volume{} + if err := volumes.ExtractVolumesInto(page, &v); err != nil { + return fmt.Errorf("unable to extract volumes %v", err) + } + for _, volume := range v { + tags := map[string]string{ + "status": strings.ToLower(volume.Status), + "availability_zone": volume.AvailabilityZone, + "name": volume.Name, + "description": volume.Description, + "volume_type": volume.VolumeType, + "snapshot_id": volume.SnapshotID, + "source_volid": volume.SourceVolID, + "bootable": volume.Bootable, + "replication_status": strings.ToLower(volume.ReplicationStatus), + "consistency_group_id": volume.ConsistencyGroupID, + } + fields := map[string]interface{}{ + "id": volume.ID, + "size": volume.Size, + "total_attachments": len(volume.Attachments), + "encrypted": volume.Encrypted, + "multiattach": volume.Multiattach, + "created_at": o.convertTimeFormat(volume.CreatedAt), + "updated_at": o.convertTimeFormat(volume.UpdatedAt), + } + if o.OutputSecrets { + tags["user_id"] = volume.UserID + } + if len(volume.Attachments) > 0 { + for _, attachment := range volume.Attachments { + if !o.HumanReadableTS { + fields["attachment_attached_at"] = attachment.AttachedAt.UnixNano() + } else { + fields["attachment_attached_at"] = attachment.AttachedAt.Format("2006-01-02T15:04:05.999999999Z07:00") + } + tags["attachment_attachment_id"] = attachment.AttachmentID + tags["attachment_device"] = attachment.Device + tags["attachment_host_name"] = attachment.HostName + fields["attachment_server_id"] = attachment.ServerID + acc.AddFields("openstack_volume", fields, tags) + } + } else { + acc.AddFields("openstack_volume", fields, tags) + } + } + return nil +} + +// gatherStoragePools collects and accumulates storage pools data from the OpenStack API. +func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error { + results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list storage pools %v", err) + } + storagePools, err := schedulerstats.ExtractStoragePools(results) + if err != nil { + return fmt.Errorf("unable to extract storage pools %v", err) + } + for _, storagePool := range storagePools { + tags := map[string]string{ + "name": storagePool.Capabilities.VolumeBackendName, + "driver_version": storagePool.Capabilities.DriverVersion, + "storage_protocol": storagePool.Capabilities.StorageProtocol, + "vendor_name": storagePool.Capabilities.VendorName, + "volume_backend_name": storagePool.Capabilities.VolumeBackendName, + } + fields := map[string]interface{}{ + "total_capacity_gb": storagePool.Capabilities.TotalCapacityGB, + "free_capacity_gb": storagePool.Capabilities.FreeCapacityGB, + } + acc.AddFields("openstack_storage_pool", fields, tags) + } + return nil +} + +// gatherServers collects servers from the OpenStack API. 
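+// Hypervisors are gathered first when not already requested via enabled_services, since servers are +// listed per hypervisor host. With server_diagnotics enabled, diagnostics are fetched only for +// servers in ACTIVE state and cached for accumulateServerDiagnostics.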
+func (o *OpenStack) gatherServers(acc telegraf.Accumulator) error { + if !choice.Contains("hypervisors", o.EnabledServices) { + if err := o.gatherHypervisors(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource hypervisors %v", err)) + } + } + serverGather := choice.Contains("servers", o.EnabledServices) + for _, hypervisor := range o.openstackHypervisors { + page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: true, Host: hypervisor.HypervisorHostname}).AllPages() + if err != nil { + return fmt.Errorf("unable to list servers %v", err) + } + extractedServers, err := servers.ExtractServers(page) + if err != nil { + return fmt.Errorf("unable to extract servers %v", err) + } + for _, server := range extractedServers { + if serverGather { + o.accumulateServer(acc, server, hypervisor.HypervisorHostname) + } + if !o.ServerDiagnotics || server.Status != "ACTIVE" { + continue + } + diagnostic, err := diagnostics.Get(o.compute, server.ID).Extract() + if err != nil { + acc.AddError(fmt.Errorf("unable to get diagnostics for server(%v) %v", server.ID, err)) + continue + } + o.diag[server.ID] = diagnostic + } + } + return nil +} + +// accumulateServices accumulates statistics of services. +func (o *OpenStack) accumulateServices(acc telegraf.Accumulator) { + for _, service := range o.openstackServices { + tags := map[string]string{ + "name": service.Type, + } + fields := map[string]interface{}{ + "service_id": service.ID, + "service_enabled": service.Enabled, + } + acc.AddFields("openstack_service", fields, tags) + } +} + +// accumulateServer accumulates statistics of a server. +func (o *OpenStack) accumulateServer(acc telegraf.Accumulator, server servers.Server, hostName string) { + tags := map[string]string{} + // Extract the flavor details to avoid joins (ignore errors and leave as zero values) + var vcpus, ram, disk int + if flavorIDInterface, ok := server.Flavor["id"]; ok { + if flavorID, ok := flavorIDInterface.(string); ok { + tags["flavor"] = flavorID + if flavor, ok := o.openstackFlavors[flavorID]; ok { + vcpus = flavor.VCPUs + ram = flavor.RAM + disk = flavor.Disk + } + } + } + if imageIDInterface, ok := server.Image["id"]; ok { + if imageID, ok := imageIDInterface.(string); ok { + tags["image"] = imageID + } + } + // Try derive the associated project + project := "unknown" + if p, ok := o.openstackProjects[server.TenantID]; ok { + project = p.Name + } + tags["tenant_id"] = server.TenantID + tags["name"] = server.Name + tags["host_id"] = server.HostID + tags["status"] = strings.ToLower(server.Status) + tags["key_name"] = server.KeyName + tags["host_name"] = hostName + tags["project"] = project + fields := map[string]interface{}{ + "id": server.ID, + "progress": server.Progress, + "accessIPv4": server.AccessIPv4, + "accessIPv6": server.AccessIPv6, + "addresses": len(server.Addresses), + "security_groups": len(server.SecurityGroups), + "volumes_attached": len(server.AttachedVolumes), + "fault_code": server.Fault.Code, + "fault_details": server.Fault.Details, + "fault_message": server.Fault.Message, + "vcpus": vcpus, + "ram_mb": ram, + "disk_gb": disk, + "fault_created": o.convertTimeFormat(server.Fault.Created), + "updated": o.convertTimeFormat(server.Updated), + "created": o.convertTimeFormat(server.Created), + } + if o.OutputSecrets { + tags["user_id"] = server.UserID + fields["adminPass"] = server.AdminPass + } + if len(server.AttachedVolumes) == 0 { + acc.AddFields("openstack_server", fields, tags) + } else { + for _, AttachedVolume := range 
server.AttachedVolumes { + fields["volume_id"] = AttachedVolume.ID + acc.AddFields("openstack_server", fields, tags) + } + } +} + +// accumulateServerDiagnostics accumulates statistics from the compute (nova) service. +// Currently only the 'libvirt' driver is supported. +func (o *OpenStack) accumulateServerDiagnostics(acc telegraf.Accumulator) { + for serverID, diagnostic := range o.diag { + s, ok := diagnostic.(map[string]interface{}) + if !ok { + o.Log.Warnf("unknown type for diagnostics %T", diagnostic) + continue + } + tags := map[string]string{ + "server_id": serverID, + } + fields := map[string]interface{}{} + portName := make(map[string]bool) + storageName := make(map[string]bool) + memoryStats := make(map[string]interface{}) + for k, v := range s { + if typePort.MatchString(k) { + portName[strings.Split(k, "_")[0]] = true + } else if typeCPU.MatchString(k) { + fields[k] = v + } else if typeStorage.MatchString(k) { + storageName[strings.Split(k, "_")[0]] = true + } else { + memoryStats[k] = v + } + } + fields["memory"] = memoryStats["memory"] + fields["memory-actual"] = memoryStats["memory-actual"] + fields["memory-rss"] = memoryStats["memory-rss"] + fields["memory-swap_in"] = memoryStats["memory-swap_in"] + tags["no_of_ports"] = strconv.Itoa(len(portName)) + tags["no_of_disks"] = strconv.Itoa(len(storageName)) + for key := range storageName { + fields["disk_errors"] = s[key+"_errors"] + fields["disk_read"] = s[key+"_read"] + fields["disk_read_req"] = s[key+"_read_req"] + fields["disk_write"] = s[key+"_write"] + fields["disk_write_req"] = s[key+"_write_req"] + tags["disk_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + for key := range portName { + fields["port_rx"] = s[key+"_rx"] + fields["port_rx_drop"] = s[key+"_rx_drop"] + fields["port_rx_errors"] = s[key+"_rx_errors"] + fields["port_rx_packets"] = s[key+"_rx_packets"] + fields["port_tx"] = s[key+"_tx"] + fields["port_tx_drop"] = s[key+"_tx_drop"] + fields["port_tx_errors"] = s[key+"_tx_errors"] + fields["port_tx_packets"] = s[key+"_tx_packets"] + tags["port_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + } +} + +// init registers a callback which creates a new OpenStack input instance. +func init() { + inputs.Add("openstack", func() telegraf.Input { + return &OpenStack{ + Domain: "default", + Project: "admin", + TagPrefix: "openstack_tag_", + TagValue: "true", + } + }) +} diff --git a/plugins/inputs/opentelemetry/README.md b/plugins/inputs/opentelemetry/README.md index 20cc36d5d0403..0f83a469cd59c 100644 --- a/plugins/inputs/opentelemetry/README.md +++ b/plugins/inputs/opentelemetry/README.md @@ -2,7 +2,7 @@ This plugin receives traces, metrics and logs from [OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC. -### Configuration +## Configuration ```toml [[inputs.opentelemetry]] @@ -30,11 +30,11 @@ This plugin receives traces, metrics and logs from [OpenTelemetry](https://opent # tls_key = "/etc/telegraf/key.pem" ``` -#### Schema +### Schema The OpenTelemetry->InfluxDB conversion [schema](https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md) and [implementation](https://github.com/influxdata/influxdb-observability/tree/main/otel2influx) -are hosted at https://github.com/influxdata/influxdb-observability . +are hosted at <https://github.com/influxdata/influxdb-observability>. Spans are stored in measurement `spans`. Logs are stored in measurement `logs`. @@ -48,7 +48,8 @@ Also see the OpenTelemetry output plugin for Telegraf.
### Example Output #### Tracing Spans -``` + +```text spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000 spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000 spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000 @@ -57,7 +58,8 @@ spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation ``` ### Metrics - `prometheus-v1` -``` + +```shell cpu_temp,foo=bar gauge=87.332 http_requests_total,method=post,code=200 counter=1027 http_requests_total,method=post,code=400 counter=3 @@ -66,7 +68,8 @@ rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560 ``` ### Metrics - `prometheus-v2` -``` + +```shell prometheus,foo=bar cpu_temp=87.332 prometheus,method=post,code=200 http_requests_total=1027 prometheus,method=post,code=400 http_requests_total=3 @@ -85,7 +88,8 @@ prometheus rpc_duration_seconds_count=1.7560473e+07,rpc_duration_s ``` ### Logs -``` + +```text logs fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700 logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200 logs fluent.tag="fluent.info",worker=0i 1613769568896515100 diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go index 1c805e2a23ff2..437c723db3e28 100644 --- a/plugins/inputs/opentelemetry/grpc_services.go +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/influxdb-observability/common" "github.com/influxdata/influxdb-observability/otel2influx" "go.opentelemetry.io/collector/model/otlpgrpc" - "go.opentelemetry.io/collector/model/pdata" ) type traceService struct { @@ -15,6 +14,8 @@ type traceService struct { writer *writeToAccumulator } +var _ otlpgrpc.TracesServer = (*traceService)(nil) + func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { converter := otel2influx.NewOtelTracesToLineProtocol(logger) return &traceService{ @@ -23,8 +24,8 @@ func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceSer } } -func (s *traceService) Export(ctx context.Context, req pdata.Traces) (otlpgrpc.TracesResponse, error) { - err := s.converter.WriteTraces(ctx, req, s.writer) +func (s *traceService) Export(ctx context.Context, req otlpgrpc.TracesRequest) (otlpgrpc.TracesResponse, error) { + err := s.converter.WriteTraces(ctx, req.Traces(), s.writer) return otlpgrpc.NewTracesResponse(), err } @@ -33,6 +34,8 @@ type metricsService struct { writer *writeToAccumulator } +var _ otlpgrpc.MetricsServer = 
(*metricsService)(nil) + var metricsSchemata = map[string]common.MetricsSchema{ "prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1, "prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2, @@ -54,8 +57,8 @@ func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema }, nil } -func (s *metricsService) Export(ctx context.Context, req pdata.Metrics) (otlpgrpc.MetricsResponse, error) { - err := s.converter.WriteMetrics(ctx, req, s.writer) +func (s *metricsService) Export(ctx context.Context, req otlpgrpc.MetricsRequest) (otlpgrpc.MetricsResponse, error) { + err := s.converter.WriteMetrics(ctx, req.Metrics(), s.writer) return otlpgrpc.NewMetricsResponse(), err } @@ -64,6 +67,8 @@ type logsService struct { writer *writeToAccumulator } +var _ otlpgrpc.LogsServer = (*logsService)(nil) + func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { converter := otel2influx.NewOtelLogsToLineProtocol(logger) return &logsService{ @@ -72,7 +77,7 @@ func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsServi } } -func (s *logsService) Export(ctx context.Context, req pdata.Logs) (otlpgrpc.LogsResponse, error) { - err := s.converter.WriteLogs(ctx, req, s.writer) +func (s *logsService) Export(ctx context.Context, req otlpgrpc.LogsRequest) (otlpgrpc.LogsResponse, error) { + err := s.converter.WriteLogs(ctx, req.Logs(), s.writer) return otlpgrpc.NewLogsResponse(), err } diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go index 2de35bb06af50..4704d779dfd49 100644 --- a/plugins/inputs/opentelemetry/opentelemetry_test.go +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -5,10 +5,6 @@ import ( "net" "testing" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/metric" @@ -18,6 +14,10 @@ import ( "go.opentelemetry.io/otel/sdk/metric/selector/simple" "google.golang.org/grpc" "google.golang.org/grpc/test/bufconn" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" ) func TestOpenTelemetry(t *testing.T) { @@ -42,7 +42,7 @@ func TestOpenTelemetry(t *testing.T) { t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) pusher := controller.New( - processor.New( + processor.NewFactory( simple.NewWithExactDistribution(), metricExporter, ), @@ -53,7 +53,7 @@ func TestOpenTelemetry(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) - global.SetMeterProvider(pusher.MeterProvider()) + global.SetMeterProvider(pusher) // write metrics meter := global.Meter("library-name") @@ -72,12 +72,11 @@ func TestOpenTelemetry(t *testing.T) { // Check - assert.Empty(t, accumulator.Errors) + require.Empty(t, accumulator.Errors) - if assert.Len(t, accumulator.Metrics, 1) { - got := accumulator.Metrics[0] - assert.Equal(t, "measurement-counter", got.Measurement) - assert.Equal(t, telegraf.Counter, got.Type) - assert.Equal(t, "library-name", got.Tags["otel.library.name"]) - } + require.Len(t, accumulator.Metrics, 1) + got := accumulator.Metrics[0] + require.Equal(t, "measurement-counter", got.Measurement) + require.Equal(t, telegraf.Counter, got.Type) + require.Equal(t, "library-name", 
got.Tags["otel.library.name"]) } diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 85803f76ab046..8ee7dce2d5d8b 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -6,11 +6,11 @@ To use this plugin you will need an [api key][] (app_id). City identifiers can be found in the [city list][]. Alternately you can [search][] by name; the `city_id` can be found as the last digits -of the URL: https://openweathermap.org/city/2643743. Language +of the URL: <https://openweathermap.org/city/2643743>. Language identifiers can be found in the [lang list][]. Documentation for condition ID, icon, and main is at [weather conditions][]. -### Configuration +## Configuration ```toml [[inputs.openweathermap]] @@ -44,7 +44,7 @@ condition ID, icon, and main is at [weather conditions][]. interval = "10m" ``` -### Metrics +## Metrics - weather - tags: @@ -66,10 +66,9 @@ condition ID, icon, and main is at [weather conditions][]. - condition_description (string, localized long description) - condition_icon +## Example Output -### Example Output - -``` +```shell > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000 > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000 > weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000 diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index fcc22343b435e..c4f2f4f032d7e 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -23,10 +23,10 @@ const ( // The limit of locations is 20.
owmRequestSeveralCityID int = 20 - defaultBaseURL = "https://api.openweathermap.org/" - defaultResponseTimeout time.Duration = time.Second * 5 - defaultUnits string = "metric" - defaultLang string = "en" + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout = time.Second * 5 + defaultUnits = "metric" + defaultLang = "en" ) type OpenWeatherMap struct { @@ -38,8 +38,8 @@ type OpenWeatherMap struct { ResponseTimeout config.Duration `toml:"response_timeout"` Units string `toml:"units"` - client *http.Client - baseURL *url.URL + client *http.Client + baseParsedURL *url.URL } var sampleConfig = ` @@ -309,7 +309,7 @@ func init() { func (n *OpenWeatherMap) Init() error { var err error - n.baseURL, err = url.Parse(n.BaseURL) + n.baseParsedURL, err = url.Parse(n.BaseURL) if err != nil { return err } @@ -353,5 +353,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseURL.ResolveReference(relative).String() + return n.baseParsedURL.ResolveReference(relative).String() } diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md index 6821635103d78..4300fd362dc24 100644 --- a/plugins/inputs/passenger/README.md +++ b/plugins/inputs/passenger/README.md @@ -2,7 +2,7 @@ Gather [Phusion Passenger](https://www.phusionpassenger.com/) metrics using the `passenger-status` command line utility. -**Series Cardinality Warning** +## Series Cardinality Warning Depending on your environment, this `passenger_process` measurement of this plugin can quickly create a high number of series which, when unchecked, can @@ -20,7 +20,7 @@ manage your series cardinality: - Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration +## Configuration ```toml # Read metrics of passenger using passenger-status @@ -36,11 +36,11 @@ manage your series cardinality: command = "passenger-status -v --show=xml" ``` -#### Permissions: +### Permissions Telegraf must have permission to execute the `passenger-status` command. On most systems, Telegraf runs as the `telegraf` user. -### Metrics: +## Metrics - passenger - tags: @@ -95,8 +95,9 @@ Telegraf must have permission to execute the `passenger-status` command. 
On mos - real_memory - vmsize -### Example Output: -``` +## Example Output + +```shell passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257 passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977 passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021 diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index ecbeeb532fd1e..5578b88b77525 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -8,7 +8,6 @@ import ( "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" @@ -49,7 +48,7 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) + require.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) } func Test_Invalid_Xml(t *testing.T) { @@ -65,7 +64,7 @@ func Test_Invalid_Xml(t *testing.T) { err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, "cannot parse input with error: EOF", err.Error()) + require.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensure that the error message match the path of default cli @@ -80,7 +79,7 @@ func Test_Default_Config_Load_Default_Command(t *testing.T) { err = r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") + require.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 9d4e2ad47c1b8..cef92498791e9 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -7,9 +7,9 @@ The pf plugin retrieves this information by invoking the `pfstat` command. The ` * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. * Configure sudo to grant telegraf to run `pfctl` as root. This is the most restrictive option, but require sudo setup. -* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. +* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. 
-### Using sudo +## Using sudo You may edit your sudo configuration with the following: @@ -17,40 +17,39 @@ You may edit your sudo configuration with the following: telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info ``` -### Configuration: +## Configuration ```toml # use sudo to run pfctl use_sudo = false ``` -### Measurements & Fields: +## Measurements & Fields +* pf + * entries (integer, count) + * searches (integer, count) + * inserts (integer, count) + * removals (integer, count) + * match (integer, count) + * bad-offset (integer, count) + * fragment (integer, count) + * short (integer, count) + * normalize (integer, count) + * memory (integer, count) + * bad-timestamp (integer, count) + * congestion (integer, count) + * ip-option (integer, count) + * proto-cksum (integer, count) + * state-mismatch (integer, count) + * state-insert (integer, count) + * state-limit (integer, count) + * src-limit (integer, count) + * synproxy (integer, count) -- pf - - entries (integer, count) - - searches (integer, count) - - inserts (integer, count) - - removals (integer, count) - - match (integer, count) - - bad-offset (integer, count) - - fragment (integer, count) - - short (integer, count) - - normalize (integer, count) - - memory (integer, count) - - bad-timestamp (integer, count) - - congestion (integer, count) - - ip-option (integer, count) - - proto-cksum (integer, count) - - state-mismatch (integer, count) - - state-insert (integer, count) - - state-limit (integer, count) - - src-limit (integer, count) - - synproxy (integer, count) +## Example Output -### Example Output: - -``` +```text > pfctl -s info Status: Enabled for 0 days 00:26:05 Debug: Urgent @@ -77,7 +76,7 @@ Counters synproxy 0 0.0/s ``` -``` +```shell > ./telegraf --config telegraf.conf --input-filter pf --test * Plugin: inputs.pf, Collection 1 > pf,host=columbia entries=3i,searches=2668i,inserts=12i,removals=9i 1510941775000000000 diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 53737a81ad098..abb7fcd35fde2 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -7,7 +7,7 @@ More information about the meaning of these metrics can be found in the - PgBouncer minimum tested version: 1.5 -### Configuration example +## Configuration example ```toml [[inputs.pgbouncer]] @@ -22,7 +22,7 @@ More information about the meaning of these metrics can be found in the address = "host=localhost user=pgbouncer sslmode=disable" ``` -#### `address` +### `address` Specify address via a postgresql connection string: @@ -37,7 +37,7 @@ All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. 
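+For example, a connection string with an explicit port might look like the sketch below (values are illustrative; 6432 is the usual PgBouncer listen port): + +```toml +[[inputs.pgbouncer]] + address = "host=localhost port=6432 user=pgbouncer sslmode=disable" +```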
-### Metrics +## Metrics - pgbouncer - tags: @@ -57,7 +57,7 @@ This dbname is just for instantiating a connection with the server and doesn't r - total_xact_count - total_xact_time -+ pgbouncer_pools +- pgbouncer_pools - tags: - db - pool_mode @@ -74,9 +74,9 @@ This dbname is just for instantiating a connection with the server and doesn't r - sv_tested - sv_used -### Example Output +## Example Output -``` +```shell pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000 pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000 ``` diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 7dd75fb4ae487..2c9500260078c 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -4,10 +4,10 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { @@ -55,20 +55,20 @@ func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetricsPgBouncer { - assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + require.True(t, acc.HasInt64Field("pgbouncer", metric)) metricsCounted++ } for _, metric := range intMetricsPgBouncerPools { - assert.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) + require.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + require.True(t, acc.HasInt32Field("pgbouncer", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) } diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index b31f4b7e427bd..8e7a6960ccf40 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -2,7 +2,7 @@ Get phpfpm stats using either HTTP status page or fpm socket. -### Configuration: +## Configuration ```toml # Read metrics of phpfpm, via HTTP status page or socket @@ -44,7 +44,7 @@ Get phpfpm stats using either HTTP status page or fpm socket. When using `unixsocket`, you have to ensure that telegraf runs on same host, and socket path is accessible to telegraf user. -### Metrics: +## Metrics - phpfpm - tags: @@ -62,9 +62,9 @@ host, and socket path is accessible to telegraf user. 
- max_children_reached - slow_requests -# Example Output +## Example Output -``` +```shell phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187 phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422 phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658 diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 56fb38188fb75..b34b8a3063b52 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -33,26 +33,23 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { return fcgi, err } -func (c *conn) Request( - env map[string]string, - requestData string, -) (retout []byte, reterr []byte, err error) { +func (c *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { defer c.rwc.Close() var reqID uint16 = 1 err = c.writeBeginRequest(reqID, uint16(roleResponder), 0) if err != nil { - return + return nil, nil, err } err = c.writePairs(typeParams, reqID, env) if err != nil { - return + return nil, nil, err } if len(requestData) > 0 { if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil { - return + return nil, nil, err } } @@ -82,5 +79,5 @@ READ_LOOP: } } - return + return retout, reterr, err } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 77c4bf0aeee56..532567a2486fa 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -276,12 +276,12 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { func expandUrls(urls []string) ([]string, error) { addrs := make([]string, 0, len(urls)) - for _, url := range urls { - if isNetworkURL(url) { - addrs = append(addrs, url) + for _, address := range urls { + if isNetworkURL(address) { + addrs = append(addrs, address) continue } - paths, err := globUnixSocket(url) + paths, err := globUnixSocket(address) if err != nil { return nil, err } @@ -290,8 +290,8 @@ func expandUrls(urls []string) ([]string, error) { return addrs, nil } -func globUnixSocket(url string) ([]string, error) { - pattern, status := unixSocketPaths(url) +func globUnixSocket(address string) ([]string, error) { + pattern, status := unixSocketPaths(address) glob, err := globpath.Compile(pattern) if err != nil { return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err) @@ -312,9 +312,7 @@ func globUnixSocket(url string) ([]string, error) { return addresses, nil } -func unixSocketPaths(addr string) (string, string) { - var socketPath, statusPath string - +func unixSocketPaths(addr string) (socketPath string, statusPath string) { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { socketPath = socketAddr[0] diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index d51c576aad7f0..cf207fec901d6 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -16,9 +16,9 @@ import ( "net/http/httptest" "testing" - 
"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -283,7 +283,7 @@ func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "/status") + require.Contains(t, err.Error(), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { @@ -297,8 +297,8 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) - assert.Contains(t, err.Error(), `lookup aninvalidone`) + require.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) + require.Contains(t, err.Error(), `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { @@ -312,7 +312,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) + require.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } const outputSample = ` diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 10744a9b15e99..03ab366933678 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -13,7 +13,8 @@ ping packets. Most ping command implementations are supported, one notable exception being that there is currently no support for GNU Inetutils ping. You may instead use the iputils-ping implementation: -``` + +```sh apt-get install iputils-ping ``` @@ -21,7 +22,7 @@ When using `method = "native"` a ping is sent and the results are reported in native Go by the Telegraf process, eliminating the need to execute the system `ping` command. -### Configuration: +## Configuration ```toml [[inputs.ping]] @@ -76,7 +77,7 @@ native Go by the Telegraf process, eliminating the need to execute the system # size = 56 ``` -#### File Limit +### File Limit Since this plugin runs the ping command, it may need to open multiple files per host. The number of files used is lessened with the `native` option but still @@ -88,42 +89,49 @@ use the "drop-in directory", usually located at `/etc/systemd/system/telegraf.service.d`. You can create or edit a drop-in file in the correct location using: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` Increase the number of open files: + ```ini [Service] LimitNOFILE=8192 ``` Restart Telegraf: + ```sh -$ systemctl restart telegraf +systemctl restart telegraf ``` -#### Linux Permissions +### Linux Permissions When using `method = "native"`, Telegraf will attempt to use privileged raw ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities or for Telegraf to be run as root. With systemd: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` + ```ini [Service] CapabilityBoundingSet=CAP_NET_RAW AmbientCapabilities=CAP_NET_RAW ``` + ```sh -$ systemctl restart telegraf +systemctl restart telegraf ``` Without systemd: + ```sh -$ setcap cap_net_raw=eip /usr/bin/telegraf +setcap cap_net_raw=eip /usr/bin/telegraf ``` Reference [`man 7 capabilities`][man 7 capabilities] for more information about @@ -131,11 +139,11 @@ setting capabilities. 
[man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html -#### Other OS Permissions +### Other OS Permissions -When using `method = "native"`, you will need permissions similar to the executable ping program for your OS. +When using `method = "native"`, you will need permissions similar to the executable ping program for your OS. -### Metrics +## Metrics - ping - tags: @@ -155,19 +163,18 @@ When using `method = "native"`, you will need permissions similar to the executa - percent_reply_loss (float, Windows with method = "exec" only) - result_code (int, success = 0, no such host = 1, ping error = 2) -##### reply_received vs packets_received +### reply_received vs packets_received On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`*. -##### ttl +### ttl There is currently no support for TTL on windows with `"native"`; track -progress at https://github.com/golang/go/issues/7175 and -https://github.com/golang/go/issues/7174 - +progress at <https://github.com/golang/go/issues/7175> and +<https://github.com/golang/go/issues/7174> -### Example Output +## Example Output -``` +```shell ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 7d3b05178ad0b..60f3aaf414b74 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -13,6 +13,7 @@ import ( "time" "github.com/go-ping/ping" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -82,6 +83,20 @@ type Ping struct { Size *int } +type roundTripTimeStats struct { + min float64 + avg float64 + max float64 + stddev float64 +} + +type stats struct { + trans int + recv int + ttl int + roundTripTimeStats +} + func (*Ping) Description() string { return "Ping given url(s) and return statistics" } @@ -262,7 +277,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { sort.Sort(durationSlice(stats.Rtts)) for _, perc := range p.Percentiles { - var value = percentile(durationSlice(stats.Rtts), perc) + var value = percentile(stats.Rtts, perc) var field = fmt.Sprintf("percentile%v_ms", perc) fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) } @@ -273,6 +288,7 @@ func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { fields["ttl"] = stats.ttl } + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["percent_packet_loss"] = float64(stats.PacketLoss) fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond) fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond) diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index f6bd751c2a4e3..c09c4a3fcd359 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -57,7 +57,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { return } } - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) + stats, err := processPingOutput(out) if err != nil { // fatal error acc.AddError(fmt.Errorf("%s: %s", err, u)) @@ -67,25 +67,25 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { } // Calculate packet loss percentage - loss :=
float64(trans-rec) / float64(trans) * 100.0 + loss := float64(stats.trans-stats.recv) / float64(stats.trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec + fields["packets_transmitted"] = stats.trans + fields["packets_received"] = stats.recv fields["percent_packet_loss"] = loss - if ttl >= 0 { - fields["ttl"] = ttl + if stats.ttl >= 0 { + fields["ttl"] = stats.ttl } - if min >= 0 { - fields["minimum_response_ms"] = min + if stats.min >= 0 { + fields["minimum_response_ms"] = stats.min } - if avg >= 0 { - fields["average_response_ms"] = avg + if stats.avg >= 0 { + fields["average_response_ms"] = stats.avg } - if max >= 0 { - fields["maximum_response_ms"] = max + if stats.max >= 0 { + fields["maximum_response_ms"] = stats.max } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev + if stats.stddev >= 0 { + fields["standard_deviation_ms"] = stats.stddev } acc.AddFields("ping", fields, tags) } @@ -165,36 +165,47 @@ func (p *Ping) args(url string, system string) []string { // round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms // // It returns (, , ) -func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { - var trans, recv, ttl int = 0, 0, -1 - var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 +func processPingOutput(out string) (stats, error) { + stats := stats{ + trans: 0, + recv: 0, + ttl: -1, + roundTripTimeStats: roundTripTimeStats{ + min: -1.0, + avg: -1.0, + max: -1.0, + stddev: -1.0, + }, + } + // Set this error to nil if we find a 'transmitted' line - err := errors.New("Fatal error processing ping output") + err := errors.New("fatal error processing ping output") lines := strings.Split(out, "\n") for _, line := range lines { // Reading only first TTL, ignoring other TTL messages - if ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { - ttl, err = getTTL(line) - } else if strings.Contains(line, "transmitted") && - strings.Contains(line, "received") { - trans, recv, err = getPacketStats(line, trans, recv) + if stats.ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { + stats.ttl, err = getTTL(line) + } else if strings.Contains(line, "transmitted") && strings.Contains(line, "received") { + stats.trans, stats.recv, err = getPacketStats(line) if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } } else if strings.Contains(line, "min/avg/max") { - min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) + stats.roundTripTimeStats, err = checkRoundTripTimeStats(line) if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } } } - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } -func getPacketStats(line string, trans, recv int) (int, int, error) { +func getPacketStats(line string) (trans int, recv int, err error) { + trans, recv = 0, 0 + stats := strings.Split(line, ", ") // Transmitted packets - trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) + trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) if err != nil { return trans, recv, err } @@ -209,28 +220,35 @@ func getTTL(line string) (int, error) { return strconv.Atoi(ttlMatch[2]) } -func checkRoundTripTimeStats(line string, min, avg, max, - stddev float64) (float64, float64, float64, float64, error) { +func checkRoundTripTimeStats(line string) (roundTripTimeStats, error) { + roundTripTimeStats := roundTripTimeStats{ + min: -1.0, + avg: 
-1.0, + max: -1.0, + stddev: -1.0, + } + stats := strings.Split(line, " ")[3] data := strings.Split(stats, "/") - min, err := strconv.ParseFloat(data[0], 64) + var err error + roundTripTimeStats.min, err = strconv.ParseFloat(data[0], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - avg, err = strconv.ParseFloat(data[1], 64) + roundTripTimeStats.avg, err = strconv.ParseFloat(data[1], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - max, err = strconv.ParseFloat(data[2], 64) + roundTripTimeStats.max, err = strconv.ParseFloat(data[2], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } if len(data) == 4 { - stddev, err = strconv.ParseFloat(data[3], 64) + roundTripTimeStats.stddev, err = strconv.ParseFloat(data[3], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } } - return min, avg, max, stddev, err + return roundTripTimeStats, err } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 7faba097c4562..94a65075e651a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -12,10 +12,10 @@ import ( "time" "github.com/go-ping/ping" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // BSD/Darwin ping output @@ -80,45 +80,45 @@ ping: -i interval too short: Operation not permitted // Test that ping command output is processed properly func TestProcessPingOutput(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) - assert.NoError(t, err) - assert.Equal(t, 55, ttl, "ttl value is 55") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 15.087, min, 0.001) - assert.InDelta(t, 20.224, avg, 0.001) - assert.InDelta(t, 27.263, max, 0.001) - assert.InDelta(t, 4.076, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(freebsdPing6Output) - assert.NoError(t, err) - assert.Equal(t, 117, ttl, "ttl value is 117") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.727, min, 0.001) - assert.InDelta(t, 53.211, avg, 0.001) - assert.InDelta(t, 93.870, max, 0.001) - assert.InDelta(t, 22.000, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 56, ttl, "ttl value is 56") - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, rec, "4 packets were received") - assert.InDelta(t, 15.810, min, 0.001) - assert.InDelta(t, 17.611, avg, 0.001) - assert.InDelta(t, 22.559, max, 0.001) - assert.InDelta(t, -1.0, stddev, 0.001) + stats, err := processPingOutput(bsdPingOutput) + require.NoError(t, err) + require.Equal(t, 55, stats.ttl, "ttl value is 55") + 
require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 15.087, stats.min, 0.001) + require.InDelta(t, 20.224, stats.avg, 0.001) + require.InDelta(t, 27.263, stats.max, 0.001) + require.InDelta(t, 4.076, stats.stddev, 0.001) + + stats, err = processPingOutput(freebsdPing6Output) + require.NoError(t, err) + require.Equal(t, 117, stats.ttl, "ttl value is 117") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.727, stats.min, 0.001) + require.InDelta(t, 53.211, stats.avg, 0.001) + require.InDelta(t, 93.870, stats.max, 0.001) + require.InDelta(t, 22.000, stats.stddev, 0.001) + + stats, err = processPingOutput(linuxPingOutput) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) + + stats, err = processPingOutput(busyBoxPingOutput) + require.NoError(t, err) + require.Equal(t, 56, stats.ttl, "ttl value is 56") + require.Equal(t, 4, stats.trans, "4 packets were transmitted") + require.Equal(t, 4, stats.recv, "4 packets were received") + require.InDelta(t, 15.810, stats.min, 0.001) + require.InDelta(t, 17.611, stats.avg, 0.001) + require.InDelta(t, 22.559, stats.max, 0.001) + require.InDelta(t, -1.0, stats.stddev, 0.001) } // Linux ping output with varying TTL @@ -137,22 +137,22 @@ rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms // Test that ping command output is processed properly func TestProcessPingOutputWithVaryingTTL(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were transmitted") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) + stats, err := processPingOutput(linuxPingOutputWithVaryingTTL) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were transmitted") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) } // Test that processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) - assert.Error(t, err, "Error was expected from processPingOutput") + _, err := processPingOutput(fatalPingOutput) + require.Error(t, err, "Error was expected from processPingOutput") } // Test that default arg lists are created correctly @@ -350,7 +350,7 @@ func TestBadPingGather(t *testing.T) { } func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) { - return fatalPingOutput, errors.New("So very bad") + return fatalPingOutput, errors.New("so very bad") } // Test that a fatal ping command 
does not gather any statistics. @@ -363,20 +363,20 @@ func TestFatalPingGather(t *testing.T) { err := acc.GatherError(p.Gather) require.Error(t, err) - require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, So very bad") - assert.False(t, acc.HasMeasurement("packets_transmitted"), + require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, so very bad") + require.False(t, acc.HasMeasurement("packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("packets_received"), + require.False(t, acc.HasMeasurement("packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("percent_packet_loss"), + require.False(t, acc.HasMeasurement("percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("ttl"), + require.False(t, acc.HasMeasurement("ttl"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("minimum_response_ms"), + require.False(t, acc.HasMeasurement("minimum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("average_response_ms"), + require.False(t, acc.HasMeasurement("average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("maximum_response_ms"), + require.False(t, acc.HasMeasurement("maximum_response_ms"), "Fatal ping should not have packet measurements") } @@ -385,8 +385,8 @@ func TestErrorWithHostNamePingGather(t *testing.T) { out string error error }{ - {"", errors.New("host www.amazon.com: So very bad")}, - {"so bad", errors.New("host www.amazon.com: so bad, So very bad")}, + {"", errors.New("host www.amazon.com: so very bad")}, + {"so bad", errors.New("host www.amazon.com: so bad, so very bad")}, } for _, param := range params { @@ -394,12 +394,12 @@ func TestErrorWithHostNamePingGather(t *testing.T) { p := Ping{ Urls: []string{"www.amazon.com"}, pingHost: func(binary string, timeout float64, args ...string) (string, error) { - return param.out, errors.New("So very bad") + return param.out, errors.New("so very bad") }, } require.Error(t, acc.GatherError(p.Gather)) - assert.True(t, len(acc.Errors) > 0) - assert.Contains(t, acc.Errors, param.error) + require.True(t, len(acc.Errors) > 0) + require.Contains(t, acc.Errors, param.error) } } @@ -409,13 +409,13 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } err := acc.GatherError(p.Gather) require.Error(t, err) - require.EqualValues(t, err.Error(), "Fatal error processing ping output: www.google.com") + require.EqualValues(t, err.Error(), "fatal error processing ping output: www.google.com") } // Test that Gather function works using native ping @@ -469,19 +469,19 @@ func TestPingGatherNative(t *testing.T) { var acc testutil.Accumulator require.NoError(t, tc.P.Init()) require.NoError(t, acc.GatherError(tc.P.Gather)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) - assert.True(t, acc.HasField("ping", "percentile50_ms")) - assert.Equal(t, float64(3), 
acc.Metrics[0].Fields["percentile50_ms"]) - assert.True(t, acc.HasField("ping", "percentile95_ms")) - assert.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) - assert.True(t, acc.HasField("ping", "percentile99_ms")) - assert.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) - assert.True(t, acc.HasField("ping", "percent_packet_loss")) - assert.True(t, acc.HasField("ping", "minimum_response_ms")) - assert.True(t, acc.HasField("ping", "average_response_ms")) - assert.True(t, acc.HasField("ping", "maximum_response_ms")) - assert.True(t, acc.HasField("ping", "standard_deviation_ms")) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + require.True(t, acc.HasField("ping", "percentile50_ms")) + require.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) + require.True(t, acc.HasField("ping", "percentile95_ms")) + require.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) + require.True(t, acc.HasField("ping", "percentile99_ms")) + require.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) + require.True(t, acc.HasField("ping", "percent_packet_loss")) + require.True(t, acc.HasField("ping", "minimum_response_ms")) + require.True(t, acc.HasField("ping", "average_response_ms")) + require.True(t, acc.HasField("ping", "maximum_response_ms")) + require.True(t, acc.HasField("ping", "standard_deviation_ms")) } } diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 6df8af3732a5f..77137b1700ef6 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -8,9 +8,9 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // Windows ping format ( should support multilanguage ?) 
@@ -44,22 +44,22 @@ Approximate round trip times in milli-seconds: func TestHost(t *testing.T) { trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 46, min, "Min 46") - assert.Equal(t, 57, max, "max 57") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 46, min, "Min 46") + require.Equal(t, 57, max, "max 57") trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 50, min, "Min 50") - assert.Equal(t, 52, max, "Max 52") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 50, min, "Min 50") + require.Equal(t, 52, max, "Max 52") } func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { @@ -239,21 +239,21 @@ func TestFatalPingGather(t *testing.T) { } acc.GatherError(p.Gather) - assert.True(t, acc.HasFloatField("ping", "errors"), + require.True(t, acc.HasFloatField("ping", "errors"), "Fatal ping should have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_transmitted"), + require.False(t, acc.HasInt64Field("ping", "packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_received"), + require.False(t, acc.HasInt64Field("ping", "packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"), + require.False(t, acc.HasFloatField("ping", "percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"), + require.False(t, acc.HasFloatField("ping", "percent_reply_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -297,13 +297,13 @@ func TestUnreachablePingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, 
acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -345,13 +345,13 @@ func TestTTLExpiredPingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -362,7 +362,7 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 2fdfacd9d193c..5d42c881db487 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -3,11 +3,11 @@ The postfix plugin reports metrics on the postfix queues. For each of the active, hold, incoming, maildrop, and deferred queues -(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue +(), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds). -### Configuration +## Configuration ```toml [[inputs.postfix]] @@ -16,7 +16,7 @@ item in seconds). # queue_directory = "/var/spool/postfix" ``` -#### Permissions +### Permissions Telegraf will need read access to the files in the queue directory. 
You may need to alter the permissions of these directories to provide access to the @@ -26,20 +26,22 @@ This can be setup either using standard unix permissions or with Posix ACLs, you will only need to use one method: Unix permissions: + ```sh -$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo usermod -a -G postdrop telegraf -$ sudo chmod g+r /var/spool/postfix/maildrop +sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} +sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} +sudo usermod -a -G postdrop telegraf +sudo chmod g+r /var/spool/postfix/maildrop ``` Posix ACL: + ```sh -$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ -$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ ``` -### Metrics +## Metrics - postfix_queue - tags: @@ -49,10 +51,9 @@ $ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ - size (integer, bytes) - age (integer, seconds) +## Example Output -### Example Output - -``` +```shell postfix_queue,queue=active length=3,size=12345,age=9 postfix_queue,queue=hold length=0,size=0,age=0 postfix_queue,queue=maildrop length=1,size=2000,age=2 diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index e2d271f51cba1..444313b7d6885 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -33,9 +33,10 @@ func getQueueDirectory() (string, error) { return strings.TrimSpace(string(qd)), nil } -func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { +func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error) { var length, size int64 var oldest time.Time + err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error { if err != nil { acc.AddError(fmt.Errorf("error scanning %s: %s", path, err)) @@ -57,9 +58,11 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { } return nil }) + if err != nil { - return 0, 0, 0, err + return nil, err } + var age int64 if !oldest.IsZero() { age = int64(time.Since(oldest) / time.Second) @@ -67,7 +70,13 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { // system doesn't support ctime age = -1 } - return length, size, age, nil + + fields := map[string]interface{}{"length": length, "size": size} + if age != -1 { + fields["age"] = age + } + + return fields, nil } type Postfix struct { @@ -84,15 +93,12 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error { } for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { - length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc) + fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) if err != nil { acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err)) continue } - fields := map[string]interface{}{"length": length, "size": size} - if age != -1 { - fields["age"] = age - } + acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) } diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 6ab6556a0cf07..e3032469c615a 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -8,9 +8,9 @@ import ( "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGather(t *testing.T) { @@ -41,20 +41,20 @@ func TestGather(t *testing.T) { metrics[m.Tags["queue"]] = m } - assert.Equal(t, int64(2), metrics["active"].Fields["length"]) - assert.Equal(t, int64(7), metrics["active"].Fields["size"]) - assert.InDelta(t, 0, metrics["active"].Fields["age"], 10) + require.Equal(t, int64(2), metrics["active"].Fields["length"]) + require.Equal(t, int64(7), metrics["active"].Fields["size"]) + require.InDelta(t, 0, metrics["active"].Fields["age"], 10) - assert.Equal(t, int64(1), metrics["hold"].Fields["length"]) - assert.Equal(t, int64(3), metrics["hold"].Fields["size"]) + require.Equal(t, int64(1), metrics["hold"].Fields["length"]) + require.Equal(t, int64(3), metrics["hold"].Fields["size"]) - assert.Equal(t, int64(1), metrics["incoming"].Fields["length"]) - assert.Equal(t, int64(4), metrics["incoming"].Fields["size"]) + require.Equal(t, int64(1), metrics["incoming"].Fields["length"]) + require.Equal(t, int64(4), metrics["incoming"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) - assert.Equal(t, int64(2), metrics["deferred"].Fields["length"]) - assert.Equal(t, int64(6), metrics["deferred"].Fields["size"]) + require.Equal(t, int64(2), metrics["deferred"].Fields["length"]) + require.Equal(t, int64(6), metrics["deferred"].Fields["size"]) } diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index 627fd2dbbfa88..d6771ade60b44 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -1,7 +1,8 @@ # PostgreSQL Input Plugin This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on your version of postgres. See table: -``` + +```sh pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) --- --- --- ------- ------- ------- datid x x x x @@ -27,10 +28,10 @@ stats_reset* x x _* value ignored and therefore not recorded._ - More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) ## Configuration + Specify address via a postgresql connection string: `host=localhost port=5432 user=telegraf database=telegraf` @@ -52,11 +53,13 @@ A list of databases to pull metrics about. 
If not specified, metrics for all dat ### TLS Configuration Add the `sslkey`, `sslcert` and `sslrootcert` options to your DSN: -``` + +```shell host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem ``` ### Configuration example + ```toml [[inputs.postgresql]] address = "postgres://telegraf@localhost/someDB" diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 934d06414b7e6..30cf776eb0e0a 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { @@ -71,27 +71,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { @@ -117,7 +117,7 @@ func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { point, ok := acc.Get("postgresql") require.True(t, ok) - assert.Equal(t, "postgres", point.Tags["db"]) + require.Equal(t, "postgres", point.Tags["db"]) } func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { @@ -150,7 +150,7 @@ func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { } } - assert.True(t, found) + require.True(t, found) } func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { @@ -172,7 +172,7 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { require.NoError(t, p.Gather(&acc)) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } @@ -212,8 +212,8 @@ func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) { } } - assert.True(t, foundTemplate0) - assert.False(t, foundTemplate1) + require.True(t, foundTemplate0) + require.False(t, foundTemplate1) } func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { @@ -251,6 +251,6 @@ func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { } } - assert.False(t, foundTemplate0) - assert.True(t, foundTemplate1) + require.False(t, foundTemplate0) + require.True(t, foundTemplate1) } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index e0793d4d2dbd6..e765316b007d3 100644 
--- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -142,7 +142,7 @@ func (p *Service) Stop() { p.DB.Close() } -var kvMatcher, _ = regexp.Compile("(password|sslcert|sslkey|sslmode|sslrootcert)=\\S+ ?") +var kvMatcher, _ = regexp.Compile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`) // SanitizedAddress utility function to strip sensitive information from the connection string. func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 70464140aedf4..7afddbfdee7f9 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -78,9 +78,11 @@ The example below has two queries are specified, with the following parameters: The system can be easily extended using homemade metrics collection tools or using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) -# Sample Queries : -- telegraf.conf postgresql_extensible queries (assuming that you have configured +## Sample Queries + +* telegraf.conf postgresql_extensible queries (assuming that you have configured correctly your connection) + ```toml [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" @@ -132,27 +134,33 @@ using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs tagvalue="type,enabled" ``` -# Postgresql Side +## Postgresql Side + postgresql.conf : -``` + +```sql shared_preload_libraries = 'pg_stat_statements,pg_stat_kcache' ``` Please follow the requirements to setup those extensions. In the database (can be a specific monitoring db) -``` + +```sql create extension pg_stat_statements; create extension pg_stat_kcache; create extension pg_proctab; ``` + (assuming that the extension is installed on the OS Layer) - - pg_stat_kcache is available on the postgresql.org yum repo - - pg_proctab is available at : https://github.com/markwkm/pg_proctab +* pg_stat_kcache is available on the postgresql.org yum repo +* pg_proctab is available at : + +## Views + +* Blocking sessions - ## Views - - Blocking sessions ```sql CREATE OR REPLACE VIEW public.blocking_procs AS SELECT a.datname AS db, @@ -176,7 +184,9 @@ CREATE OR REPLACE VIEW public.blocking_procs AS WHERE kl.granted AND NOT bl.granted ORDER BY a.query_start; ``` - - Sessions Statistics + +* Sessions Statistics + ```sql CREATE OR REPLACE VIEW public.sessions AS WITH proctab AS ( diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 176827a4b1dc7..bb776abdc3c8b 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -161,10 +161,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { queryAddon string dbVersion int query string - tagValue string measName string - timestamp string - columns []string ) // Retrieving the database version @@ -177,8 +174,6 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // Query is not run if Database version does not match the query version. 
for i := range p.Query { sqlQuery = p.Query[i].Sqlquery - tagValue = p.Query[i].Tagvalue - timestamp = p.Query[i].Timestamp if p.Query[i].Measurement != "" { measName = p.Query[i].Measurement @@ -198,40 +193,46 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { sqlQuery += queryAddon if p.Query[i].Version <= dbVersion { - rows, err := p.DB.Query(sqlQuery) - if err != nil { - p.Log.Error(err.Error()) - continue - } + p.gatherMetricsFromQuery(acc, sqlQuery, p.Query[i].Tagvalue, p.Query[i].Timestamp, measName) + } + } + return nil +} - defer rows.Close() +func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, sqlQuery string, tagValue string, timestamp string, measName string) { + var columns []string - // grab the column information from the result - if columns, err = rows.Columns(); err != nil { - p.Log.Error(err.Error()) - continue - } + rows, err := p.DB.Query(sqlQuery) + if err != nil { + acc.AddError(err) + return + } - p.AdditionalTags = nil - if tagValue != "" { - tagList := strings.Split(tagValue, ",") - for t := range tagList { - p.AdditionalTags = append(p.AdditionalTags, tagList[t]) - } - } + defer rows.Close() - p.Timestamp = timestamp + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + acc.AddError(err) + return + } - for rows.Next() { - err = p.accRow(measName, rows, acc, columns) - if err != nil { - p.Log.Error(err.Error()) - break - } - } + p.AdditionalTags = nil + if tagValue != "" { + tagList := strings.Split(tagValue, ",") + for t := range tagList { + p.AdditionalTags = append(p.AdditionalTags, tagList[t]) + } + } + + p.Timestamp = timestamp + + for rows.Next() { + err = p.accRow(measName, rows, acc, columns) + if err != nil { + acc.AddError(err) + break } } - return nil } type scanner interface { diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 399c236bffcea..fbcc7e1e8a7e2 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func queryRunner(t *testing.T, q query) *testutil.Accumulator { @@ -76,27 +76,27 @@ func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, 
len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { @@ -109,30 +109,30 @@ func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { examples := map[string]func(*testutil.Accumulator){ "SELECT 10.0::float AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.FloatField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, 10.0, v) + require.True(t, found) + require.Equal(t, 10.0, v) }, "SELECT 10.0 AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "10.0", v) + require.True(t, found) + require.Equal(t, "10.0", v) }, "SELECT 'hello world' AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "hello world", v) + require.True(t, found) + require.Equal(t, "hello world", v) }, "SELECT true AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.BoolField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, true, v) + require.True(t, found) + require.Equal(t, true, v) }, "SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) { expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC) v, found := acc.BoolField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, true, v) - assert.True(t, acc.HasTimestamp(measurement, expectedTime)) + require.True(t, found) + require.Equal(t, true, v) + require.True(t, acc.HasTimestamp(measurement, expectedTime)) }, } @@ -192,22 +192,22 @@ func TestPostgresqlFieldOutputIntegration(t *testing.T) { for _, field := range intMetrics { _, found := acc.Int64Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) } for _, field := range int32Metrics { _, found := acc.Int32Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) } for _, field := range floatMetrics { _, found := acc.FloatField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) } for _, field := range stringMetrics { _, found := acc.StringField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a str", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a str", field)) } } @@ -256,9 +256,9 @@ func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) - assert.NotEmpty(t, p.IgnoredColumns()) + require.NotEmpty(t, p.IgnoredColumns()) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md index a6bad660fc37b..160c3d6d26849 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -2,7 +2,7 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. -### Configuration: +## Configuration ```toml # Description @@ -14,17 +14,18 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. 
unix_sockets = ["/var/run/pdns.controlsocket"] ``` -#### Permissions +### Permissions Telegraf will need read access to the powerdns control socket. On many systems this can be accomplished by adding the `telegraf` user to the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` -### Measurements & Fields: +## Measurements & Fields - powerdns - corrupt-packets @@ -66,13 +67,13 @@ usermod telegraf -a -G pdns - uptime - user-msec -### Tags: +## Tags - tags: `server=socket` -### Example Output: +## Example Output -``` +```sh $ ./telegraf --config telegraf.conf --input-filter powerdns --test > powerdns,server=/var/run/pdns.controlsocket corrupt-packets=0i,deferred-cache-inserts=0i,deferred-cache-lookup=0i,dnsupdate-answers=0i,dnsupdate-changes=0i,dnsupdate-queries=0i,dnsupdate-refused=0i,key-cache-size=0i,latency=26i,meta-cache-size=0i,packetcache-hit=0i,packetcache-miss=1i,packetcache-size=0i,qsize-q=0i,query-cache-hit=0i,query-cache-miss=6i,rd-queries=1i,recursing-answers=0i,recursing-questions=0i,recursion-unanswered=0i,security-status=3i,servfail-packets=0i,signature-cache-size=0i,signatures=0i,sys-msec=4349i,tcp-answers=0i,tcp-queries=0i,timedout-packets=0i,udp-answers=1i,udp-answers-bytes=50i,udp-do-queries=0i,udp-queries=0i,udp4-answers=1i,udp4-queries=1i,udp6-answers=0i,udp6-queries=0i,uptime=166738i,user-msec=3036i 1454078624932715706 ``` diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 5421c926a7745..196b0c12dd49f 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -4,7 +4,6 @@ import ( "bufio" "fmt" "io" - "log" "net" "strconv" "strings" @@ -16,6 +15,8 @@ import ( type Powerdns struct { UnixSockets []string + + Log telegraf.Logger `toml:"-"` } var sampleConfig = ` @@ -89,7 +90,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -99,7 +100,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error return nil } -func parseResponse(metrics string) map[string]interface{} { +func (p *Powerdns) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, ",") @@ -112,8 +113,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! 
[inputs.powerdns] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index bf7d3845f7dc9..5afa9008ae124 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -7,7 +7,6 @@ import ( "path/filepath" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" @@ -108,12 +107,16 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { "meta-cache-size", "qsize-q", "signature-cache-size", "sys-msec", "uptime", "user-msec"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns", metric), metric) + require.True(t, acc.HasInt64Field("powerdns", metric), metric) } } func TestPowerdnsParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -173,7 +176,11 @@ func TestPowerdnsParseMetrics(t *testing.T) { } func TestPowerdnsParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -232,7 +239,11 @@ func TestPowerdnsParseCorruptMetrics(t *testing.T) { } func TestPowerdnsParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md index 09192db35ad2b..5cb8347f87571 100644 --- a/plugins/inputs/powerdns_recursor/README.md +++ b/plugins/inputs/powerdns_recursor/README.md @@ -3,7 +3,7 @@ The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using the unix controlsocket. -### Configuration +## Configuration ```toml [[inputs.powerdns_recursor]] @@ -17,7 +17,7 @@ the unix controlsocket. # socket_mode = "0666" ``` -#### Permissions +### Permissions Telegraf will need read/write access to the control socket and to the `socket_dir`. PowerDNS will need to be able to write to the `socket_dir`. @@ -27,25 +27,28 @@ adapted for other systems. First change permissions on the controlsocket in the PowerDNS recursor configuration, usually in `/etc/powerdns/recursor.conf`: -``` + +```sh socket-mode = 660 ``` Then place the `telegraf` user into the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` Since `telegraf` cannot write to to the default `/var/run` socket directory, create a subdirectory and adjust permissions for this directory so that both users can access it. 
+ ```sh -$ mkdir /var/run/pdns -$ chown root:pdns /var/run/pdns -$ chmod 770 /var/run/pdns +mkdir /var/run/pdns +chown root:pdns /var/run/pdns +chmod 770 /var/run/pdns ``` -### Metrics +## Metrics - powerdns_recursor - tags: @@ -156,8 +159,8 @@ $ chmod 770 /var/run/pdns - x-ourtime4-8 - x-ourtime8-16 -### Example Output +## Example Output -``` +```shell powerdns_recursor,server=/var/run/pdns_recursor.controlsocket all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 ``` diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index 190297f9f58a1..bc7ebc5b777f1 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -4,7 +4,6 @@ import ( "bufio" "errors" "fmt" - "log" "math/rand" "net" "os" @@ -22,6 +21,8 @@ type PowerdnsRecursor struct { SocketDir string `toml:"socket_dir"` SocketMode string `toml:"socket_mode"` + Log telegraf.Logger `toml:"-"` + mode uint32 } @@ -125,7 +126,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": 
address} @@ -135,7 +136,7 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator return conn.Close() } -func parseResponse(metrics string) map[string]interface{} { +func (p *PowerdnsRecursor) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, "\n") @@ -148,8 +149,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! [inputs.powerdns_recursor] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index e715fe4e2d165..a4fe9586cd8df 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + @@ -183,12 +183,16 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) + require.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) } } func TestPowerdnsRecursorParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -302,15 +306,17 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -423,15 +429,17 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string @@ -544,9 +552,7 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !assert.Truef(t, ok, "Did not find key for metric %s in values", test.key) { - continue - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) require.EqualValuesf(t, 
value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md index 756326d75246d..ac561f9361660 100644 --- a/plugins/inputs/processes/README.md +++ b/plugins/inputs/processes/README.md @@ -8,7 +8,7 @@ it requires access to execute `ps`. **Supported Platforms**: Linux, FreeBSD, Darwin -### Configuration +## Configuration ```toml # Get the number of processes and group them by status @@ -21,7 +21,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info `docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc` -### Metrics +## Metrics - processes - fields: @@ -38,13 +38,13 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info - parked (linux only) - total_threads (linux only) -### Process State Mappings +## Process State Mappings Different OSes use slightly different State codes for their processes, these state codes are documented in `man ps`, and I will give a mapping of what major OS state codes correspond to in telegraf metrics: -``` +```sh Linux FreeBSD Darwin meaning R R R running S S S sleeping @@ -56,8 +56,8 @@ Linux FreeBSD Darwin meaning W W none paging (linux kernel < 2.6 only), wait (freebsd) ``` -### Example Output +## Example Output -``` +```shell processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index 144b80f3fc1ec..7fc0a76dac036 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestProcesses(t *testing.T) { @@ -27,13 +27,13 @@ func TestProcesses(t *testing.T) { err := processes.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("processes", "running")) - assert.True(t, acc.HasInt64Field("processes", "sleeping")) - assert.True(t, acc.HasInt64Field("processes", "stopped")) - assert.True(t, acc.HasInt64Field("processes", "total")) + require.True(t, acc.HasInt64Field("processes", "running")) + require.True(t, acc.HasInt64Field("processes", "sleeping")) + require.True(t, acc.HasInt64Field("processes", "stopped")) + require.True(t, acc.HasInt64Field("processes", "total")) total, ok := acc.Get("processes") require.True(t, ok) - assert.True(t, total.Fields["total"].(int64) > 0) + require.True(t, total.Fields["total"].(int64) > 0) } func TestFromPS(t *testing.T) { diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index f0b9858601ade..60d213cd0c50d 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -5,6 +5,7 @@ The procstat_lookup metric displays the query information, specifically the number of PIDs returned on a search Processes can be selected for monitoring using one of several methods: + - pidfile - exe - pattern @@ -13,7 +14,7 @@ Processes can be selected for monitoring using one of several methods: - cgroup - win_service -### Configuration: +## Configuration ```toml # Monitor process cpu and memory usage @@ -63,12 +64,12 @@ Processes can be selected for monitoring using one of several methods: # pid_finder = 
"pgrep" ``` -#### Windows support +### Windows support Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. -### Metrics: +## Metrics - procstat - tags: @@ -161,9 +162,9 @@ the `win_perf_counters` input plugin as a more mature alternative. *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* -### Example Output: +## Example Output -``` +```shell procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000 procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000 ``` diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 05cf4a72735f0..041e2cae91888 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/process" ) //NativeFinder uses gopsutil to find processes diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index 0148fdedca933..f6068ac268e0e 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -2,11 +2,9 @@ package procstat import ( "fmt" - "testing" - "os/user" + "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -19,7 +17,7 @@ func TestGather_RealPatternIntegration(t *testing.T) { pids, err := pg.Pattern(`procstat`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } func TestGather_RealFullPatternIntegration(t *testing.T) { @@ -31,7 +29,7 @@ func TestGather_RealFullPatternIntegration(t *testing.T) { pids, err := pg.FullPattern(`%procstat%`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } func TestGather_RealUserIntegration(t *testing.T) { @@ -45,5 +43,5 @@ func TestGather_RealUserIntegration(t *testing.T) { pids, err := pg.UID(user.Username) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index a8d8f3f51bfbd..f31cef4abe1c6 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" ) type Process interface { @@ -43,13 +43,13 @@ type Proc struct { } func NewProc(pid PID) (Process, error) { - process, err := process.NewProcess(int32(pid)) + p, err := process.NewProcess(int32(pid)) if err != nil { 
return nil, err } proc := &Proc{ - Process: process, + Process: p, hasCPUTimes: false, tags: make(map[string]string), } diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 09b5cc7cfa325..915a1b13f44b4 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/process" ) var ( diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index bc586fca4fa42..5b67232156bc1 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" - "github.com/stretchr/testify/assert" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func init() { @@ -51,11 +51,13 @@ MainPID=11408 ControlPID=0 ExecMainPID=11408 `) + //nolint:revive // error code is important for this "test" os.Exit(0) } //nolint:errcheck,revive fmt.Printf("command not found\n") + //nolint:revive // error code is important for this "test" os.Exit(1) } @@ -208,7 +210,7 @@ func TestGather_ProcessName(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) + require.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) } func TestGather_NoProcessNameUsesReal(t *testing.T) { @@ -222,7 +224,7 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasTag("procstat", "process_name")) + require.True(t, acc.HasTag("procstat", "process_name")) } func TestGather_NoPidTag(t *testing.T) { @@ -234,8 +236,8 @@ func TestGather_NoPidTag(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "pid")) - assert.False(t, acc.HasTag("procstat", "pid")) + require.True(t, acc.HasInt32Field("procstat", "pid")) + require.False(t, acc.HasTag("procstat", "pid")) } func TestGather_PidTag(t *testing.T) { @@ -248,8 +250,8 @@ func TestGather_PidTag(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "42", acc.TagValue("procstat", "pid")) - assert.False(t, acc.HasInt32Field("procstat", "pid")) + require.Equal(t, "42", acc.TagValue("procstat", "pid")) + require.False(t, acc.HasInt32Field("procstat", "pid")) } func TestGather_Prefix(t *testing.T) { @@ -262,7 +264,7 @@ func TestGather_Prefix(t *testing.T) { createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) + require.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) } func TestGather_Exe(t *testing.T) { @@ -275,7 +277,7 @@ func TestGather_Exe(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, exe, acc.TagValue("procstat", "exe")) + require.Equal(t, exe, acc.TagValue("procstat", "exe")) } func TestGather_User(t *testing.T) { @@ -289,7 +291,7 @@ func TestGather_User(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, user, 
acc.TagValue("procstat", "user")) + require.Equal(t, user, acc.TagValue("procstat", "user")) } func TestGather_Pattern(t *testing.T) { @@ -303,7 +305,7 @@ func TestGather_Pattern(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pattern, acc.TagValue("procstat", "pattern")) + require.Equal(t, pattern, acc.TagValue("procstat", "pattern")) } func TestGather_MissingPidMethod(t *testing.T) { @@ -327,7 +329,7 @@ func TestGather_PidFile(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) + require.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) } func TestGather_PercentFirstPass(t *testing.T) { @@ -342,8 +344,8 @@ func TestGather_PercentFirstPass(t *testing.T) { } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) - assert.False(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.False(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_PercentSecondPass(t *testing.T) { @@ -359,8 +361,8 @@ func TestGather_PercentSecondPass(t *testing.T) { require.NoError(t, acc.GatherError(p.Gather)) require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) - assert.True(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.True(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_systemdUnitPIDs(t *testing.T) { @@ -374,8 +376,8 @@ func TestGather_systemdUnitPIDs(t *testing.T) { tags := pidsTag.Tags err := pidsTag.Err require.NoError(t, err) - assert.Equal(t, []PID{11408}, pids) - assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + require.Equal(t, []PID{11408}, pids) + require.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) } } @@ -400,8 +402,8 @@ func TestGather_cgroupPIDs(t *testing.T) { tags := pidsTag.Tags err := pidsTag.Err require.NoError(t, err) - assert.Equal(t, []PID{1234, 5678}, pids) - assert.Equal(t, td, tags["cgroup"]) + require.Equal(t, []PID{1234, 5678}, pids) + require.Equal(t, td, tags["cgroup"]) } } diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index fe6d3a8e816da..74f49a2649c83 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -3,7 +3,7 @@ The prometheus input plugin gathers metrics from HTTP servers exposing metrics in Prometheus format. -### Configuration: +## Configuration ```toml # Read metrics from one or many prometheus clients @@ -49,7 +49,7 @@ in Prometheus format. ## Only for node scrape scope: node IP of the node that telegraf is running on. ## Either this config or the environment variable NODE_IP must be set. # node_ip = "10.180.1.1" - + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. ## Default is 60 seconds. # pod_scrape_interval = 60 @@ -100,7 +100,7 @@ in Prometheus format. `urls` can contain a unix socket as well. 
If a different path is required (default is `/metrics` for both http[s] and unix) for a unix socket, add `path` as a query parameter as follows: `unix:///var/run/prometheus.sock?path=/custom/metrics` -#### Kubernetes Service Discovery +### Kubernetes Service Discovery URLs listed in the `kubernetes_services` parameter will be expanded by looking up all A records assigned to the hostname as described in @@ -109,7 +109,7 @@ by looking up all A records assigned to the hostname as described in This method can be used to locate all [Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services). -#### Kubernetes scraping +### Kubernetes scraping Enabling this option will allow the plugin to scrape for prometheus annotation on Kubernetes pods. Currently, you can run this plugin in your kubernetes cluster, or we use the kubeconfig @@ -124,7 +124,8 @@ Currently the following annotation are supported: Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. Using `pod_scrape_scope = "node"` allows more scalable scraping for pods which will scrape pods only in the node that telegraf is running. It will fetch the pod list locally from the node's kubelet. This will require running Telegraf in every node of the cluster. Note that either `node_ip` must be specified in the config or the environment variable `NODE_IP` must be set to the host IP. ThisThe latter can be done in the yaml of the pod running telegraf: -``` + +```sh env: - name: NODE_IP valueFrom: @@ -134,7 +135,47 @@ env: If using node level scrape scope, `pod_scrape_interval` specifies how often (in seconds) the pod list for scraping should updated. If not specified, the default is 60 seconds. -#### Consul Service Discovery +The pod running telegraf will need to have the proper rbac configuration in order to be allowed to call the k8s api to discover and watch pods in the cluster. +A typical configuration will create a service account, a cluster role with the appropriate rules and a cluster role binding to tie the cluster role to the service account. +Example of configuration for cluster level discovery: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: telegraf-k8s-role-{{.Release.Name}} +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +# Rolebinding for namespace to cluster-admin +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: telegraf-k8s-role-{{.Release.Name}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telegraf-k8s-role-{{.Release.Name}} +subjects: +- kind: ServiceAccount + name: telegraf-k8s-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: telegraf-k8s-{{ .Release.Name }} +``` + +### Consul Service Discovery Enabling this option and configuring consul `agent` url will allow the plugin to query consul catalog for available services. Using `query_interval` the plugin will periodically @@ -143,6 +184,7 @@ It can use the information from the catalog to build the scraped url and additio Multiple consul queries can be configured, each for different service. 
The following example fields can be used in url or tag templates: + * Node * Address * NodeMeta @@ -152,15 +194,15 @@ The following example fields can be used in url or tag templates: * ServiceMeta For full list of available fields and their type see struct CatalogService in -https://github.com/hashicorp/consul/blob/master/api/catalog.go + -#### Bearer Token +### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on each interval and its contents will be appended to the Bearer string in the Authorization header. -### Usage for Caddy HTTP server +## Usage for Caddy HTTP server Steps to monitor Caddy with Telegraf's Prometheus input plugin: @@ -178,7 +220,7 @@ Steps to monitor Caddy with Telegraf's Prometheus input plugin: > This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). -### Metrics: +## Metrics Measurement names are based on the Metric Family and tags are created for each label. The value is added to a field named based on the metric type. @@ -187,10 +229,11 @@ All metrics receive the `url` tag indicating the related URL specified in the Telegraf configuration. If using Kubernetes service discovery the `address` tag is also added indicating the discovered ip address. -### Example Output: +## Example Output -**Source** -``` +### Source + +```shell # HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 7.4545e-05 @@ -211,8 +254,9 @@ cpu_usage_user{cpu="cpu2"} 2.0161290322588776 cpu_usage_user{cpu="cpu3"} 1.5045135406226022 ``` -**Output** -``` +### Output + +```shell go_gc_duration_seconds,url=http://example.org:9273/metrics 1=0.001336611,count=14,sum=0.004527551,0=0.000057965,0.25=0.000083812,0.5=0.000286537,0.75=0.000365303 1505776733000000000 go_goroutines,url=http://example.org:9273/metrics gauge=21 1505776695000000000 cpu_usage_user,cpu=cpu0,url=http://example.org:9273/metrics gauge=1.513622603430151 1505776751000000000 @@ -221,8 +265,9 @@ cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805 cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000 ``` -**Output (when metric_version = 2)** -``` +### Output (when metric_version = 2) + +```shell prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000 prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000 prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000 diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 9a4d6bd325c46..e3217e697d914 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -110,10 +110,10 @@ func (p *Prometheus) watchPod(ctx context.Context, client *kubernetes.Clientset) LabelSelector: p.KubernetesLabelSelector, FieldSelector: p.KubernetesFieldSelector, }) - defer watcher.Stop() if err != nil { return err } + defer watcher.Stop() for { select { diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 2f67607cd3cf3..b763cd14825b2 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -3,21 +3,21 @@ package 
prometheus import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" ) func TestScrapeURLNoAnnotations(t *testing.T) { p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} p.Annotations = map[string]string{} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Nil(t, url) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotationsNoScrape(t *testing.T) { @@ -25,56 +25,56 @@ func TestScrapeURLAnnotationsNoScrape(t *testing.T) { p.Name = "myPod" p.Annotations = map[string]string{"prometheus.io/scrape": "false"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Nil(t, url) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) } func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { p := pod() p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} url, err := getScrapeURL(p) - assert.NoError(t, err) - assert.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { @@ -83,7 +83,7 @@ func TestAddPod(t *testing.T) { p := pod() p.Annotations = 
map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultipleDuplicatePods(t *testing.T) { @@ -94,7 +94,7 @@ func TestAddMultipleDuplicatePods(t *testing.T) { registerPod(p, prom) p.Name = "Pod2" registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultiplePods(t *testing.T) { @@ -106,7 +106,7 @@ func TestAddMultiplePods(t *testing.T) { p.Name = "Pod2" p.Status.PodIP = "127.0.0.2" registerPod(p, prom) - assert.Equal(t, 2, len(prom.kubernetesPods)) + require.Equal(t, 2, len(prom.kubernetesPods)) } func TestDeletePods(t *testing.T) { @@ -116,7 +116,7 @@ func TestDeletePods(t *testing.T) { p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) unregisterPod(p, prom) - assert.Equal(t, 0, len(prom.kubernetesPods)) + require.Equal(t, 0, len(prom.kubernetesPods)) } func TestPodHasMatchingNamespace(t *testing.T) { @@ -126,12 +126,12 @@ func TestPodHasMatchingNamespace(t *testing.T) { pod.Name = "Pod1" pod.Namespace = "default" shouldMatch := podHasMatchingNamespace(pod, prom) - assert.Equal(t, true, shouldMatch) + require.Equal(t, true, shouldMatch) pod.Name = "Pod2" pod.Namespace = "namespace" shouldNotMatch := podHasMatchingNamespace(pod, prom) - assert.Equal(t, false, shouldNotMatch) + require.Equal(t, false, shouldNotMatch) } func TestPodHasMatchingLabelSelector(t *testing.T) { @@ -148,8 +148,8 @@ func TestPodHasMatchingLabelSelector(t *testing.T) { pod.Labels["label5"] = "label5" labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) - assert.Equal(t, err, nil) - assert.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) } func TestPodHasMatchingFieldSelector(t *testing.T) { @@ -160,8 +160,8 @@ func TestPodHasMatchingFieldSelector(t *testing.T) { pod.Spec.NodeName = "node1000" fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) - assert.Equal(t, err, nil) - assert.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) } func TestInvalidFieldSelector(t *testing.T) { @@ -172,7 +172,7 @@ func TestInvalidFieldSelector(t *testing.T) { pod.Spec.NodeName = "node1000" _, err := fields.ParseSelector(prom.KubernetesFieldSelector) - assert.NotEqual(t, err, nil) + require.NotEqual(t, err, nil) } func pod() *corev1.Pod { diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index dfe5cc4749813..49bfa2afa4d27 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -10,13 +10,13 @@ import ( "net/http" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" - "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers/prometheus/common" ) func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { @@ -63,11 +63,13 @@ func Parse(buf []byte, header http.Header, ignoreTimestamp bool) 
([]telegraf.Met // summary metric fields = makeQuantiles(m) fields["count"] = float64(m.GetSummary().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetSummary().GetSampleSum()) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric fields = makeBuckets(m) fields["count"] = float64(m.GetHistogram().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetHistogram().GetSampleSum()) } else { // standard metric @@ -106,6 +108,7 @@ func makeQuantiles(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) for _, q := range m.GetSummary().Quantile { if !math.IsNaN(q.GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue()) } } @@ -126,14 +129,17 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) if m.Gauge != nil { if !math.IsNaN(m.GetGauge().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["gauge"] = float64(m.GetGauge().GetValue()) } } else if m.Counter != nil { if !math.IsNaN(m.GetCounter().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["counter"] = float64(m.GetCounter().GetValue()) } } else if m.Untyped != nil { if !math.IsNaN(m.GetUntyped().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["value"] = float64(m.GetUntyped().GetValue()) } } diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index ffd5967458c9f..24470a441a6b3 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. 
@@ -45,13 +45,13 @@ apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 func TestParseValidPrometheus(t *testing.T) { // Gauge value metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "cadvisor_version_info", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "gauge": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "osVersion": "CentOS Linux 7 (Core)", "cadvisorRevision": "", "cadvisorVersion": "", @@ -61,35 +61,35 @@ func TestParseValidPrometheus(t *testing.T) { // Counter value metrics, err = Parse([]byte(validUniqueCounter), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "get_token_fail_count", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "get_token_fail_count", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(0), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Summary data //SetDefaultTags(map[string]string{}) metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "0.5": 552048.506, "0.9": 5.876804288e+06, "0.99": 5.876804288e+06, "count": 9.0, "sum": 1.8909097205e+07, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "apiserver_request_latencies", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "500000": 2000.0, "count": 2025.0, "sum": 1.02726334e+08, @@ -101,7 +101,7 @@ func TestParseValidPrometheus(t *testing.T) { "125000": 1994.0, "1e+06": 2005.0, }, metrics[0].Fields()) - assert.Equal(t, + require.Equal(t, map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) } @@ -116,27 +116,27 @@ test_counter{label="test"} 1 %d // IgnoreTimestamp is false metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "test_counter", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "label": "test", }, metrics[0].Tags()) - assert.Equal(t, testTime, metrics[0].Time().UTC()) + require.Equal(t, 
testTime, metrics[0].Time().UTC()) // IgnoreTimestamp is true metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "test_counter", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "label": "test", }, metrics[0].Tags()) - assert.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) + require.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 18cbf6c8b3d59..2f8e17f196b32 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -13,14 +13,15 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - parser_v2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" + parserV2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` @@ -182,8 +183,7 @@ func (p *Prometheus) Description() string { } func (p *Prometheus) Init() error { - - // Config proccessing for node scrape scope for monitor_kubernetes_pods + // Config processing for node scrape scope for monitor_kubernetes_pods p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") if p.isNodeScrapeScope { // Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address @@ -222,8 +222,6 @@ func (p *Prometheus) Init() error { return nil } -var ErrProtocolError = errors.New("prometheus protocol error") - func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { @@ -253,12 +251,12 @@ type URLAndAddress struct { func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { allURLs := make(map[string]URLAndAddress) for _, u := range p.URLs { - URL, err := url.Parse(u) + address, err := url.Parse(u) if err != nil { p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error()) continue } - allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} + allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address} } p.lock.Lock() @@ -273,22 +271,22 @@ func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { } for _, service := range p.KubernetesServices { - URL, err := url.Parse(service) + address, err := url.Parse(service) if err != nil { return nil, err } - resolvedAddresses, err := net.LookupHost(URL.Hostname()) + resolvedAddresses, err := net.LookupHost(address.Hostname()) if err != nil { - p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error()) + p.Log.Errorf("Could not resolve %q, skipping it. 
Error: %s", address.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { - serviceURL := p.AddressToURL(URL, resolved) + serviceURL := p.AddressToURL(address, resolved) allURLs[serviceURL.String()] = URLAndAddress{ URL: serviceURL, Address: resolved, - OriginalURL: URL, + OriginalURL: address, } } } @@ -401,8 +399,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error var resp *http.Response if u.URL.Scheme != "unix" { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = p.client.Do(req) } else { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = uClient.Do(req) } if err != nil { @@ -420,7 +420,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } if p.MetricVersion == 2 { - parser := parser_v2.Parser{ + parser := parserV2.Parser{ Header: resp.Header, IgnoreTimestamp: p.IgnoreTimestamp, } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 11117e05b45d9..f56cfef8f59da 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -10,11 +10,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/fields" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. @@ -67,12 +67,12 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.False(t, acc.HasTag("test_metric", "address")) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.False(t, acc.HasTag("test_metric", "address")) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") } func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { @@ -95,12 +95,12 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.True(t, acc.TagValue("test_metric", "address") == tsAddress) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.TagValue("test_metric", "address") 
== tsAddress) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL) } func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) { @@ -125,10 +125,10 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) } func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { @@ -149,10 +149,10 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0") - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + require.True(t, acc.TagSetValue("prometheus", "quantile") == "0") + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") } func TestSummaryMayContainNaN(t *testing.T) { @@ -237,9 +237,9 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("prometheus", "go_goroutines")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("prometheus", "go_goroutines")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + require.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) } func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { @@ -262,7 +262,7 @@ func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { require.NoError(t, err) m, _ := acc.Get("test_metric") - assert.WithinDuration(t, time.Now(), m.Time, 5*time.Second) + require.WithinDuration(t, time.Now(), m.Time, 5*time.Second) } func TestUnsupportedFieldSelector(t *testing.T) { @@ -271,8 +271,8 @@ func TestUnsupportedFieldSelector(t *testing.T) { fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector) isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector) - assert.Equal(t, false, isValid) - assert.Equal(t, "spec.containerName", invalidSelector) + require.Equal(t, false, isValid) + require.Equal(t, "spec.containerName", invalidSelector) } func TestInitConfigErrors(t *testing.T) { diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index db9f57e974d2d..4b76ce5c326a3 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -4,7 +4,7 @@ The proxmox plugin gathers metrics about containers and VMs using the Proxmox AP Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration ```toml [[inputs.proxmox]] @@ -25,13 +25,13 @@ 
Telegraf minimum version: Telegraf 1.16.0 response_timeout = "5s" ``` -#### Permissions +### Permissions The plugin will need to have access to the Proxmox API. An API token must be provided with the corresponding user being assigned at least the PVEAuditor role on /. -### Measurements & Fields: +## Measurements & Fields - proxmox - status @@ -50,16 +50,16 @@ role on /. - disk_free - disk_used_percentage -### Tags: +## Tags - - node_fqdn - FQDN of the node telegraf is running on - - vm_name - Name of the VM/container - - vm_fqdn - FQDN of the VM/container - - vm_type - Type of the VM/container (lxc, qemu) +- node_fqdn - FQDN of the node telegraf is running on +- vm_name - Name of the VM/container +- vm_fqdn - FQDN of the VM/container +- vm_type - Type of the VM/container (lxc, qemu) -### Example Output: +## Example Output -``` +```text $ ./telegraf --config telegraf.conf --input-filter proxmox --test > proxmox,host=pxnode,node_fqdn=pxnode.example.com,vm_fqdn=vm1.example.com,vm_name=vm1,vm_type=lxc cpuload=0.147998116735236,disk_free=4461129728i,disk_total=5217320960i,disk_used=756191232i,disk_used_percentage=14,mem_free=1046827008i,mem_total=1073741824i,mem_used=26914816i,mem_used_percentage=2,status="running",swap_free=536698880i,swap_total=536870912i,swap_used=172032i,swap_used_percentage=0,uptime=1643793i 1595457277000000000 > ... diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index efd7fae7d5d5f..c8234a6d8e75c 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -163,8 +163,8 @@ func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { } } -func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VMStat, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id json.Number) (VMStat, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(id) + "/status/current" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { @@ -196,8 +196,8 @@ func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) { return vmStats, nil } -func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { - apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmID + "/config" +func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(vmID) + "/config" jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { return VMConfig{}, err @@ -213,30 +213,30 @@ func getVMConfig(px *Proxmox, vmID string, rt ResourceType) (VMConfig, error) { } func getFields(vmStat VMStat) map[string]interface{} { - memTotal, memUsed, memFree, memUsedPercentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) - swapTotal, swapUsed, swapFree, swapUsedPercentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) - diskTotal, diskUsed, diskFree, diskUsedPercentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) + memMetrics := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) + swapMetrics := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) + diskMetrics := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) return map[string]interface{}{ "status": vmStat.Status, "uptime": jsonNumberToInt64(vmStat.Uptime), "cpuload": jsonNumberToFloat64(vmStat.CPULoad), - "mem_used": memUsed, - "mem_total": memTotal, - "mem_free": memFree, - 
"mem_used_percentage": memUsedPercentage, - "swap_used": swapUsed, - "swap_total": swapTotal, - "swap_free": swapFree, - "swap_used_percentage": swapUsedPercentage, - "disk_used": diskUsed, - "disk_total": diskTotal, - "disk_free": diskFree, - "disk_used_percentage": diskUsedPercentage, + "mem_used": memMetrics.used, + "mem_total": memMetrics.total, + "mem_free": memMetrics.free, + "mem_used_percentage": memMetrics.usedPercentage, + "swap_used": swapMetrics.used, + "swap_total": swapMetrics.total, + "swap_free": swapMetrics.free, + "swap_used_percentage": swapMetrics.usedPercentage, + "disk_used": diskMetrics.used, + "disk_total": diskMetrics.total, + "disk_free": diskMetrics.free, + "disk_used_percentage": diskMetrics.usedPercentage, } } -func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) { +func getByteMetrics(total json.Number, used json.Number) metrics { int64Total := jsonNumberToInt64(total) int64Used := jsonNumberToInt64(used) int64Free := int64Total - int64Used @@ -245,7 +245,12 @@ func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, f usedPercentage = float64(int64Used) * 100 / float64(int64Total) } - return int64Total, int64Used, int64Free, usedPercentage + return metrics{ + total: int64Total, + used: int64Used, + free: int64Free, + usedPercentage: usedPercentage, + } } func jsonNumberToInt64(value json.Number) int64 { diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index f05b6450bd7be..b0916a5f3dd8e 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -5,15 +5,15 @@ import ( "strings" "testing" - "github.com/bmizerany/assert" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}` var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` -var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` +var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"},{"vmid":112,"type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container2"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` @@ -59,7 +59,7 @@ func 
TestGetNodeSearchDomain(t *testing.T) { err := getNodeSearchDomain(px) require.NoError(t, err) - assert.Equal(t, px.nodeSearchDomain, "test.example.com") + require.Equal(t, px.nodeSearchDomain, "test.example.com") } func TestGatherLxcData(t *testing.T) { @@ -69,7 +69,7 @@ func TestGatherLxcData(t *testing.T) { acc := &testutil.Accumulator{} gatherLxcData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2078164), @@ -103,7 +103,7 @@ func TestGatherQemuData(t *testing.T) { acc := &testutil.Accumulator{} gatherQemuData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2159739), @@ -139,5 +139,5 @@ func TestGather(t *testing.T) { require.NoError(t, err) // Results from both tests above - assert.Equal(t, acc.NFields(), 30) + require.Equal(t, acc.NFields(), 30) } diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index c064150c061f6..78d0010b501eb 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -41,7 +41,7 @@ type VMCurrentStats struct { } type VMStat struct { - ID string `json:"vmid"` + ID json.Number `json:"vmid"` Name string `json:"name"` Status string `json:"status"` UsedMem json.Number `json:"mem"` @@ -67,3 +67,10 @@ type NodeDNS struct { Searchdomain string `json:"search"` } `json:"data"` } + +type metrics struct { + total int64 + used int64 + free int64 + usedPercentage float64 +} diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md index 1406064d5c617..db85dfe94a589 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -1,12 +1,12 @@ # PuppetAgent Input Plugin -#### Description +## Description The puppetagent plugin collects variables outputted from the 'last_run_summary.yaml' file usually located in `/var/lib/puppet/state/` [PuppetAgent Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs/). 
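For illustration only, here is a minimal Go sketch of how a summary file like this could be read outside of Telegraf. It is not the plugin's own parsing code: the `summary` struct, the use of `gopkg.in/yaml.v3`, and the choice of top-level `version`, `events`, `resources`, and `time` keys are assumptions based on the sample file shown below.

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// summary mirrors a subset of last_run_summary.yaml.
// The key layout is an assumption taken from the sample below,
// not a type defined by the puppetagent plugin itself.
type summary struct {
	Version struct {
		Puppet string `yaml:"puppet"`
	} `yaml:"version"`
	Events    map[string]int64   `yaml:"events"`
	Resources map[string]int64   `yaml:"resources"`
	Time      map[string]float64 `yaml:"time"`
}

func main() {
	raw, err := os.ReadFile("/var/lib/puppet/state/last_run_summary.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	var s summary
	if err := yaml.Unmarshal(raw, &s); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Each key/value pair maps naturally onto one measurement,
	// e.g. events.failure -> puppetagent_events_failure.
	for k, v := range s.Events {
		fmt.Printf("puppetagent_events_%s value=%d\n", k, v)
	}
	for k, v := range s.Resources {
		fmt.Printf("puppetagent_resources_%s value=%d\n", k, v)
	}
	fmt.Printf("puppetagent_version_puppet value=%q\n", s.Version.Puppet)
}
```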
-``` +```sh cat /var/lib/puppet/state/last_run_summary.yaml --- @@ -45,7 +45,7 @@ cat /var/lib/puppet/state/last_run_summary.yaml puppet: "3.7.5" ``` -``` +```sh jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent --config tele.conf --test * Plugin: puppetagent, Collection 1 > [] puppetagent_events_failure value=0 @@ -77,65 +77,72 @@ jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent - > [] puppetagent_version_puppet value=3.7.5 ``` -## Measurements: -#### PuppetAgent int64 measurements: +## Measurements + +### PuppetAgent int64 measurements Meta: + - units: int64 - tags: `` Measurement names: - - puppetagent_changes_total - - puppetagent_events_failure - - puppetagent_events_total - - puppetagent_events_success - - puppetagent_resources_changed - - puppetagent_resources_corrective_change - - puppetagent_resources_failed - - puppetagent_resources_failedtorestart - - puppetagent_resources_outofsync - - puppetagent_resources_restarted - - puppetagent_resources_scheduled - - puppetagent_resources_skipped - - puppetagent_resources_total - - puppetagent_time_service - - puppetagent_time_lastrun - - puppetagent_version_config - -#### PuppetAgent float64 measurements: + +- puppetagent_changes_total +- puppetagent_events_failure +- puppetagent_events_total +- puppetagent_events_success +- puppetagent_resources_changed +- puppetagent_resources_corrective_change +- puppetagent_resources_failed +- puppetagent_resources_failedtorestart +- puppetagent_resources_outofsync +- puppetagent_resources_restarted +- puppetagent_resources_scheduled +- puppetagent_resources_skipped +- puppetagent_resources_total +- puppetagent_time_service +- puppetagent_time_lastrun +- puppetagent_version_config + +### PuppetAgent float64 measurements Meta: + - units: float64 - tags: `` Measurement names: - - puppetagent_time_anchor - - puppetagent_time_catalogapplication - - puppetagent_time_configretrieval - - puppetagent_time_convertcatalog - - puppetagent_time_cron - - puppetagent_time_exec - - puppetagent_time_factgeneration - - puppetagent_time_file - - puppetagent_time_filebucket - - puppetagent_time_group - - puppetagent_time_lastrun - - puppetagent_time_noderetrieval - - puppetagent_time_notify - - puppetagent_time_package - - puppetagent_time_pluginsync - - puppetagent_time_schedule - - puppetagent_time_sshauthorizedkey - - puppetagent_time_total - - puppetagent_time_transactionevaluation - - puppetagent_time_user - - puppetagent_version_config - -#### PuppetAgent string measurements: + +- puppetagent_time_anchor +- puppetagent_time_catalogapplication +- puppetagent_time_configretrieval +- puppetagent_time_convertcatalog +- puppetagent_time_cron +- puppetagent_time_exec +- puppetagent_time_factgeneration +- puppetagent_time_file +- puppetagent_time_filebucket +- puppetagent_time_group +- puppetagent_time_lastrun +- puppetagent_time_noderetrieval +- puppetagent_time_notify +- puppetagent_time_package +- puppetagent_time_pluginsync +- puppetagent_time_schedule +- puppetagent_time_sshauthorizedkey +- puppetagent_time_total +- puppetagent_time_transactionevaluation +- puppetagent_time_user +- puppetagent_version_config + +### PuppetAgent string measurements Meta: + - units: string - tags: `` Measurement names: - - puppetagent_version_puppet + +- puppetagent_version_puppet diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 5f106642adeb6..b89cd4da6e1ed 100644 --- a/plugins/inputs/rabbitmq/README.md +++ 
b/plugins/inputs/rabbitmq/README.md @@ -7,7 +7,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management [management]: https://www.rabbitmq.com/management.html [management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html -### Configuration +## Configuration ```toml [[inputs.rabbitmq]] @@ -66,7 +66,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management # federation_upstream_exclude = [] ``` -### Metrics +## Metrics - rabbitmq_overview - tags: @@ -90,7 +90,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - return_unroutable (int, number of unroutable messages) - return_unroutable_rate (float, number of unroutable messages per second) -+ rabbitmq_node +- rabbitmq_node - tags: - url - node @@ -182,7 +182,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - slave_nodes (int, count) - synchronised_slave_nodes (int, count) -+ rabbitmq_exchange +- rabbitmq_exchange - tags: - url - exchange @@ -217,17 +217,17 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - messages_publish (int, count) - messages_return_unroutable (int, count) -### Sample Queries +## Sample Queries Message rates for the entire node can be calculated from total message counts. For instance, to get the rate of messages published per minute, use this query: -``` +```sql SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) ``` -### Example Output +## Example Output -``` +```text rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000 rabbitmq_overview,url=http://amqp.example.org:15672,host=amqp.example.org channels=2i,consumers=1i,exchanges=17i,messages_acked=329i,messages=0i,messages_ready=0i,messages_unacked=0i,connections=2i,queues=1i,messages_delivered=329i,messages_published=329i,clustering_listeners=2i,amqp_listeners=1i 1493684035000000000 rabbitmq_node,url=http://amqp.example.org:15672,node=rabbit@amqp.example.org,host=amqp.example.org fd_total=1024i,fd_used=32i,mem_limit=8363329126i,sockets_total=829i,disk_free=8175935488i,disk_free_limit=50000000i,mem_used=58771080i,proc_total=1048576i,proc_used=267i,run_queue=0i,sockets_used=2i,running=1i 149368403500000000 diff --git a/plugins/inputs/raindrops/README.md b/plugins/inputs/raindrops/README.md index cdc13eec2d9a6..c380310513e0f 100644 --- a/plugins/inputs/raindrops/README.md +++ b/plugins/inputs/raindrops/README.md @@ -3,7 +3,7 @@ The [raindrops](http://raindrops.bogomips.org/) plugin reads from specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) URI and adds stats to InfluxDB. 
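As a rough sketch of what such a scrape involves (not the plugin's implementation), the counters can be fetched with plain `net/http` and parsed line by line. The `http://localhost:8080/_raindrops` URL matches the example configuration below; the `calling: <n>` / `writing: <n>` line format is an assumption about the raindrops middleware output.

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Example endpoint; the plugin's `urls` setting points at the same path.
	resp, err := http.Get("http://localhost:8080/_raindrops")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// Assumed format: "calling: <n>" and "writing: <n>" on their own lines.
		for _, key := range []string{"calling", "writing"} {
			if !strings.HasPrefix(line, key+":") {
				continue
			}
			raw := strings.TrimSpace(strings.TrimPrefix(line, key+":"))
			if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
				fmt.Printf("raindrops %s=%di\n", key, v)
			}
		}
	}
}
```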
-### Configuration: +## Configuration ```toml # Read raindrops stats @@ -11,31 +11,31 @@ specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middlewa urls = ["http://localhost:8080/_raindrops"] ``` -### Measurements & Fields: +## Measurements & Fields - raindrops - - calling (integer, count) - - writing (integer, count) + - calling (integer, count) + - writing (integer, count) - raindrops_listen - - active (integer, bytes) - - queued (integer, bytes) + - active (integer, bytes) + - queued (integer, bytes) -### Tags: +## Tags - Raindops calling/writing of all the workers: - - server - - port + - server + - port - raindrops_listen (ip:port): - - ip - - port + - ip + - port - raindrops_listen (Unix Socket): - - socket + - socket -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter raindrops --test * Plugin: raindrops, Collection 1 > raindrops,port=8080,server=localhost calling=0i,writing=0i 1455479896806238204 diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index 904d5418ec8db..cf1db2d1f6f98 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -116,7 +116,6 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } activeLineStr, activeErr = buf.ReadString('\n') if activeErr != nil { - iterate = false break } if strings.Compare(activeLineStr, "\n") == 0 { @@ -154,7 +153,7 @@ func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { } acc.AddFields("raindrops_listen", lis, tags) } - return nil + return nil //nolint:nilerr // nil returned on purpose } // Get tag(s) for the raindrops calling/writing plugin diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index 591dd624a10ea..6da64dbb4d207 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -7,11 +7,11 @@ import ( "net/http/httptest" "net/url" "testing" + "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "time" + + "github.com/influxdata/telegraf/testutil" ) const sampleResponse = ` @@ -41,7 +41,7 @@ func TestRaindropsTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := r.getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md index 9c1cda75bff10..65dee749ee7bd 100644 --- a/plugins/inputs/ras/README.md +++ b/plugins/inputs/ras/README.md @@ -4,7 +4,7 @@ This plugin is only available on Linux (only for `386`, `amd64`, `arm` and `arm6 The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). -### Configuration +## Configuration ```toml [[inputs.ras]] @@ -15,7 +15,7 @@ The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://githu In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. -### Metrics +## Metrics - ras - tags: @@ -40,6 +40,7 @@ In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. 
In case - unclassified_mce_errors Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: + - internal_timer_errors - smm_handler_code_access_violation_errors - internal_parity_errors @@ -48,13 +49,13 @@ Please note that `processor_base_errors` is aggregate counter measuring the foll - microcode_rom_parity_errors - unclassified_mce_errors -### Permissions +## Permissions This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. -### Example Output +## Example Output -``` +```shell ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 ``` diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index a8d4ba727d7df..e3f35b06e0c8d 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -23,11 +23,11 @@ type Ras struct { DBPath string `toml:"db_path"` Log telegraf.Logger `toml:"-"` - db *sql.DB `toml:"-"` - latestTimestamp time.Time `toml:"-"` - cpuSocketCounters map[int]metricCounters `toml:"-"` - serverCounters metricCounters `toml:"-"` + db *sql.DB + latestTimestamp time.Time + cpuSocketCounters map[int]metricCounters + serverCounters metricCounters } type machineCheckError struct { diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index 656200fde95cc..d4e87dfe5f12c 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -8,9 +8,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestUpdateCounters(t *testing.T) { @@ -19,20 +19,20 @@ func TestUpdateCounters(t *testing.T) { ras.updateCounters(&mce) } - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") for metric, value := range ras.cpuSocketCounters[0] { if metric == processorBase { // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors, // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors - assert.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) + require.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) } else { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } @@ -61,9 +61,9 @@ func TestUpdateLatestTimestamp(t *testing.T) { }...) 
for _, mce := range testData { err := ras.updateLatestTimestamp(mce.Timestamp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) + require.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) } func TestMultipleSockets(t *testing.T) { @@ -99,14 +99,14 @@ func TestMultipleSockets(t *testing.T) { for _, mce := range testData { ras.updateCounters(&mce) } - assert.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") + require.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") for _, metricData := range ras.cpuSocketCounters { for metric, value := range metricData { if metric == levelTwoCache { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) } else { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } } @@ -117,21 +117,21 @@ func TestMissingDatabase(t *testing.T) { ras := newRas() ras.DBPath = "/nonexistent/ras.db" err := ras.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestEmptyDatabase(t *testing.T) { ras := newRas() - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") - assert.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") + require.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") for metric, value := range ras.cpuSocketCounters[0] { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } diff --git a/plugins/inputs/ravendb/README.md b/plugins/inputs/ravendb/README.md index b40850ab5c82d..e527d167f1281 100644 --- a/plugins/inputs/ravendb/README.md +++ b/plugins/inputs/ravendb/README.md @@ -4,7 +4,7 @@ Reads metrics from RavenDB servers via monitoring endpoints APIs. Requires RavenDB Server 5.2+. -### Configuration +## Configuration The following is an example config for RavenDB. **Note:** The client certificate used should have `Operator` permissions on the cluster. @@ -43,7 +43,7 @@ The following is an example config for RavenDB. **Note:** The client certificate # collection_stats_dbs = [] ``` -### Metrics +## Metrics - ravendb_server - tags: @@ -57,7 +57,7 @@ The following is an example config for RavenDB. **Note:** The client certificate - certificate_server_certificate_expiration_left_in_sec (optional) - certificate_well_known_admin_certificates (optional, separated by ';') - cluster_current_term - - cluster_index + - cluster_index - cluster_node_state - 0 -> Passive - 1 -> Candidate @@ -147,7 +147,7 @@ The following is an example config for RavenDB. **Note:** The client certificate - uptime_in_sec - ravendb_indexes - - tags: + - tags: - database_name - index_name - node_tag @@ -201,16 +201,16 @@ The following is an example config for RavenDB. 
**Note:** The client certificate - tombstones_size_in_bytes - total_size_in_bytes -### Example output +## Example output -``` +```text > ravendb_server,cluster_id=07aecc42-9194-4181-999c-1c42450692c9,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 backup_current_number_of_running_backups=0i,backup_max_number_of_concurrent_backups=4i,certificate_server_certificate_expiration_left_in_sec=-1,cluster_current_term=2i,cluster_index=10i,cluster_node_state=4i,config_server_urls="http://127.0.0.1:8080",cpu_assigned_processor_count=8i,cpu_machine_usage=19.09944089456869,cpu_process_usage=0.16977205323024872,cpu_processor_count=8i,cpu_thread_pool_available_completion_port_threads=1000i,cpu_thread_pool_available_worker_threads=32763i,databases_loaded_count=1i,databases_total_count=1i,disk_remaining_storage_space_percentage=18i,disk_system_store_total_data_file_size_in_mb=35184372088832i,disk_system_store_used_data_file_size_in_mb=31379031064576i,disk_total_free_space_in_mb=42931i,license_expiration_left_in_sec=24079222.8772186,license_max_cores=256i,license_type="Enterprise",license_utilized_cpu_cores=8i,memory_allocated_in_mb=205i,memory_installed_in_mb=16384i,memory_low_memory_severity=0i,memory_physical_in_mb=16250i,memory_total_dirty_in_mb=0i,memory_total_swap_size_in_mb=0i,memory_total_swap_usage_in_mb=0i,memory_working_set_swap_usage_in_mb=0i,network_concurrent_requests_count=1i,network_last_request_time_in_sec=0.0058717,network_requests_per_sec=0.09916543455308825,network_tcp_active_connections=128i,network_total_requests=10i,server_full_version="5.2.0-custom-52",server_process_id=31044i,server_version="5.2",uptime_in_sec=56i 1613027977000000000 > ravendb_databases,database_id=ced0edba-8f80-48b8-8e81-c3d2c6748ec3,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 counts_alerts=0i,counts_attachments=17i,counts_documents=1059i,counts_performance_hints=0i,counts_rehabs=0i,counts_replication_factor=1i,counts_revisions=5475i,counts_unique_attachments=17i,indexes_auto_count=0i,indexes_count=7i,indexes_disabled_count=0i,indexes_errored_count=0i,indexes_errors_count=0i,indexes_idle_count=0i,indexes_stale_count=0i,indexes_static_count=7i,statistics_doc_puts_per_sec=0,statistics_map_index_indexes_per_sec=0,statistics_map_reduce_index_mapped_per_sec=0,statistics_map_reduce_index_reduced_per_sec=0,statistics_request_average_duration_in_ms=0,statistics_requests_count=0i,statistics_requests_per_sec=0,storage_documents_allocated_data_file_in_mb=140737488355328i,storage_documents_used_data_file_in_mb=74741020884992i,storage_indexes_allocated_data_file_in_mb=175921860444160i,storage_indexes_used_data_file_in_mb=120722940755968i,storage_total_allocated_storage_file_in_mb=325455441821696i,storage_total_free_space_in_mb=42931i,uptime_in_sec=54 1613027977000000000 > ravendb_indexes,database_name=db1,host=DESKTOP-2OISR6D,index_name=Orders/Totals,node_tag=A,url=http://localhost:8080 errors=0i,is_invalid=false,lock_mode="Unlock",mapped_per_sec=0,priority="Normal",reduced_per_sec=0,state="Normal",status="Running",time_since_last_indexing_in_sec=45.4256655,time_since_last_query_in_sec=45.4304202,type="Map" 1613027977000000000 > ravendb_collections,collection_name=@hilo,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 documents_count=8i,documents_size_in_bytes=122880i,revisions_size_in_bytes=0i,tombstones_size_in_bytes=122880i,total_size_in_bytes=245760i 1613027977000000000 ``` -### Contributors +## Contributors -- Marcin Lewandowski 
(https://github.com/ml054/) -- Casey Barton (https://github.com/bartoncasey) \ No newline at end of file +- Marcin Lewandowski () +- Casey Barton () diff --git a/plugins/inputs/redfish/README.md b/plugins/inputs/redfish/README.md index cabf7e088047b..a033493e605a1 100644 --- a/plugins/inputs/redfish/README.md +++ b/plugins/inputs/redfish/README.md @@ -4,7 +4,7 @@ The `redfish` plugin gathers metrics and status information about CPU temperatur Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration ```toml [[inputs.redfish]] @@ -29,7 +29,7 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - redfish_thermal_temperatures - tags: @@ -50,8 +50,7 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - -+ redfish_thermal_fans +- redfish_thermal_fans - tags: - source - member_id @@ -70,7 +69,6 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - - redfish_power_powersupplies - tags: - source @@ -90,7 +88,6 @@ Telegraf minimum version: Telegraf 1.15.0 - power_input_watts - power_output_watts - - redfish_power_voltages (available only if voltage data is found) - tags: - source @@ -110,10 +107,9 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal +## Example Output -### Example Output - -``` +```text redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,member_id="0"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,member_id="1"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,member_id="2"datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index dcf26b192c651..bda0779c941b6 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -176,8 +176,8 @@ func (r *Redfish) Init() error { return nil } -func (r *Redfish) getData(url string, payload interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (r *Redfish) getData(address string, payload interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 4cbbb045302c1..04a102014490f 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -761,40 +761,42 @@ func TestInvalidDellJSON(t *testing.T) { }, } for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { - http.Error(w, "Unauthorized.", 401) - return - } + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !checkAuth(r, "test", "test") { + http.Error(w, "Unauthorized.", 401) + return + } - switch r.URL.Path { - case 
"/redfish/v1/Chassis/System.Embedded.1/Thermal": - http.ServeFile(w, r, tt.thermalfilename) - case "/redfish/v1/Chassis/System.Embedded.1/Power": - http.ServeFile(w, r, tt.powerfilename) - case "/redfish/v1/Chassis/System.Embedded.1": - http.ServeFile(w, r, tt.chassisfilename) - case "/redfish/v1/Systems/System.Embedded.1": - http.ServeFile(w, r, tt.hostnamefilename) - default: - w.WriteHeader(http.StatusNotFound) + switch r.URL.Path { + case "/redfish/v1/Chassis/System.Embedded.1/Thermal": + http.ServeFile(w, r, tt.thermalfilename) + case "/redfish/v1/Chassis/System.Embedded.1/Power": + http.ServeFile(w, r, tt.powerfilename) + case "/redfish/v1/Chassis/System.Embedded.1": + http.ServeFile(w, r, tt.chassisfilename) + case "/redfish/v1/Systems/System.Embedded.1": + http.ServeFile(w, r, tt.hostnamefilename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Redfish{ + Address: ts.URL, + Username: "test", + Password: "test", + ComputerSystemID: "System.Embedded.1", } - })) - defer ts.Close() - - plugin := &Redfish{ - Address: ts.URL, - Username: "test", - Password: "test", - ComputerSystemID: "System.Embedded.1", - } - require.NoError(t, plugin.Init()) + require.NoError(t, plugin.Init()) - var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "error parsing input:") + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") + }) } } diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index bd89ea75346b2..eff031bab7cfd 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -1,6 +1,6 @@ # Redis Input Plugin -### Configuration: +## Configuration ```toml # Read Redis's basic status information @@ -37,7 +37,7 @@ # insecure_skip_verify = true ``` -### Measurements & Fields: +## Measurements & Fields The plugin gathers the results of the [INFO](https://redis.io/commands/info) redis command. There are two separate measurements: _redis_ and _redis\_keyspace_, the latter is used for gathering database related statistics. @@ -45,97 +45,97 @@ There are two separate measurements: _redis_ and _redis\_keyspace_, the latter i Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) and the elapsed time since the last rdb save (rdb\_last\_save\_time\_elapsed). 
- redis - - keyspace_hitrate(float, number) - - rdb_last_save_time_elapsed(int, seconds) + - keyspace_hitrate(float, number) + - rdb_last_save_time_elapsed(int, seconds) **Server** - - uptime(int, seconds) - - lru_clock(int, number) - - redis_version(string) + - uptime(int, seconds) + - lru_clock(int, number) + - redis_version(string) **Clients** - - clients(int, number) - - client_longest_output_list(int, number) - - client_biggest_input_buf(int, number) - - blocked_clients(int, number) + - clients(int, number) + - client_longest_output_list(int, number) + - client_biggest_input_buf(int, number) + - blocked_clients(int, number) **Memory** - - used_memory(int, bytes) - - used_memory_rss(int, bytes) - - used_memory_peak(int, bytes) - - total_system_memory(int, bytes) - - used_memory_lua(int, bytes) - - maxmemory(int, bytes) - - maxmemory_policy(string) - - mem_fragmentation_ratio(float, number) + - used_memory(int, bytes) + - used_memory_rss(int, bytes) + - used_memory_peak(int, bytes) + - total_system_memory(int, bytes) + - used_memory_lua(int, bytes) + - maxmemory(int, bytes) + - maxmemory_policy(string) + - mem_fragmentation_ratio(float, number) **Persistence** - - loading(int,flag) - - rdb_changes_since_last_save(int, number) - - rdb_bgsave_in_progress(int, flag) - - rdb_last_save_time(int, seconds) - - rdb_last_bgsave_status(string) - - rdb_last_bgsave_time_sec(int, seconds) - - rdb_current_bgsave_time_sec(int, seconds) - - aof_enabled(int, flag) - - aof_rewrite_in_progress(int, flag) - - aof_rewrite_scheduled(int, flag) - - aof_last_rewrite_time_sec(int, seconds) - - aof_current_rewrite_time_sec(int, seconds) - - aof_last_bgrewrite_status(string) - - aof_last_write_status(string) + - loading(int,flag) + - rdb_changes_since_last_save(int, number) + - rdb_bgsave_in_progress(int, flag) + - rdb_last_save_time(int, seconds) + - rdb_last_bgsave_status(string) + - rdb_last_bgsave_time_sec(int, seconds) + - rdb_current_bgsave_time_sec(int, seconds) + - aof_enabled(int, flag) + - aof_rewrite_in_progress(int, flag) + - aof_rewrite_scheduled(int, flag) + - aof_last_rewrite_time_sec(int, seconds) + - aof_current_rewrite_time_sec(int, seconds) + - aof_last_bgrewrite_status(string) + - aof_last_write_status(string) **Stats** - - total_connections_received(int, number) - - total_commands_processed(int, number) - - instantaneous_ops_per_sec(int, number) - - total_net_input_bytes(int, bytes) - - total_net_output_bytes(int, bytes) - - instantaneous_input_kbps(float, KB/sec) - - instantaneous_output_kbps(float, KB/sec) - - rejected_connections(int, number) - - sync_full(int, number) - - sync_partial_ok(int, number) - - sync_partial_err(int, number) - - expired_keys(int, number) - - evicted_keys(int, number) - - keyspace_hits(int, number) - - keyspace_misses(int, number) - - pubsub_channels(int, number) - - pubsub_patterns(int, number) - - latest_fork_usec(int, microseconds) - - migrate_cached_sockets(int, number) + - total_connections_received(int, number) + - total_commands_processed(int, number) + - instantaneous_ops_per_sec(int, number) + - total_net_input_bytes(int, bytes) + - total_net_output_bytes(int, bytes) + - instantaneous_input_kbps(float, KB/sec) + - instantaneous_output_kbps(float, KB/sec) + - rejected_connections(int, number) + - sync_full(int, number) + - sync_partial_ok(int, number) + - sync_partial_err(int, number) + - expired_keys(int, number) + - evicted_keys(int, number) + - keyspace_hits(int, number) + - keyspace_misses(int, number) + - pubsub_channels(int, number) + - 
pubsub_patterns(int, number) + - latest_fork_usec(int, microseconds) + - migrate_cached_sockets(int, number) **Replication** - - connected_slaves(int, number) - - master_link_down_since_seconds(int, number) - - master_link_status(string) - - master_repl_offset(int, number) - - second_repl_offset(int, number) - - repl_backlog_active(int, number) - - repl_backlog_size(int, bytes) - - repl_backlog_first_byte_offset(int, number) - - repl_backlog_histlen(int, bytes) + - connected_slaves(int, number) + - master_link_down_since_seconds(int, number) + - master_link_status(string) + - master_repl_offset(int, number) + - second_repl_offset(int, number) + - repl_backlog_active(int, number) + - repl_backlog_size(int, bytes) + - repl_backlog_first_byte_offset(int, number) + - repl_backlog_histlen(int, bytes) **CPU** - - used_cpu_sys(float, number) - - used_cpu_user(float, number) - - used_cpu_sys_children(float, number) - - used_cpu_user_children(float, number) + - used_cpu_sys(float, number) + - used_cpu_user(float, number) + - used_cpu_sys_children(float, number) + - used_cpu_user_children(float, number) **Cluster** - - cluster_enabled(int, flag) + - cluster_enabled(int, flag) - redis_keyspace - - keys(int, number) - - expires(int, number) - - avg_ttl(int, number) + - keys(int, number) + - expires(int, number) + - avg_ttl(int, number) - redis_cmdstat Every Redis used command will have 3 new fields: - - calls(int, number) - - usec(int, mircoseconds) - - usec_per_call(float, microseconds) + - calls(int, number) + - usec(int, mircoseconds) + - usec_per_call(float, microseconds) - redis_replication - tags: @@ -148,22 +148,23 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - lag(int, number) - offset(int, number) -### Tags: +## Tags - All measurements have the following tags: - - port - - server - - replication_role + - port + - server + - replication_role - The redis_keyspace measurement has an additional database tag: - - database + - database - The redis_cmdstat measurement has an additional tag: - - command + - command -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.redis]] ## specify servers via a url matching: @@ -178,22 +179,26 @@ Using this configuration: ``` When run with: -``` + +```sh ./telegraf --config telegraf.conf --input-filter redis --test ``` It produces: -``` + +```shell * Plugin: redis, Collection 1 > redis,server=localhost,port=6379,replication_role=master,host=host 
keyspace_hitrate=1,clients=2i,blocked_clients=0i,instantaneous_input_kbps=0,sync_full=0i,pubsub_channels=0i,pubsub_patterns=0i,total_net_output_bytes=6659253i,used_memory=842448i,total_system_memory=8351916032i,aof_current_rewrite_time_sec=-1i,rdb_changes_since_last_save=0i,sync_partial_err=0i,latest_fork_usec=508i,instantaneous_output_kbps=0,expired_keys=0i,used_memory_peak=843416i,aof_rewrite_in_progress=0i,aof_last_bgrewrite_status="ok",migrate_cached_sockets=0i,connected_slaves=0i,maxmemory_policy="noeviction",aof_rewrite_scheduled=0i,total_net_input_bytes=3125i,used_memory_rss=9564160i,repl_backlog_histlen=0i,rdb_last_bgsave_status="ok",aof_last_rewrite_time_sec=-1i,keyspace_misses=0i,client_biggest_input_buf=5i,used_cpu_user=1.33,maxmemory=0i,rdb_current_bgsave_time_sec=-1i,total_commands_processed=271i,repl_backlog_size=1048576i,used_cpu_sys=3,uptime=2822i,lru_clock=16706281i,used_memory_lua=37888i,rejected_connections=0i,sync_partial_ok=0i,evicted_keys=0i,rdb_last_save_time_elapsed=1922i,rdb_last_save_time=1493099368i,instantaneous_ops_per_sec=0i,used_cpu_user_children=0,client_longest_output_list=0i,master_repl_offset=0i,repl_backlog_active=0i,keyspace_hits=2i,used_cpu_sys_children=0,cluster_enabled=0i,rdb_last_bgsave_time_sec=0i,aof_last_write_status="ok",total_connections_received=263i,aof_enabled=0i,repl_backlog_first_byte_offset=0i,mem_fragmentation_ratio=11.35,loading=0i,rdb_bgsave_in_progress=0i 1493101290000000000 ``` redis_keyspace: -``` + +```shell > redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000 ``` redis_command: -``` + +```shell > redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000 ``` diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index 6f8abbda6be0c..a7ca994c53f80 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -8,9 +8,9 @@ import ( "time" "github.com/go-redis/redis" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type testClient struct { @@ -165,7 +165,7 @@ func TestRedis_ParseMetrics(t *testing.T) { "total_writes_processed": int64(17), "lazyfree_pending_objects": int64(0), "maxmemory": int64(0), - "maxmemory_policy": string("noeviction"), + "maxmemory_policy": "noeviction", "mem_aof_buffer": int64(0), "mem_clients_normal": int64(17440), "mem_clients_slaves": int64(0), @@ -202,7 +202,7 @@ func TestRedis_ParseMetrics(t *testing.T) { } } } - assert.InDelta(t, + require.InDelta(t, time.Now().Unix()-fields["rdb_last_save_time"].(int64), fields["rdb_last_save_time_elapsed"].(int64), 2) // allow for 2 seconds worth of offset diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md index b1946644ea13a..852da8318e704 100644 --- a/plugins/inputs/rethinkdb/README.md +++ b/plugins/inputs/rethinkdb/README.md @@ -2,7 +2,7 @@ Collect metrics from [RethinkDB](https://www.rethinkdb.com/). -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage rethinkdb`. @@ -25,7 +25,7 @@ generate it using `telegraf --usage rethinkdb`. 
# servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] ``` -### Metrics +## Metrics - rethinkdb - tags: @@ -44,7 +44,7 @@ generate it using `telegraf --usage rethinkdb`. - disk_usage_metadata_bytes (integer, bytes) - disk_usage_preallocated_bytes (integer, bytes) -+ rethinkdb_engine +- rethinkdb_engine - tags: - type - ns diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index a0c5e4ba8ae57..2f9c90f1e9e7c 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -3,8 +3,9 @@ package rethinkdb import ( "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -36,7 +37,7 @@ func TestAddEngineStats(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) + require.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) } } @@ -67,7 +68,7 @@ func TestAddEngineStatsPartial(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range missingKeys { - assert.False(t, acc.HasInt64Field("rethinkdb", metric)) + require.False(t, acc.HasInt64Field("rethinkdb", metric)) } } @@ -107,6 +108,6 @@ func TestAddStorageStats(t *testing.T) { storage.AddStats(&acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb", metric)) + require.True(t, acc.HasInt64Field("rethinkdb", metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index ffb63e64106e2..553deddcb0219 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,9 +9,9 @@ import ( "strconv" "strings" - "github.com/influxdata/telegraf" - "gopkg.in/gorethink/gorethink.v3" + + "github.com/influxdata/telegraf" ) type Server struct { @@ -37,7 +37,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator) error { return fmt.Errorf("error adding member stats, %s", err.Error()) } - if err := s.addTableStats(acc); err != nil { + if err := s.addTablesStats(acc); err != nil { return fmt.Errorf("error adding table stats, %s", err.Error()) } @@ -49,7 +49,7 @@ func (s *Server) validateVersion() error { return errors.New("could not determine the RethinkDB server version: process.version key missing") } - versionRegexp := regexp.MustCompile("\\d.\\d.\\d") + versionRegexp := regexp.MustCompile(`\d.\d.\d`) versionString := versionRegexp.FindString(s.serverStatus.Process.Version) if versionString == "" { return fmt.Errorf("could not determine the RethinkDB server version: malformed version string (%v)", s.serverStatus.Process.Version) @@ -161,7 +161,7 @@ var TableTracking = []string{ "total_writes", } -func (s *Server) addTableStats(acc telegraf.Accumulator) error { +func (s *Server) addTablesStats(acc telegraf.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) if err != nil { return fmt.Errorf("table stats query error, %s", err.Error()) @@ -174,23 +174,33 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { return errors.New("could not parse table_status results") } for _, table := range tables { - cursor, err := gorethink.DB("rethinkdb").Table("stats"). - Get([]string{"table_server", table.ID, s.serverStatus.ID}). 
- Run(s.session) + err = s.addTableStats(acc, table) if err != nil { - return fmt.Errorf("table stats query error, %s", err.Error()) - } - defer cursor.Close() - var ts tableStats - if err := cursor.One(&ts); err != nil { - return fmt.Errorf("failure to parse table stats, %s", err.Error()) + return err } + } + return nil +} - tags := s.getDefaultTags() - tags["type"] = "data" - tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) - ts.Engine.AddEngineStats(TableTracking, acc, tags) - ts.Storage.AddStats(acc, tags) +func (s *Server) addTableStats(acc telegraf.Accumulator, table tableStatus) error { + cursor, err := gorethink.DB("rethinkdb").Table("stats"). + Get([]string{"table_server", table.ID, s.serverStatus.ID}). + Run(s.session) + if err != nil { + return fmt.Errorf("table stats query error, %s", err.Error()) + } + defer cursor.Close() + + var ts tableStats + if err := cursor.One(&ts); err != nil { + return fmt.Errorf("failure to parse table stats, %s", err.Error()) } + + tags := s.getDefaultTags() + tags["type"] = "data" + tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) + ts.Engine.AddEngineStats(TableTracking, acc, tags) + ts.Storage.AddStats(acc, tags) + return nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 0119131900b61..0584dcc90c33b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -6,9 +6,9 @@ package rethinkdb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestValidateVersion(t *testing.T) { @@ -39,7 +39,7 @@ func TestAddClusterStats(t *testing.T) { require.NoError(t, err) for _, metric := range ClusterTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -50,7 +50,7 @@ func TestAddMemberStats(t *testing.T) { require.NoError(t, err) for _, metric := range MemberTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -61,7 +61,7 @@ func TestAddTableStats(t *testing.T) { require.NoError(t, err) for _, metric := range TableTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } keys := []string{ @@ -77,6 +77,6 @@ func TestAddTableStats(t *testing.T) { } for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md index a435eea4d7f63..f1a46af336ff9 100644 --- a/plugins/inputs/riak/README.md +++ b/plugins/inputs/riak/README.md @@ -2,7 +2,7 @@ The Riak plugin gathers metrics from one or more riak instances. -### Configuration: +## Configuration ```toml # Description @@ -11,7 +11,7 @@ The Riak plugin gathers metrics from one or more riak instances. servers = ["http://localhost:8098"] ``` -### Measurements & Fields: +## Measurements & Fields Riak provides one measurement named "riak", with the following fields: @@ -63,16 +63,16 @@ Riak provides one measurement named "riak", with the following fields: Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds. -### Tags: +## Tags All measurements have the following tags: - server (the host:port of the given server address, ex. `127.0.0.1:8087`) - nodename (the internal node name received, ex. 
`riak@127.0.0.1`) -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter riak --test > riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i,read_repair=0i,read_repairs_total=0i 1455913392622482332 ``` diff --git a/plugins/inputs/riemann_listener/README.md b/plugins/inputs/riemann_listener/README.md index 54e70be6ecb71..9110a5f1eb147 100644 --- a/plugins/inputs/riemann_listener/README.md +++ b/plugins/inputs/riemann_listener/README.md @@ -3,8 +3,7 @@ The Riemann Listener is a simple input plugin that listens for messages from client that use riemann clients using riemann-protobuff format. - -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -36,6 +35,7 @@ This is a sample configuration for the plugin. ## Defaults to the OS configuration. # keep_alive_period = "5m" ``` + Just like Riemann the default port is 5555. This can be configured, refer configuration above. Riemann `Service` is mapped as `measurement`. `metric` and `TTL` are converted into field values. 
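Since the listener speaks the Riemann protobuf protocol, the mapping is easiest to see from a client. A minimal sketch using riemann-go-client, mirroring the usage in the plugin's tests; the address and service name are placeholders and error handling is reduced to `log.Fatal`:

```go
package main

import (
	"log"
	"time"

	riemanngo "github.com/riemann/riemann-go-client"
)

func main() {
	// Connect to the listener on the default Riemann port.
	c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second)
	if err := c.Connect(); err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// The event's Service becomes the Telegraf measurement name.
	result, err := riemanngo.SendEvent(c, &riemanngo.Event{Service: "hello"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("ok=%v", result.GetOk())
}
```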
diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go index a38d5989cb5d0..597e2b8847714 100644 --- a/plugins/inputs/riemann_listener/riemann_listener.go +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "fmt" "io" - "log" "net" "os" "os/signal" @@ -15,15 +14,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/metric" + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" + "google.golang.org/protobuf/proto" - "github.com/gogo/protobuf/proto" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - riemanngo "github.com/riemann/riemann-go-client" - riemangoProto "github.com/riemann/riemann-go-client/proto" ) type RiemannSocketListener struct { @@ -37,12 +36,12 @@ type RiemannSocketListener struct { wg sync.WaitGroup - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` telegraf.Accumulator } type setReadBufferer interface { - SetReadBuffer(bytes int) error + SetReadBuffer(sizeInBytes int) error } type riemannListener struct { @@ -162,13 +161,6 @@ func readMessages(r io.Reader, p []byte) error { return nil } -func checkError(err error) { - log.Println("The error is") - if err != nil { - log.Println(err.Error()) - } -} - func (rsl *riemannListener) read(conn net.Conn) { defer rsl.removeConnection(conn) defer conn.Close() @@ -187,7 +179,7 @@ func (rsl *riemannListener) read(conn net.Conn) { if err = binary.Read(conn, binary.BigEndian, &header); err != nil { if err.Error() != "EOF" { rsl.Log.Debugf("Failed to read header") - riemannReturnErrorResponse(conn, err.Error()) + rsl.riemannReturnErrorResponse(conn, err.Error()) return } return @@ -196,19 +188,19 @@ func (rsl *riemannListener) read(conn net.Conn) { if err = readMessages(conn, data); err != nil { rsl.Log.Debugf("Failed to read body: %s", err.Error()) - riemannReturnErrorResponse(conn, "Failed to read body") + rsl.riemannReturnErrorResponse(conn, "Failed to read body") return } if err = proto.Unmarshal(data, messagePb); err != nil { rsl.Log.Debugf("Failed to unmarshal: %s", err.Error()) - riemannReturnErrorResponse(conn, "Failed to unmarshal") + rsl.riemannReturnErrorResponse(conn, "Failed to unmarshal") return } riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events) for _, m := range riemannEvents { if m.Service == "" { - riemannReturnErrorResponse(conn, "No Service Name") + rsl.riemannReturnErrorResponse(conn, "No Service Name") return } tags := make(map[string]string) @@ -224,53 +216,52 @@ func (rsl *riemannListener) read(conn net.Conn) { singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) rsl.AddMetric(singleMetric) } - riemannReturnResponse(conn) + rsl.riemannReturnResponse(conn) } } -func riemannReturnResponse(conn net.Conn) { +func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) { t := true message := new(riemangoProto.Msg) message.Ok = &t returnData, err := proto.Marshal(message) if err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) return } b := new(bytes.Buffer) if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } // send the msg length if _, err = conn.Write(b.Bytes()); err != nil 
{ - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } if _, err = conn.Write(returnData); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } } -func riemannReturnErrorResponse(conn net.Conn, errorMessage string) { +func (rsl *riemannListener) riemannReturnErrorResponse(conn net.Conn, errorMessage string) { t := false message := new(riemangoProto.Msg) message.Ok = &t message.Error = &errorMessage returnData, err := proto.Marshal(message) if err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) return } b := new(bytes.Buffer) if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } // send the msg length if _, err = conn.Write(b.Bytes()); err != nil { - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } if _, err = conn.Write(returnData); err != nil { - log.Println("Somethign") - checkError(err) + rsl.Log.Errorf("The error is: %v", err) } } @@ -314,7 +305,7 @@ func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { ctx, cancelFunc := context.WithCancel(context.Background()) - go processOsSignals(cancelFunc) + go rsl.processOsSignals(cancelFunc) rsl.Accumulator = acc if rsl.ServiceAddress == "" { rsl.Log.Warnf("Using default service_address tcp://:5555") @@ -367,14 +358,13 @@ func (rsl *RiemannSocketListener) Start(acc telegraf.Accumulator) error { } // Handle cancellations from the process -func processOsSignals(cancelFunc context.CancelFunc) { +func (rsl *RiemannSocketListener) processOsSignals(cancelFunc context.CancelFunc) { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, os.Interrupt) for { sig := <-signalChan - switch sig { - case os.Interrupt: - log.Println("Signal SIGINT is received, probably due to `Ctrl-C`, exiting ...") + if sig == os.Interrupt { + rsl.Log.Warn("Signal SIGINT is received, probably due to `Ctrl-C`, exiting...") cancelFunc() return } diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go index 92dc829ac1312..3f87944610312 100644 --- a/plugins/inputs/riemann_listener/riemann_listener_test.go +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -5,11 +5,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/testutil" riemanngo "github.com/riemann/riemann-go-client" "github.com/stretchr/testify/require" - "gotest.tools/assert" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestSocketListener_tcp(t *testing.T) { @@ -28,26 +28,26 @@ func TestSocketListener_tcp(t *testing.T) { testStats(t) testMissingService(t) } + func testStats(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() - if err != nil { - log.Println("Error") - panic(err) - } + require.NoError(t, err) defer c.Close() result, err := riemanngo.SendEvent(c, &riemanngo.Event{ Service: "hello", }) - assert.Equal(t, result.GetOk(), true) + require.NoError(t, err) + require.Equal(t, result.GetOk(), true) } + func testMissingService(t *testing.T) { c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) err := c.Connect() - if err != nil { - panic(err) - } + require.NoError(t, err) defer c.Close() result, err := riemanngo.SendEvent(c, &riemanngo.Event{}) - assert.Equal(t, result.GetOk(), false) + require.Equal(t, 
false, result.GetOk()) + require.Equal(t, "No Service Name", result.GetError()) + require.NoError(t, err) } diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 6883f3a90b85f..26668212acce7 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -3,7 +3,7 @@ The Salesforce plugin gathers metrics about the limits in your Salesforce organization and the remaining usage. It fetches its data from the [limits endpoint](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_limits.htm) of Salesforce's REST API. -### Configuration: +## Configuration ```toml # Gather Metrics about Salesforce limits and remaining usage @@ -19,7 +19,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ # version = "39.0" ``` -### Measurements & Fields: +## Measurements & Fields Salesforce provide one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created. @@ -28,20 +28,19 @@ Each entry is converted to snake\_case and 2 fields are created. - \_remaining represents the usage remaining before hitting the limit threshold - salesforce - - \_max (int) - - \_remaining (int) - - (...) + - \_max (int) + - \_remaining (int) + - (...) -### Tags: +## Tags - All measurements have the following tags: - - host - - organization_id (t18 char organisation ID) + - host + - organization_id (t18 char organisation ID) +## Example Output -### Example Output: - -``` +```sh $./telegraf --config telegraf.conf --input-filter salesforce --test salesforce,organization_id=XXXXXXXXXXXXXXXXXX,host=xxxxx.salesforce.com daily_workflow_emails_max=546000i,hourly_time_based_workflow_max=50i,daily_async_apex_executions_remaining=250000i,daily_durable_streaming_api_events_remaining=1000000i,streaming_api_concurrent_clients_remaining=2000i,daily_bulk_api_requests_remaining=10000i,hourly_sync_report_runs_remaining=500i,daily_api_requests_max=5000000i,data_storage_mb_remaining=1073i,file_storage_mb_remaining=1069i,daily_generic_streaming_api_events_remaining=10000i,hourly_async_report_runs_remaining=1200i,hourly_time_based_workflow_remaining=50i,daily_streaming_api_events_remaining=1000000i,single_email_max=5000i,hourly_dashboard_refreshes_remaining=200i,streaming_api_concurrent_clients_max=2000i,daily_durable_generic_streaming_api_events_remaining=1000000i,daily_api_requests_remaining=4999998i,hourly_dashboard_results_max=5000i,hourly_async_report_runs_max=1200i,daily_durable_generic_streaming_api_events_max=1000000i,hourly_dashboard_results_remaining=5000i,concurrent_sync_report_runs_max=20i,durable_streaming_api_concurrent_clients_remaining=2000i,daily_workflow_emails_remaining=546000i,hourly_dashboard_refreshes_max=200i,daily_streaming_api_events_max=1000000i,hourly_sync_report_runs_max=500i,hourly_o_data_callout_max=10000i,mass_email_max=5000i,mass_email_remaining=5000i,single_email_remaining=5000i,hourly_dashboard_statuses_max=999999999i,concurrent_async_get_report_instances_max=200i,daily_durable_streaming_api_events_max=1000000i,daily_generic_streaming_api_events_max=10000i,hourly_o_data_callout_remaining=10000i,concurrent_sync_report_runs_remaining=20i,daily_bulk_api_requests_max=10000i,data_storage_mb_max=1073i,hourly_dashboard_statuses_remaining=999999999i,concurrent_async_get_report_instances_remaining=200i,daily_async_apex_executions_max=250000i,durable_streaming_api_concurrent_clients_max=2000i,file_storage_mb_max=1073i 1501565661000000000 diff --git 
a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index d9bcfe2e4544d..9de12f588b556 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -5,7 +5,8 @@ package installed. This plugin collects sensor metrics with the `sensors` executable from the lm-sensor package. -### Configuration: +## Configuration + ```toml # Monitor sensors, requires lm-sensors package [[inputs.sensors]] @@ -17,19 +18,21 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se # timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields + Fields are created dynamically depending on the sensors. All fields are float. -### Tags: +## Tags - All measurements have the following tags: - - chip - - feature + - chip + - feature -### Example Output: +## Example Output -#### Default -``` +### Default + +```shell $ telegraf --config telegraf.conf --input-filter sensors --test * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000 @@ -39,8 +42,9 @@ $ telegraf --config telegraf.conf --input-filter sensors --test > sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000 ``` -#### With remove_numbers=false -``` +### With remove_numbers=false + +```shell * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000 > sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000 diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index be4cace6eab79..fe1d62ceceeb0 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -367,7 +367,7 @@ Vcore Voltage: // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd, _ := args[3], args[4:] if cmd == "sensors" { //nolint:errcheck,revive @@ -375,7 +375,9 @@ Vcore Voltage: } else { //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 9e5366706e5df..80413048ba658 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -6,7 +6,7 @@ accordance with the specification from [sflow.org](https://sflow.org/). Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are turned into metrics. Counters and other header samples are ignored. -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -18,7 +18,7 @@ avoid cardinality issues: - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
-### Configuration +## Configuration ```toml [[inputs.sflow]] @@ -33,7 +33,7 @@ avoid cardinality issues: # read_buffer_size = "" ``` -### Metrics +## Metrics - sflow - tags: @@ -81,34 +81,36 @@ avoid cardinality issues: - ip_flags (integer, ip_ver field of IPv4 structures) - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6)) -### Troubleshooting +## Troubleshooting The [sflowtool][] utility can be used to print sFlow packets, and compared against the metrics produced by Telegraf. -``` + +```sh sflowtool -p 6343 ``` If opening an issue, in addition to the output of sflowtool it will also be helpful to collect a packet capture. Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 ``` [sflowtool]: https://github.com/sflow/sflowtool -### Example Output -``` +## Example Output + +```shell sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 ``` -### Reference Documentation +## Reference Documentation -This sflow implementation was built from the reference document +This sflow implementation was built from the reference document [sflow.org/sflow_version_5.txt](sflow_version_5) - [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index dec58e3f9afab..3fb37c396f9b5 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -1,19 +1,19 @@ # S.M.A.R.T. Input Plugin Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs) that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. -See smartmontools (https://www.smartmontools.org/). +See smartmontools (). SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration. If no devices are specified, the plugin will scan for SMART devices via the following command: -``` +```sh smartctl --scan ``` Metrics will be reported from the following `smartctl` command: -``` +```sh smartctl --info --attributes --health -n --format=brief ``` @@ -23,41 +23,48 @@ Also, NVMe capabilities were introduced in version 6.5. To enable SMART on a storage device run: -``` +```sh smartctl -s on ``` + ## NVMe vendor specific attributes -For NVMe disk type, plugin can use command line utility `nvme-cli`. 
It has a feature +For NVMe disk type, plugin can use command line utility `nvme-cli`. It has a feature to easy access a vendor specific attributes. -This plugin supports nmve-cli version 1.5 and above (https://github.com/linux-nvme/nvme-cli). +This plugin supports nmve-cli version 1.5 and above (). In case of `nvme-cli` absence NVMe vendor specific metrics will not be obtained. Vendor specific SMART metrics for NVMe disks may be reported from the following `nvme` command: -``` +```sh nvme smart-log-add ``` Note that vendor plugins for `nvme-cli` could require different naming convention and report format. To see installed plugin extensions, depended on the nvme-cli version, look at the bottom of: -``` + +```sh nvme help ``` To gather disk vendor id (vid) `id-ctrl` could be used: -``` + +```sh nvme id-ctrl ``` -Association between a vid and company can be found there: https://pcisig.com/membership/member-companies. + +Association between a vid and company can be found there: . Devices affiliation to being NVMe or non NVMe will be determined thanks to: -``` + +```sh smartctl --scan ``` + and: -``` + +```sh smartctl --scan -d nvme ``` @@ -105,6 +112,14 @@ smartctl --scan -d nvme ## Timeout for the cli command to complete. # timeout = "30s" + + ## Optionally call smartctl and nvme-cli with a specific concurrency policy. + ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. + ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of + ## SMART data - one individual array drive at the time. In such case please set this configuration option + ## to "sequential" to get readings for all drives. + ## valid options: concurrent, sequential + # read_method = "concurrent" ``` ## Permissions @@ -113,12 +128,14 @@ It's important to note that this plugin references smartctl and nvme-cli, which Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. You will need the following in your telegraf config: + ```toml [[inputs.smart]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # For smartctl add the following lines: @@ -131,6 +148,7 @@ Cmnd_Alias NVME = /path/to/nvme telegraf ALL=(ALL) NOPASSWD: NVME Defaults!NVME !logfile, !syslog, !pam_session ``` + To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartctl` or `path_nvme` in the configuration should be set to execute this script. @@ -171,57 +189,84 @@ To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartct - value - worst -#### Flags +### Flags The interpretation of the tag `flags` is: - - `K` auto-keep - - `C` event count - - `R` error rate - - `S` speed/performance - - `O` updated online - - `P` prefailure warning -#### Exit Status +- `K` auto-keep +- `C` event count +- `R` error rate +- `S` speed/performance +- `O` updated online +- `P` prefailure warning + +### Exit Status The `exit_status` field captures the exit status of the used cli utilities command which is defined by a bitmask. For the interpretation of the bitmask see the man page for smartctl or nvme-cli. ## Device Names + Device names, e.g., `/dev/sda`, are *not persistent*, and may be subject to change across reboots or system changes. Instead, you can use the *World Wide Name* (WWN) or serial number to identify devices. On Linux block devices can be referenced by the WWN in the following location: `/dev/disk/by-id/`. 
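For illustration only (not part of the plugin), a small Go sketch that resolves the stable names under `/dev/disk/by-id/`, including the WWN-based ones, to the current kernel device nodes:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const byID = "/dev/disk/by-id"
	entries, err := os.ReadDir(byID)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, e := range entries {
		// Each entry is a symlink to a non-persistent node such as /dev/sda.
		target, err := filepath.EvalSymlinks(filepath.Join(byID, e.Name()))
		if err != nil {
			continue
		}
		fmt.Printf("%s -> %s\n", e.Name(), target)
	}
}
```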
+ ## Troubleshooting + If you expect to see more SMART metrics than this plugin shows, be sure to use a proper version of smartctl or nvme-cli utility which has the functionality to gather desired data. Also, check -your device capability because not every SMART metrics are mandatory. +your device capability because not every SMART metrics are mandatory. For example the number of temperature sensors depends on the device specification. If this plugin is not working as expected for your SMART enabled device, please run these commands and include the output in a bug report: For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe devices by default): -``` + +```sh smartctl --scan ``` + For NVMe devices: -``` + +```sh smartctl --scan -d nvme ``` + Run the following command replacing your configuration setting for NOCHECK and the DEVICE (name of the device could be taken from the previous command): -``` + +```sh smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE ``` -If you try to gather vendor specific metrics, please provide this commad + +If you try to gather vendor specific metrics, please provide this command and replace vendor and device to match your case: -``` + +```sh nvme VENDOR smart-log-add DEVICE ``` -## Example SMART Plugin Outputs + +If you have specified devices array in configuration file, and Telegraf only shows data from one device, you should +change the plugin configuration to sequentially gather disk attributes instead of collecting it in separate threads +(goroutines). To do this find in plugin configuration read_method and change it to sequential: + +```toml + ## Optionally call smartctl and nvme-cli with a specific concurrency policy. + ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. + ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of + ## SMART data - one individual array drive at the time. In such case please set this configuration option + ## to "sequential" to get readings for all drives. 
+ ## valid options: concurrent, sequential + read_method = "sequential" ``` + +## Example SMART Plugin Outputs + +```shell smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000 smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000 diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index b0f189d69fbf9..cc6b40e94fcec 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -43,8 +43,8 @@ var ( // PASSED, FAILED, UNKNOWN smartOverallHealth = regexp.MustCompile(`^(SMART overall-health self-assessment test result|SMART Health Status):\s+(\w+).*$`) - // sasNvmeAttr is a SAS or NVME SMART attribute - sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) + // sasNVMeAttr is a SAS or NVMe SMART attribute + sasNVMeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) // ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 @@ -53,14 +53,26 @@ var ( attribute = regexp.MustCompile(`^\s*([0-9]+)\s(\S+)\s+([-P][-O][-S][-R][-C][-K])\s+([0-9]+)\s+([0-9]+)\s+([0-9-]+)\s+([-\w]+)\s+([\w\+\.]+).*$`) // Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff + // nvme version 1.14+ metrics: + // ID KEY Normalized Raw + // 0xab program_fail_count 100 0 + + // nvme deprecated metric format: // key normalized raw // program_fail_count : 100% 0 - intelExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\w\s]+)%(.+)`) + + // REGEX patter supports deprecated metrics (nvme-cli version below 1.14) and metrics from nvme-cli 1.14 (and above). + intelExpressionPattern = regexp.MustCompile(`^([A-Za-z0-9_\s]+)[:|\s]+(\d+)[%|\s]+(.+)`) // vid : 0x8086 // sn : CFGT53260XSP8011P nvmeIDCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`) + // Format from nvme-cli 1.14 (and above) gives ID and KEY, this regex is for separating id from key. + // ID KEY + // 0xab program_fail_count + nvmeIDSeparatePattern = regexp.MustCompile(`^([A-Za-z0-9_]+)(.+)`) + deviceFieldIds = map[string]string{ "1": "read_error_rate", "7": "seek_error_rate", @@ -70,7 +82,7 @@ var ( } // to obtain metrics from smartctl - sasNvmeAttributes = map[string]struct { + sasNVMeAttributes = map[string]struct { ID string Name string Parse func(fields, deviceFields map[string]interface{}, str string) error @@ -213,12 +225,51 @@ var ( Parse: parseTemperatureSensor, }, } - - // to obtain Intel specific metrics from nvme-cli + // To obtain Intel specific metrics from nvme-cli version 1.14 and above. 
intelAttributes = map[string]struct { ID string Name string Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error + }{ + "program_fail_count": { + Name: "Program_Fail_Count", + }, + "erase_fail_count": { + Name: "Erase_Fail_Count", + }, + "wear_leveling_count": { // previously: "wear_leveling" + Name: "Wear_Leveling_Count", + }, + "e2e_error_detect_count": { // previously: "end_to_end_error_detection_count" + Name: "End_To_End_Error_Detection_Count", + }, + "crc_error_count": { + Name: "Crc_Error_Count", + }, + "media_wear_percentage": { // previously: "timed_workload_media_wear" + Name: "Media_Wear_Percentage", + }, + "host_reads": { + Name: "Host_Reads", + }, + "timed_work_load": { // previously: "timed_workload_timer" + Name: "Timed_Workload_Timer", + }, + "thermal_throttle_status": { + Name: "Thermal_Throttle_Status", + }, + "retry_buff_overflow_count": { // previously: "retry_buffer_overflow_count" + Name: "Retry_Buffer_Overflow_Count", + }, + "pll_lock_loss_counter": { // previously: "pll_lock_loss_count" + Name: "Pll_Lock_Loss_Count", + }, + } + // to obtain Intel specific metrics from nvme-cli + intelAttributesDeprecatedFormat = map[string]struct { + ID string + Name string + Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error }{ "program_fail_count": { Name: "Program_Fail_Count", @@ -269,6 +320,8 @@ var ( Parse: parseBytesWritten, }, } + + knownReadMethods = []string{"concurrent", "sequential"} ) // Smart plugin reads metrics from storage devices supporting S.M.A.R.T. @@ -283,6 +336,7 @@ type Smart struct { Devices []string `toml:"devices"` UseSudo bool `toml:"use_sudo"` Timeout config.Duration `toml:"timeout"` + ReadMethod string `toml:"read_method"` Log telegraf.Logger `toml:"-"` } @@ -333,11 +387,20 @@ var sampleConfig = ` ## Timeout for the cli command to complete. # timeout = "30s" + + ## Optionally call smartctl and nvme-cli with a specific concurrency policy. + ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. + ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of + ## SMART data - one individual array drive at the time. In such case please set this configuration option + ## to "sequential" to get readings for all drives. 
+ ## valid options: concurrent, sequential + # read_method = "concurrent" ` func newSmart() *Smart { return &Smart{ - Timeout: config.Duration(time.Second * 30), + Timeout: config.Duration(time.Second * 30), + ReadMethod: "concurrent", } } @@ -368,6 +431,10 @@ func (m *Smart) Init() error { m.PathNVMe, _ = exec.LookPath("nvme") } + if !contains(knownReadMethods, m.ReadMethod) { + return fmt.Errorf("provided read method `%s` is not valid", m.ReadMethod) + } + err := validatePath(m.PathSmartctl) if err != nil { m.PathSmartctl = "" @@ -404,9 +471,9 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - NVMeDevices := distinguishNVMeDevices(devicesFromConfig, scannedNVMeDevices) + nvmeDevices := distinguishNVMeDevices(devicesFromConfig, scannedNVMeDevices) - m.getVendorNVMeAttributes(acc, NVMeDevices) + m.getVendorNVMeAttributes(acc, nvmeDevices) } return nil } @@ -434,28 +501,28 @@ func (m *Smart) scanAllDevices(ignoreExcludes bool) ([]string, []string, error) } // this will return only NVMe devices - NVMeDevices, err := m.scanDevices(ignoreExcludes, "--scan", "--device=nvme") + nvmeDevices, err := m.scanDevices(ignoreExcludes, "--scan", "--device=nvme") if err != nil { return nil, nil, err } // to handle all versions of smartctl this will return only non NVMe devices - nonNVMeDevices := difference(devices, NVMeDevices) - return NVMeDevices, nonNVMeDevices, nil + nonNVMeDevices := difference(devices, nvmeDevices) + return nvmeDevices, nonNVMeDevices, nil } func distinguishNVMeDevices(userDevices []string, availableNVMeDevices []string) []string { - var NVMeDevices []string + var nvmeDevices []string for _, userDevice := range userDevices { - for _, NVMeDevice := range availableNVMeDevices { + for _, availableNVMeDevice := range availableNVMeDevices { // double check. E.g. in case when nvme0 is equal nvme0n1, will check if "nvme0" part is present. - if strings.Contains(NVMeDevice, userDevice) || strings.Contains(userDevice, NVMeDevice) { - NVMeDevices = append(NVMeDevices, userDevice) + if strings.Contains(availableNVMeDevice, userDevice) || strings.Contains(userDevice, availableNVMeDevice) { + nvmeDevices = append(nvmeDevices, userDevice) } } } - return NVMeDevices + return nvmeDevices } // Scan for S.M.A.R.T. 
devices from smartctl @@ -506,69 +573,86 @@ func excludedDev(excludes []string, deviceLine string) bool { func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) { var wg sync.WaitGroup wg.Add(len(devices)) - for _, device := range devices { - go gatherDisk(acc, m.Timeout, m.UseSudo, m.Attributes, m.PathSmartctl, m.Nocheck, device, &wg) + switch m.ReadMethod { + case "concurrent": + go m.gatherDisk(acc, device, &wg) + case "sequential": + m.gatherDisk(acc, device, &wg) + default: + wg.Done() + } } wg.Wait() } func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []string) { - NVMeDevices := getDeviceInfoForNVMeDisks(acc, devices, m.PathNVMe, m.Timeout, m.UseSudo) + nvmeDevices := getDeviceInfoForNVMeDisks(acc, devices, m.PathNVMe, m.Timeout, m.UseSudo) var wg sync.WaitGroup - for _, device := range NVMeDevices { + for _, device := range nvmeDevices { if contains(m.EnableExtensions, "auto-on") { + // nolint:revive // one case switch on purpose to demonstrate potential extensions switch device.vendorID { case intelVID: wg.Add(1) - go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + switch m.ReadMethod { + case "concurrent": + go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + case "sequential": + gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + default: + wg.Done() + } } } else if contains(m.EnableExtensions, "Intel") && device.vendorID == intelVID { wg.Add(1) - go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + switch m.ReadMethod { + case "concurrent": + go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + case "sequential": + gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + default: + wg.Done() + } } } wg.Wait() } func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice { - var NVMeDevices []nvmeDevice + var nvmeDevices []nvmeDevice for _, device := range devices { - vid, sn, mn, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) + newDevice, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) if err != nil { acc.AddError(fmt.Errorf("cannot find device info for %s device", device)) continue } - newDevice := nvmeDevice{ - name: device, - vendorID: vid, - model: mn, - serialNumber: sn, - } - NVMeDevices = append(NVMeDevices, newDevice) + nvmeDevices = append(nvmeDevices, newDevice) } - return NVMeDevices + return nvmeDevices } -func gatherNVMeDeviceInfo(nvme, device string, timeout config.Duration, useSudo bool) (string, string, string, error) { +func gatherNVMeDeviceInfo(nvme, deviceName string, timeout config.Duration, useSudo bool) (device nvmeDevice, err error) { args := []string{"id-ctrl"} - args = append(args, strings.Split(device, " ")...) + args = append(args, strings.Split(deviceName, " ")...) out, err := runCmd(timeout, useSudo, nvme, args...) 
if err != nil { - return "", "", "", err + return device, err } outStr := string(out) - - vid, sn, mn, err := findNVMeDeviceInfo(outStr) - - return vid, sn, mn, err + device, err = findNVMeDeviceInfo(outStr) + if err != nil { + return device, err + } + device.name = deviceName + return device, nil } -func findNVMeDeviceInfo(output string) (string, string, string, error) { +func findNVMeDeviceInfo(output string) (nvmeDevice, error) { scanner := bufio.NewScanner(strings.NewReader(output)) var vid, sn, mn string @@ -580,7 +664,7 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { matches[2] = strings.TrimSpace(matches[2]) if matches[1] == "vid" { if _, err := fmt.Sscanf(matches[2], "%s", &vid); err != nil { - return "", "", "", err + return nvmeDevice{}, err } } if matches[1] == "sn" { @@ -591,7 +675,13 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { } } } - return vid, sn, mn, nil + + newDevice := nvmeDevice{ + vendorID: vid, + model: mn, + serialNumber: sn, + } + return newDevice, nil } func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) { @@ -619,10 +709,31 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, uses tags["model"] = device.model tags["serial_no"] = device.serialNumber - if matches := intelExpressionPattern.FindStringSubmatch(line); len(matches) > 3 { - matches[1] = strings.TrimSpace(matches[1]) + // Create struct to initialize later with intel attributes. + var ( + attr = struct { + ID string + Name string + Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error + }{} + attrExists bool + ) + + if matches := intelExpressionPattern.FindStringSubmatch(line); len(matches) > 3 && len(matches[1]) > 1 { + // Check if nvme shows metrics in deprecated format or in format with ID. + // Based on that, an attribute map with metrics is chosen. + // If string has more than one character it means it has KEY there, otherwise it's empty string (""). + if separatedIDAndKey := nvmeIDSeparatePattern.FindStringSubmatch(matches[1]); len(strings.TrimSpace(separatedIDAndKey[2])) > 1 { + matches[1] = strings.TrimSpace(separatedIDAndKey[2]) + attr, attrExists = intelAttributes[matches[1]] + } else { + matches[1] = strings.TrimSpace(matches[1]) + attr, attrExists = intelAttributesDeprecatedFormat[matches[1]] + } + matches[3] = strings.TrimSpace(matches[3]) - if attr, ok := intelAttributes[matches[1]]; ok { + + if attrExists { tags["name"] = attr.Name if attr.ID != "" { tags["id"] = attr.ID @@ -641,18 +752,18 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, uses } } -func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { +func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.WaitGroup) { defer wg.Done() // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n - args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"} + args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", m.Nocheck, "--format=brief"} args = append(args, strings.Split(device, " ")...) - out, e := runCmd(timeout, usesudo, smartctl, args...) + out, e := runCmd(m.Timeout, m.UseSudo, m.PathSmartctl, args...) 
outStr := string(out) // Ignore all exit statuses except if it is a command line parse error exitStatus, er := exitStatus(e) if er != nil { - acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", smartctl, strings.Join(args, " "), e, outStr)) + acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", m.PathSmartctl, strings.Join(args, " "), e, outStr)) return } @@ -712,7 +823,7 @@ func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, coll tags := map[string]string{} fields := make(map[string]interface{}) - if collectAttributes { + if m.Attributes { //add power mode keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled", "power"} for _, key := range keys { @@ -724,8 +835,8 @@ func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, coll attr := attribute.FindStringSubmatch(line) if len(attr) > 1 { - // attribute has been found, add it only if collectAttributes is true - if collectAttributes { + // attribute has been found, add it only if m.Attributes is true + if m.Attributes { tags["id"] = attr[1] tags["name"] = attr[2] tags["flags"] = attr[3] @@ -758,8 +869,8 @@ func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, coll } } else { // what was found is not a vendor attribute - if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 { - if attr, ok := sasNvmeAttributes[matches[1]]; ok { + if matches := sasNVMeAttr.FindStringSubmatch(line); len(matches) > 2 { + if attr, ok := sasNVMeAttributes[matches[1]]; ok { tags["name"] = attr.Name if attr.ID != "" { tags["id"] = attr.ID @@ -774,8 +885,8 @@ func gatherDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo, coll continue } // if the field is classified as an attribute, only add it - // if collectAttributes is true - if collectAttributes { + // if m.Attributes is true + if m.Attributes { acc.AddFields("smart_attribute", fields, tags) } } @@ -972,13 +1083,13 @@ func parseTemperatureSensor(fields, _ map[string]interface{}, str string) error return nil } -func validatePath(path string) error { - pathInfo, err := os.Stat(path) +func validatePath(filePath string) error { + pathInfo, err := os.Stat(filePath) if os.IsNotExist(err) { - return fmt.Errorf("provided path does not exist: [%s]", path) + return fmt.Errorf("provided path does not exist: [%s]", filePath) } if mode := pathInfo.Mode(); !mode.IsRegular() { - return fmt.Errorf("provided path does not point to a regular file: [%s]", path) + return fmt.Errorf("provided path does not point to a regular file: [%s]", filePath) } return nil } diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 5a1799381cebe..6801ca764afa5 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -24,11 +24,11 @@ func TestGatherAttributes(t *testing.T) { if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil } else if args[0] == "--info" && args[7] == "/dev/nvme0" { - return []byte(smartctlNvmeInfoData), nil + return []byte(smartctlNVMeInfoData), nil } else if args[0] == "--scan" && len(args) == 1 { return []byte(mockScanData), nil } else if args[0] == "--scan" && len(args) >= 2 && args[1] == "--device=nvme" { - return []byte(mockScanNvmeData), nil + return []byte(mockScanNVMeData), nil } } return nil, errors.New("command not found") @@ -45,7 +45,7 @@ func TestGatherAttributes(t *testing.T) { s.PathSmartctl = "smartctl" s.PathNVMe = "" - t.Run("Only non nvme device", func(t 
*testing.T) { + t.Run("Only non NVMe device", func(t *testing.T) { s.Devices = []string{"/dev/ada0"} var acc testutil.Accumulator @@ -62,7 +62,7 @@ func TestGatherAttributes(t *testing.T) { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } }) - t.Run("Only nvme device", func(t *testing.T) { + t.Run("Only NVMe device", func(t *testing.T) { s.Devices = []string{"/dev/nvme0"} var acc testutil.Accumulator @@ -71,12 +71,78 @@ func TestGatherAttributes(t *testing.T) { require.NoError(t, err) assert.Equal(t, 32, acc.NFields(), "Wrong number of fields gathered") - testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, testSmartctlNVMeAttributes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) }) }) } +func TestGatherInParallelMode(t *testing.T) { + s := newSmart() + s.Attributes = true + s.PathSmartctl = "smartctl" + s.PathNVMe = "nvmeIdentifyController" + s.EnableExtensions = append(s.EnableExtensions, "auto-on") + s.Devices = []string{"/dev/nvme0"} + + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + if len(args) > 0 { + if args[0] == "--info" && args[7] == "/dev/ada0" { + return []byte(mockInfoAttributeData), nil + } else if args[0] == "--info" && args[7] == "/dev/nvmeIdentifyController" { + return []byte(smartctlNVMeInfoData), nil + } else if args[0] == "--scan" && len(args) == 1 { + return []byte(mockScanData), nil + } else if args[0] == "--scan" && len(args) >= 2 && args[1] == "--device=nvme" { + return []byte(mockScanNVMeData), nil + } else if args[0] == "intel" && args[1] == "smart-log-add" { + return []byte(nvmeIntelInfoDataMetricsFormat), nil + } else if args[0] == "id-ctrl" { + return []byte(nvmeIdentifyController), nil + } + } + return nil, errors.New("command not found") + } + + t.Run("Gather NVMe device info in goroutine", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "concurrent" + + err := s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, + testutil.SortMetrics(), testutil.IgnoreTime()) + }) + + t.Run("Gather NVMe device info sequentially", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "sequential" + + err := s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, + testutil.SortMetrics(), testutil.IgnoreTime()) + }) + + t.Run("Gather NVMe device info - not known read method", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "horizontally" + + err := s.Init() + require.Error(t, err) + + err = s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, []telegraf.Metric{}, result) + }) +} + func TestGatherNoAttributes(t *testing.T) { s := newSmart() s.Attributes = false @@ -90,9 +156,9 @@ func TestGatherNoAttributes(t *testing.T) { } else if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil } else if args[0] == "--info" && args[7] == "/dev/nvme0" { - return []byte(smartctlNvmeInfoData), nil + return []byte(smartctlNVMeInfoData), nil } else if args[0] == "--scan" && args[1] == "--device=nvme" { - return []byte(mockScanNvmeData), nil + return []byte(mockScanNVMeData), nil } } return nil, errors.New("command not found") @@ -111,7 +177,7 @@ func 
TestGatherNoAttributes(t *testing.T) { for _, test := range testsAda0Device { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } - for _, test := range testNvmeDevice { + for _, test := range testNVMeDevice { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } }) @@ -123,6 +189,16 @@ func TestExcludedDev(t *testing.T) { assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") } +var ( + sampleSmart = Smart{ + PathSmartctl: "", + Nocheck: "", + Attributes: true, + UseSudo: true, + Timeout: config.Duration(time.Second * 30), + } +) + func TestGatherSATAInfo(t *testing.T) { runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData), nil @@ -134,7 +210,8 @@ func TestGatherSATAInfo(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -150,7 +227,7 @@ func TestGatherSATAInfo65(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -166,7 +243,7 @@ func TestGatherHgstSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -182,7 +259,7 @@ func TestGatherHtSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) testutil.RequireMetricsEqual(t, testHtsasAtributtes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } @@ -198,7 +275,7 @@ func TestGatherSSD(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } @@ -214,14 +291,14 @@ func TestGatherSSDRaid(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } -func TestGatherNvme(t *testing.T) { +func TestGatherNVMe(t *testing.T) { runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(smartctlNvmeInfoData), nil + return []byte(smartctlNVMeInfoData), nil } var ( @@ -230,15 +307,38 @@ func TestGatherNvme(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) + sampleSmart.gatherDisk(acc, "nvme0", wg) + + testutil.RequireMetricsEqual(t, testSmartctlNVMeAttributes, acc.GetTelegrafMetrics(), + testutil.SortMetrics(), testutil.IgnoreTime()) +} + +func TestGatherIntelNVMeMetrics(t *testing.T) { + runCmd = func(timeout config.Duration, sudo bool, command 
string, args ...string) ([]byte, error) { + return []byte(nvmeIntelInfoDataMetricsFormat), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + device = nvmeDevice{ + name: "nvme0", + model: mockModel, + serialNumber: mockSerial, + } + ) + + wg.Add(1) + gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) - testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, testutil.SortMetrics(), testutil.IgnoreTime()) } -func TestGatherIntelNvme(t *testing.T) { +func TestGatherIntelNVMeDeprecatedFormatMetrics(t *testing.T) { runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(nvmeIntelInfoData), nil + return []byte(nvmeIntelInfoDataDeprecatedMetricsFormat), nil } var ( @@ -255,17 +355,17 @@ func TestGatherIntelNvme(t *testing.T) { gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) result := acc.GetTelegrafMetrics() - testutil.RequireMetricsEqual(t, testIntelInvmeAttributes, result, + testutil.RequireMetricsEqual(t, testIntelNVMeAttributes, result, testutil.SortMetrics(), testutil.IgnoreTime()) } func Test_findVIDFromNVMeOutput(t *testing.T) { - vid, sn, mn, err := findNVMeDeviceInfo(nvmeIdentifyController) + device, err := findNVMeDeviceInfo(nvmeIdentifyController) assert.Nil(t, err) - assert.Equal(t, "0x8086", vid) - assert.Equal(t, "CVFT5123456789ABCD", sn) - assert.Equal(t, "INTEL SSDPEDABCDEFG", mn) + assert.Equal(t, "0x8086", device.vendorID) + assert.Equal(t, "CVFT5123456789ABCD", device.serialNumber) + assert.Equal(t, "INTEL SSDPEDABCDEFG", device.model) } func Test_checkForNVMeDevices(t *testing.T) { @@ -293,7 +393,7 @@ func Test_difference(t *testing.T) { func Test_integerOverflow(t *testing.T) { runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(smartctlNvmeInfoDataWithOverflow), nil + return []byte(smartctlNVMeInfoDataWithOverflow), nil } var ( @@ -303,7 +403,8 @@ func Test_integerOverflow(t *testing.T) { t.Run("If data raw_value is out of int64 range, there should be no metrics for that attribute", func(t *testing.T) { wg.Add(1) - gatherDisk(acc, config.Duration(time.Second*30), true, true, "", "", "nvme0", wg) + + sampleSmart.gatherDisk(acc, "nvme0", wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, testOverflowAttributes, result, @@ -656,7 +757,7 @@ var ( mockModel = "INTEL SSDPEDABCDEFG" mockSerial = "CVFT5123456789ABCD" - testSmartctlNvmeAttributes = []telegraf.Metric{ + testSmartctlNVMeAttributes = []telegraf.Metric{ testutil.MustMetric("smart_device", map[string]string{ "device": "nvme0", @@ -1045,7 +1146,7 @@ var ( }, } - testNvmeDevice = []struct { + testNVMeDevice = []struct { fields map[string]interface{} tags map[string]string }{ @@ -1063,7 +1164,7 @@ var ( }, } - testIntelInvmeAttributes = []telegraf.Metric{ + testIntelNVMeAttributes = []telegraf.Metric{ testutil.MustMetric("smart_attribute", map[string]string{ "device": "nvme0", @@ -1257,11 +1358,146 @@ var ( time.Now(), ), } + + testIntelNVMeNewFormatAttributes = []telegraf.Metric{ + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Program_Fail_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + 
map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Erase_Fail_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Wear_Leveling_Count", + }, + map[string]interface{}{ + "raw_value": int64(700090417315), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "End_To_End_Error_Detection_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Crc_Error_Count", + }, + map[string]interface{}{ + "raw_value": 13, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Media_Wear_Percentage", + }, + map[string]interface{}{ + "raw_value": 552, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Host_Reads", + }, + map[string]interface{}{ + "raw_value": 73, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Timed_Workload_Timer", + }, + map[string]interface{}{ + "raw_value": int64(2343038), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Thermal_Throttle_Status", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Retry_Buffer_Overflow_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Pll_Lock_Loss_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + } // smartctl --scan mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device` // smartctl --scan -d nvme - mockScanNvmeData = `/dev/nvme0 -d nvme # /dev/nvme0, NVMe device` + mockScanNVMeData = `/dev/nvme0 -d nvme # /dev/nvme0, NVMe device` // smartctl --info --health --attributes --tolerance=verypermissive -n standby --format=brief [DEVICE] mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) @@ -1670,7 +1906,7 @@ Selective self-test flags (0x0): After scanning selected spans, do NOT read-scan remainder of disk. If Selective self-test is pending on power-up, resume after 0 minute delay. 
` - smartctlNvmeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) + smartctlNVMeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org === START OF INFORMATION SECTION === @@ -1720,14 +1956,14 @@ Temperature Sensor 7: 44 C Temperature Sensor 8: 43 C ` - smartctlNvmeInfoDataWithOverflow = ` + smartctlNVMeInfoDataWithOverflow = ` Temperature Sensor 1: 9223372036854775808 C Temperature Sensor 2: -9223372036854775809 C Temperature Sensor 3: 9223372036854775807 C Temperature Sensor 4: -9223372036854775808 C ` - nvmeIntelInfoData = `Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff + nvmeIntelInfoDataDeprecatedMetricsFormat = `Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff key normalized raw program_fail_count : 100% 0 erase_fail_count : 100% 0 @@ -1742,6 +1978,20 @@ retry_buffer_overflow_count : 100% 0 pll_lock_loss_count : 100% 0 nand_bytes_written : 0% sectors: 0 host_bytes_written : 0% sectors: 0 +` + nvmeIntelInfoDataMetricsFormat = `Additional Smart Log for NVME device:nvme0n1 namespace-id:ffffffff +ID KEY Normalized Raw +0xab program_fail_count 100 0 +0xac erase_fail_count 100 0 +0xad wear_leveling_count 100 700090417315 +0xb8 e2e_error_detect_count 100 0 +0xc7 crc_error_count 100 13 +0xe2 media_wear_percentage 100 552 +0xe3 host_reads 100 73 +0xe4 timed_work_load 100 2343038 +0xea thermal_throttle_status 100 0 +0xf0 retry_buff_overflow_count 100 0 +0xf3 pll_lock_loss_counter 100 0 ` nvmeIdentifyController = `NVME Identify Controller: diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index 3728cddb34349..27158133efe6b 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -4,21 +4,13 @@ The `snmp` input plugin uses polling to gather metrics from SNMP agents. Support for gathering individual OIDs as well as complete SNMP tables is included. -### Prerequisites +## Note about Paths -This plugin uses the `snmptable` and `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. +Path is a global variable, separate snmp instances will append the specified +path onto the global path variable -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. +## Configuration -### Configuration ```toml [[inputs.snmp]] ## Agent addresses to retrieve values from. @@ -37,6 +29,9 @@ information. ## SNMP version; can be 1, 2, or 3. # version = 2 + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## SNMP community string. # community = "public" @@ -91,13 +86,13 @@ information. is_tag = true ``` -#### Configure SNMP Requests +### Configure SNMP Requests This plugin provides two methods for configuring the SNMP requests: `fields` and `tables`. Use the `field` option to gather single ad-hoc variables. To collect SNMP tables, use the `table` option. -##### Field +#### Field Use a `field` to collect a variable by OID. 
Requests specified with this option operate similar to the `snmpget` utility. @@ -138,7 +133,7 @@ option operate similar to the `snmpget` utility. # conversion = "" ``` -##### Table +#### Table Use a `table` to configure the collection of a SNMP table. SNMP requests formed with this option operate similarly way to the `snmptable` command. @@ -201,7 +196,7 @@ One [metric][] is created for each row of the SNMP table. ## Specifies if the value of given field should be snmptranslated ## by default no field values are translated # translate = true - + ## Secondary index table allows to merge data from two tables with ## different index that this filed will be used to join them. There can ## be only one secondary index table. @@ -220,27 +215,30 @@ One [metric][] is created for each row of the SNMP table. # secondary_outer_join = false ``` -##### Two Table Join +#### Two Table Join + Snmp plugin can join two snmp tables that have different indexes. For this to work one table should have translation field that return index of second table as value. Examples of such fields are: - * Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, + +* Cisco portTable with translation field: `CISCO-STACK-MIB::portIfIndex`, which value is IfIndex from ifTable - * Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, +* Adva entityFacilityTable with translation field: `ADVA-FSPR7-MIB::entityFacilityOneIndex`, which value is IfIndex from ifTable - * Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, +* Cisco cpeExtPsePortTable with translation field: `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`, which value is index from entPhysicalTable - + Such field can be used to translate index to secondary table with `secondary_index_table = true` and all fields from secondary table (with index pointed from translation field), should have added option `secondary_index_use = true`. Telegraf cannot duplicate entries during join so translation must be 1-to-1 (not 1-to-many). To add fields from secondary table with index that is not present in translation table (outer join), there is a second option for translation index `secondary_outer_join = true`. 
-###### Example configuration for table joins +##### Example configuration for table joins CISCO-POWER-ETHERNET-EXT-MIB table before join: -``` + +```toml [[inputs.snmp.table]] name = "ciscoPower" index_as_tag = true @@ -255,14 +253,16 @@ oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" ``` Partial result (removed agent_host and host columns from all following outputs in this section): -``` + +```text > ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000 > ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000 > ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000 ``` Note here that EntPhyIndex column carries index from ENTITY-MIB table, config for it: -``` + +```toml [[inputs.snmp.table]] name = "entityTable" index_as_tag = true @@ -271,8 +271,10 @@ index_as_tag = true name = "EntPhysicalName" oid = "ENTITY-MIB::entPhysicalName" ``` + Partial result: -``` + +```text > entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000 > entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000 > entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000 @@ -282,7 +284,7 @@ Now, lets attempt to join these results into one table. EntPhyIndex matches inde from second table, and lets convert EntPhysicalName into tag, so second table will only provide tags into result. Configuration: -``` +```toml [[inputs.snmp.table]] name = "ciscoPowerEntity" index_as_tag = true @@ -304,47 +306,50 @@ is_tag = true ``` Result: -``` + +```text > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 > ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 ``` -### Troubleshooting +## Troubleshooting Check that a numeric field can be translated to a textual field: -``` + +```sh $ snmptranslate .1.3.6.1.2.1.1.3.0 DISMAN-EVENT-MIB::sysUpTimeInstance ``` Request a top-level field: -``` -$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 + +```sh +snmpget -v2c -c public 127.0.0.1 sysUpTime.0 ``` Request a table: -``` -$ snmptable -v2c -c public 127.0.0.1 ifTable + +```sh +snmptable -v2c -c public 127.0.0.1 ifTable ``` To collect a packet capture, run this command in the background while running Telegraf or one of the above commands. 
Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 ``` -### Example Output +## Example Output -``` +```shell snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 ``` -[net-snmp]: http://www.net-snmp.org/ -[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK [metric filtering]: /docs/CONFIGURATION.md#metric-filtering [metric]: /docs/METRICS.md diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index a2259e88179c2..193332959dbfa 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -1,26 +1,22 @@ package snmp import ( - "bufio" - "bytes" "encoding/binary" "errors" "fmt" - "log" "math" "net" - "os/exec" "strconv" "strings" "sync" "time" "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/wlog" ) const description = `Retrieves SNMP values from remote agents` @@ -41,6 +37,9 @@ const sampleConfig = ` ## SNMP version; can be 1, 2, or 3. # version = 2 + ## Path to mib files + # path = ["/usr/share/snmp/mibs"] + ## Agent host tag; the tag used to reference the source host # agent_host_tag = "agent_host" @@ -69,36 +68,12 @@ const sampleConfig = ` # priv_protocol = "" ## Privacy password used for encrypted messages. # priv_password = "" - + ## Add fields and tables defining the variables you wish to collect. This ## example collects the system uptime and interface variables. Reference the ## full plugin documentation for configuration details. ` -// execCommand is so tests can mock out exec.Command usage. -var execCommand = exec.Command - -// execCmd executes the specified command, returning the STDOUT content. -// If command exits with error status, the output is captured into the returned error. 
-func execCmd(arg0 string, args ...string) ([]byte, error) { - if wlog.LogLevel() == wlog.DEBUG { - quoted := make([]string, 0, len(args)) - for _, arg := range args { - quoted = append(quoted, fmt.Sprintf("%q", arg)) - } - log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) - } - - out, err := execCommand(arg0, args...).Output() - if err != nil { - if err, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err) - } - return nil, err - } - return out, nil -} - // Snmp holds the configuration for the plugin. type Snmp struct { // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. @@ -119,12 +94,14 @@ type Snmp struct { Fields []Field `toml:"field"` connectionCache []snmpConnection - initialized bool + + Log telegraf.Logger `toml:"-"` } -func (s *Snmp) init() error { - if s.initialized { - return nil +func (s *Snmp) Init() error { + err := snmp.LoadMibsFromPath(s.Path, s.Log) + if err != nil { + return err } s.connectionCache = make([]snmpConnection, len(s.Agents)) @@ -145,7 +122,6 @@ func (s *Snmp) init() error { s.AgentHostTag = "agent_host" } - s.initialized = true return nil } @@ -351,6 +327,7 @@ func init() { MaxRepetitions: 10, Timeout: config.Duration(5 * time.Second), Version: 2, + Path: []string{"/usr/share/snmp/mibs"}, Community: "public", }, } @@ -371,10 +348,6 @@ func (s *Snmp) Description() string { // Any error encountered does not halt the process. The errors are accumulated // and returned at the end. func (s *Snmp) Gather(acc telegraf.Accumulator) error { - if err := s.init(); err != nil { - return err - } - var wg sync.WaitGroup for i, agent := range s.Agents { wg.Add(1) @@ -679,7 +652,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case float32: v = float64(vt) / math.Pow10(d) case float64: - v = float64(vt) / math.Pow10(d) + v = vt / math.Pow10(d) case int: v = float64(vt) / math.Pow10(d) case int8: @@ -766,7 +739,8 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return v, nil } - if endian == "LittleEndian" { + switch endian { + case "LittleEndian": switch bit { case "uint64": v = binary.LittleEndian.Uint64(bv) @@ -777,7 +751,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { default: return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) } - } else if endian == "BigEndian" { + case "BigEndian": switch bit { case "uint64": v = binary.BigEndian.Uint64(bv) @@ -788,7 +762,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { default: return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) } - } else { + default: return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) } @@ -833,6 +807,7 @@ var snmpTableCachesLock sync.Mutex // snmpTable resolves the given OID as a table, providing information about the // table and fields within. 
+//nolint:revive //Too many return variable but necessary func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { snmpTableCachesLock.Lock() if snmpTableCaches == nil { @@ -850,6 +825,7 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err } +//nolint:revive //Too many return variable but necessary func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { mibName, oidNum, oidText, _, err = SnmpTranslate(oid) if err != nil { @@ -857,53 +833,12 @@ func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, f } mibPrefix := mibName + "::" - oidFullName := mibPrefix + oidText - - // first attempt to get the table's tags - tagOids := map[string]struct{}{} - // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. - if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - for scanner.Scan() { - line := scanner.Text() - - if !strings.HasPrefix(line, " INDEX") { - continue - } - i := strings.Index(line, "{ ") - if i == -1 { // parse error - continue - } - line = line[i+2:] - i = strings.Index(line, " }") - if i == -1 { // parse error - continue - } - line = line[:i] - for _, col := range strings.Split(line, ", ") { - tagOids[mibPrefix+col] = struct{}{} - } - } - } + col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix) - // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. - out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) - if err != nil { - return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) - } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - scanner.Scan() - cols := scanner.Text() - if len(cols) == 0 { - return "", "", "", nil, fmt.Errorf("could not find any columns in table") - } - for _, col := range strings.Split(cols, " ") { - if len(col) == 0 { - continue - } - _, isTag := tagOids[mibPrefix+col] - fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) + for _, c := range col { + _, isTag := tagOids[mibPrefix+c] + fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag}) } return mibName, oidNum, oidText, fields, err @@ -921,6 +856,7 @@ var snmpTranslateCachesLock sync.Mutex var snmpTranslateCaches map[string]snmpTranslateCache // snmpTranslate resolves the given OID. +//nolint:revive //Too many return variable but necessary func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { snmpTranslateCachesLock.Lock() if snmpTranslateCaches == nil { @@ -938,7 +874,7 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // is worth it. Especially when it would slam the system pretty hard if lots // of lookups are being performed. 
- stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmp.SnmpTranslateCall(oid) snmpTranslateCaches[oid] = stc } @@ -946,73 +882,3 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } - -func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - var out []byte - if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) - } else { - out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) - if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound { - // Silently discard error if snmptranslate not found and we have a numeric OID. - // Meaning we can get by without the lookup. - return "", oid, oid, "", nil - } - } - if err != nil { - return "", "", "", "", err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if !ok && scanner.Err() != nil { - return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err()) - } - - oidText = scanner.Text() - - i := strings.Index(oidText, "::") - if i == -1 { - // was not found in MIB. - if bytes.Contains(out, []byte("[TRUNCATED]")) { - return "", oid, oid, "", nil - } - // not truncated, but not fully found. We still need to parse out numeric OID, so keep going - oidText = oid - } else { - mibName = oidText[:i] - oidText = oidText[i+2:] - } - - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") { - tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ") - switch tc { - case "MacAddress", "PhysAddress": - conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": - conversion = "ipaddr" - } - } else if strings.HasPrefix(line, "::= { ") { - objs := strings.TrimPrefix(line, "::= { ") - objs = strings.TrimSuffix(objs, " }") - - for _, obj := range strings.Split(objs, " ") { - if len(obj) == 0 { - continue - } - if i := strings.Index(obj, "("); i != -1 { - obj = obj[i+1:] - oidNum += "." + obj[:strings.Index(obj, ")")] - } else { - oidNum += "." + obj - } - } - break - } - } - - return mibName, oidNum, oidText, conversion, nil -} diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go deleted file mode 100644 index f87f9029b0d06..0000000000000 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ /dev/null @@ -1,103 +0,0 @@ -//go:build generate -// +build generate - -package main - -import ( - "bufio" - "bytes" - "fmt" - "os" - "os/exec" - "strings" -) - -// This file is a generator used to generate the mocks for the commands used by the tests. - -// These are the commands to be mocked. 
-var mockedCommands = [][]string{ - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.1.2"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", "1.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, - {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, - {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server"}, - {"snmptranslate", "-Td", "-Ob", "TEST::server.0"}, - {"snmptranslate", "-Td", "-Ob", "TEST::testTable"}, - {"snmptranslate", "-Td", "-Ob", "TEST::connections"}, - {"snmptranslate", "-Td", "-Ob", "TEST::latency"}, - {"snmptranslate", "-Td", "-Ob", "TEST::description"}, - {"snmptranslate", "-Td", "-Ob", "TEST::hostname"}, - {"snmptranslate", "-Td", "-Ob", "IF-MIB::ifPhysAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "BRIDGE-MIB::dot1dTpFdbAddress.1"}, - {"snmptranslate", "-Td", "-Ob", "TCP-MIB::tcpConnectionLocalAddress.1"}, - {"snmptranslate", "-Td", "TEST::testTable.1"}, - {"snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", "TEST::testTable"}, -} - -type mockedCommandResult struct { - stdout string - stderr string - exitError bool -} - -func main() { - if err := generate(); err != nil { - fmt.Fprintf(os.Stderr, "error: %s\n", err) - os.Exit(1) - } -} - -func generate() error { - f, err := os.OpenFile("snmp_mocks_test.go", os.O_RDWR, 0644) - if err != nil { - return err - } - br := bufio.NewReader(f) - var i int64 - for l, err := br.ReadString('\n'); err == nil; l, err = br.ReadString('\n') { - i += int64(len(l)) - if l == "// BEGIN GO GENERATE CONTENT\n" { - break - } - } - f.Truncate(i) - f.Seek(i, 0) - - fmt.Fprintf(f, "var mockedCommandResults = map[string]mockedCommandResult{\n") - - for _, cmd := range mockedCommands { - ec := exec.Command(cmd[0], cmd[1:]...) - out := bytes.NewBuffer(nil) - err := bytes.NewBuffer(nil) - ec.Stdout = out - ec.Stderr = err - ec.Env = []string{ - "MIBDIRS=+./testdata", - } - - var mcr mockedCommandResult - if err := ec.Run(); err != nil { - if err, ok := err.(*exec.ExitError); !ok { - mcr.exitError = true - } else { - return fmt.Errorf("executing %v: %s", cmd, err) - } - } - mcr.stdout = string(out.Bytes()) - mcr.stderr = string(err.Bytes()) - cmd0 := strings.Join(cmd, "\000") - mcrv := fmt.Sprintf("%#v", mcr)[5:] // trim `main.` prefix - fmt.Fprintf(f, "%#v: %s,\n", cmd0, mcrv) - } - f.Write([]byte("}\n")) - f.Close() - - return exec.Command("gofmt", "-w", "snmp_mocks_test.go").Run() -} diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go deleted file mode 100644 index 1927db23246b4..0000000000000 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package snmp - -import ( - "fmt" - "os" - "os/exec" - "strings" - "testing" -) - -type mockedCommandResult struct { - stdout string - stderr string - exitError bool -} - -func mockExecCommand(arg0 string, args ...string) *exec.Cmd { - args = append([]string{"-test.run=TestMockExecCommand", "--", arg0}, args...) - cmd := exec.Command(os.Args[0], args...) - cmd.Stderr = os.Stderr // so the test output shows errors - return cmd -} - -// This is not a real test. This is just a way of mocking out commands. 
-// -// Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 -func TestMockExecCommand(_ *testing.T) { - var cmd []string - for _, arg := range os.Args { - if arg == "--" { - cmd = []string{} - continue - } - if cmd == nil { - continue - } - cmd = append(cmd, arg) - } - if cmd == nil { - return - } - - cmd0 := strings.Join(cmd, "\000") - mcr, ok := mockedCommandResults[cmd0] - if !ok { - cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix - //nolint:errcheck,revive - fmt.Fprintf(os.Stderr, "Unmocked command. Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) - os.Exit(1) - } - //nolint:errcheck,revive - fmt.Printf("%s", mcr.stdout) - //nolint:errcheck,revive - fmt.Fprintf(os.Stderr, "%s", mcr.stderr) - if mcr.exitError { - os.Exit(1) - } - os.Exit(0) -} - -func init() { - execCommand = mockExecCommand -} - -// BEGIN GO GENERATE CONTENT -var mockedCommandResults = map[string]mockedCommandResult{ - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: 
"", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::description": {stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. 
For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00TEST::testTable.1": {stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, - "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, -} diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 49c9bf381b107..4f18a458a48e2 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -1,22 +1,21 @@ -//go:generate go run -tags generate snmp_mocks_generate.go package snmp import ( "fmt" "net" - "os/exec" + "path/filepath" "sync" "testing" "time" "github.com/gosnmp/gosnmp" + "github.com/influxdata/toml" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/toml" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type testSNMPConnection struct { @@ -63,33 +62,42 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { var tsc = &testSNMPConnection{ host: "tsc", values: map[string]interface{}{ - ".1.0.0.0.1.1.0": "foo", - ".1.0.0.0.1.1.1": []byte("bar"), - ".1.0.0.0.1.1.2": []byte(""), - ".1.0.0.0.1.102": "bad", - ".1.0.0.0.1.2.0": 1, - ".1.0.0.0.1.2.1": 2, - ".1.0.0.0.1.2.2": 0, - ".1.0.0.0.1.3.0": "0.123", - ".1.0.0.0.1.3.1": "0.456", - ".1.0.0.0.1.3.2": "0.000", - ".1.0.0.0.1.3.3": "9.999", - ".1.0.0.0.1.5.0": 
123456, - ".1.0.0.1.1": "baz", - ".1.0.0.1.2": 234, - ".1.0.0.1.3": []byte("byte slice"), - ".1.0.0.2.1.5.0.9.9": 11, - ".1.0.0.2.1.5.1.9.9": 22, - ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", - ".1.0.0.3.1.1.10": "instance", - ".1.0.0.3.1.1.11": "instance2", - ".1.0.0.3.1.1.12": "instance3", - ".1.0.0.3.1.2.10": 10, - ".1.0.0.3.1.2.11": 20, - ".1.0.0.3.1.2.12": 20, - ".1.0.0.3.1.3.10": 1, - ".1.0.0.3.1.3.11": 2, - ".1.0.0.3.1.3.12": 3, + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + ".1.3.6.1.2.1.3.1.1.5.0": 123456, + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } @@ -104,6 +112,7 @@ func TestSampleConfig(t *testing.T) { ClientConfig: snmp.ClientConfig{ Timeout: config.Duration(5 * time.Second), Version: 2, + Path: []string{"/usr/share/snmp/mibs"}, Community: "public", MaxRepetitions: 10, Retries: 3, @@ -114,6 +123,17 @@ func TestSampleConfig(t *testing.T) { } func TestFieldInit(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + } + + err = s.Init() + require.NoError(t, err) + translations := []struct { inputOid string inputName string @@ -125,8 +145,6 @@ func TestFieldInit(t *testing.T) { {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, - {".1.0.0.0.1.1.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, - {".999", "", "", ".999", ".999", ""}, {"TEST::server", "", "", ".1.0.0.0.1.1", "server", ""}, {"TEST::server.0", "", "", ".1.0.0.0.1.1.0", "server.0", ""}, {"TEST::server", "foo", "", ".1.0.0.0.1.1", "foo", ""}, @@ -134,115 +152,126 @@ func TestFieldInit(t *testing.T) { {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, + {".999", "", "", ".999", ".999", ""}, } for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} err := f.init() - if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { - continue - } - assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) - assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, 
txl.inputConversion) + require.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) + + require.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + require.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) } } func TestTableInit(t *testing.T) { - tbl := Table{ - Oid: ".1.0.0.0", - Fields: []Field{ - {Oid: ".999", Name: "foo"}, - {Oid: "TEST::description", Name: "description", IsTag: true}, + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + Tables: []Table{ + {Oid: ".1.3.6.1.2.1.3.1", + Fields: []Field{ + {Oid: ".999", Name: "foo"}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true}, + {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"}, + }}, }, } - err := tbl.Init() + err = s.Init() require.NoError(t, err) - assert.Equal(t, "testTable", tbl.Name) + require.Equal(t, "atTable", s.Tables[0].Name) - assert.Len(t, tbl.Fields, 5) - assert.Contains(t, tbl.Fields, Field{Oid: ".999", Name: "foo", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - assert.Contains(t, tbl.Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", IsTag: true, initialized: true}) + require.Len(t, s.Tables[0].Fields, 5) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".999", Name: "foo", initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true}) } func TestSnmpInit(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ Tables: []Table{ - {Oid: "TEST::testTable"}, + {Oid: "RFC1213-MIB::atTable"}, }, Fields: []Field{ - {Oid: "TEST::hostname"}, + {Oid: "RFC1213-MIB::atPhysAddress"}, + }, + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, }, } - err := s.init() + err = s.Init() require.NoError(t, err) - assert.Len(t, s.Tables[0].Fields, 4) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.1", Name: "server", IsTag: true, initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.2", Name: "connections", initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.3", Name: "latency", initialized: true}) - assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.0.0.0.1.4", Name: "description", initialized: true}) + require.Len(t, s.Tables[0].Fields, 3) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true, initialized: true}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + require.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", IsTag: true, initialized: 
true}) - assert.Equal(t, Field{ - Oid: ".1.0.0.1.1", - Name: "hostname", + require.Equal(t, Field{ + Oid: ".1.3.6.1.2.1.3.1.1.2", + Name: "atPhysAddress", + Conversion: "hwaddr", initialized: true, }, s.Fields[0]) } func TestSnmpInit_noTranslate(t *testing.T) { - // override execCommand so it returns exec.ErrNotFound - defer func(ec func(string, ...string) *exec.Cmd) { execCommand = ec }(execCommand) - execCommand = func(_ string, _ ...string) *exec.Cmd { - return exec.Command("snmptranslateExecErrNotFound") - } - s := &Snmp{ Fields: []Field{ - {Oid: ".1.1.1.1", Name: "one", IsTag: true}, - {Oid: ".1.1.1.2", Name: "two"}, - {Oid: ".1.1.1.3"}, + {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, + {Oid: ".9.1.1.1.2", Name: "two"}, + {Oid: ".9.1.1.1.3"}, }, Tables: []Table{ {Name: "testing", Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, + {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".9.1.1.1.5", Name: "five"}, + {Oid: ".9.1.1.1.6"}, }}, }, + ClientConfig: snmp.ClientConfig{ + Path: []string{}, + }, } - err := s.init() + err := s.Init() require.NoError(t, err) - assert.Equal(t, ".1.1.1.1", s.Fields[0].Oid) - assert.Equal(t, "one", s.Fields[0].Name) - assert.Equal(t, true, s.Fields[0].IsTag) + require.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) + require.Equal(t, "one", s.Fields[0].Name) + require.Equal(t, true, s.Fields[0].IsTag) - assert.Equal(t, ".1.1.1.2", s.Fields[1].Oid) - assert.Equal(t, "two", s.Fields[1].Name) - assert.Equal(t, false, s.Fields[1].IsTag) + require.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) + require.Equal(t, "two", s.Fields[1].Name) + require.Equal(t, false, s.Fields[1].IsTag) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Oid) - assert.Equal(t, ".1.1.1.3", s.Fields[2].Name) - assert.Equal(t, false, s.Fields[2].IsTag) + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) + require.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) + require.Equal(t, false, s.Fields[2].IsTag) - assert.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid) - assert.Equal(t, "four", s.Tables[0].Fields[0].Name) - assert.Equal(t, true, s.Tables[0].Fields[0].IsTag) + require.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) + require.Equal(t, "four", s.Tables[0].Fields[0].Name) + require.Equal(t, true, s.Tables[0].Fields[0].IsTag) - assert.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid) - assert.Equal(t, "five", s.Tables[0].Fields[1].Name) - assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) + require.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) + require.Equal(t, "five", s.Tables[0].Fields[1].Name) + require.Equal(t, false, s.Tables[0].Fields[1].IsTag) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid) - assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Name) - assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) + require.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) + require.Equal(t, false, s.Tables[0].Fields[2].IsTag) } func TestSnmpInit_noName_noOid(t *testing.T) { @@ -256,7 +285,7 @@ func TestSnmpInit_noName_noOid(t *testing.T) { }, } - err := s.init() + err := s.Init() require.Error(t, err) } @@ -270,31 +299,31 @@ func TestGetSNMPConnection_v2(t *testing.T) { Community: "foo", }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "1.2.3.4", gs.Target) - assert.EqualValues(t, 567, gs.Port) - assert.Equal(t, gosnmp.Version2c, gs.Version) - 
assert.Equal(t, "foo", gs.Community) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "1.2.3.4", gs.Target) + require.EqualValues(t, 567, gs.Port) + require.Equal(t, gosnmp.Version2c, gs.Version) + require.Equal(t, "foo", gs.Community) + require.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(1) require.NoError(t, err) gs = gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "1.2.3.4", gs.Target) - assert.EqualValues(t, 161, gs.Port) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "1.2.3.4", gs.Target) + require.EqualValues(t, 161, gs.Port) + require.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(2) require.NoError(t, err) gs = gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "127.0.0.1", gs.Target) - assert.EqualValues(t, 161, gs.Port) - assert.Equal(t, "udp", gs.Transport) + require.Equal(t, "127.0.0.1", gs.Target) + require.EqualValues(t, 161, gs.Port) + require.Equal(t, "udp", gs.Transport) } func TestGetSNMPConnectionTCP(t *testing.T) { @@ -306,16 +335,16 @@ func TestGetSNMPConnectionTCP(t *testing.T) { s := &Snmp{ Agents: []string{"tcp://127.0.0.1:56789"}, } - err := s.init() + err := s.Init() require.NoError(t, err) wg.Add(1) gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, "127.0.0.1", gs.Target) - assert.EqualValues(t, 56789, gs.Port) - assert.Equal(t, "tcp", gs.Transport) + require.Equal(t, "127.0.0.1", gs.Target) + require.EqualValues(t, 56789, gs.Port) + require.Equal(t, "tcp", gs.Transport) wg.Wait() } @@ -347,26 +376,26 @@ func TestGetSNMPConnection_v3(t *testing.T) { EngineTime: 2, }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - assert.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gs.Version, gosnmp.Version3) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) - assert.Equal(t, "1.2.3.4", gsc.Host()) - assert.EqualValues(t, 20, gs.MaxRepetitions) - assert.Equal(t, "mycontext", gs.ContextName) - assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) - assert.Equal(t, "myuser", sp.UserName) - assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) - assert.Equal(t, "password123", sp.AuthenticationPassphrase) - assert.Equal(t, gosnmp.DES, sp.PrivacyProtocol) - assert.Equal(t, "321drowssap", sp.PrivacyPassphrase) - assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) - assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) - assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + require.Equal(t, "1.2.3.4", gsc.Host()) + require.EqualValues(t, 20, gs.MaxRepetitions) + require.Equal(t, "mycontext", gs.ContextName) + require.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + require.Equal(t, "myuser", sp.UserName) + require.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + require.Equal(t, "password123", sp.AuthenticationPassphrase) + require.Equal(t, gosnmp.DES, sp.PrivacyProtocol) + require.Equal(t, "321drowssap", sp.PrivacyPassphrase) + require.Equal(t, "myengineid", sp.AuthoritativeEngineID) + require.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + require.EqualValues(t, 2, sp.AuthoritativeEngineTime) } func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { @@ -464,26 +493,26 @@ func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { s := tc.Config - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(snmp.GosnmpWrapper) - 
assert.Equal(t, gs.Version, gosnmp.Version3) + require.Equal(t, gs.Version, gosnmp.Version3) sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) - assert.Equal(t, "1.2.3.4", gsc.Host()) - assert.EqualValues(t, 20, gs.MaxRepetitions) - assert.Equal(t, "mycontext", gs.ContextName) - assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) - assert.Equal(t, "myuser", sp.UserName) - assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) - assert.Equal(t, "password123", sp.AuthenticationPassphrase) - assert.Equal(t, tc.Algorithm, sp.PrivacyProtocol) - assert.Equal(t, "password123", sp.PrivacyPassphrase) - assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) - assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) - assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + require.Equal(t, "1.2.3.4", gsc.Host()) + require.EqualValues(t, 20, gs.MaxRepetitions) + require.Equal(t, "mycontext", gs.ContextName) + require.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + require.Equal(t, "myuser", sp.UserName) + require.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + require.Equal(t, "password123", sp.AuthenticationPassphrase) + require.Equal(t, tc.Algorithm, sp.PrivacyProtocol) + require.Equal(t, "password123", sp.PrivacyPassphrase) + require.Equal(t, "myengineid", sp.AuthoritativeEngineID) + require.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + require.EqualValues(t, 2, sp.AuthoritativeEngineTime) }) } } @@ -492,7 +521,7 @@ func TestGetSNMPConnection_caching(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, } - err := s.init() + err := s.Init() require.NoError(t, err) gs1, err := s.getConnection(0) require.NoError(t, err) @@ -502,9 +531,9 @@ func TestGetSNMPConnection_caching(t *testing.T) { require.NoError(t, err) gs4, err := s.getConnection(2) require.NoError(t, err) - assert.True(t, gs1 == gs2) - assert.False(t, gs2 == gs3) - assert.False(t, gs3 == gs4) + require.Equal(t, gs1, gs2) + require.NotEqual(t, gs2, gs3) + require.NotEqual(t, gs3, gs4) } func TestGosnmpWrapper_walk_retry(t *testing.T) { @@ -554,11 +583,11 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { GoSNMP: gs, } err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) - assert.NoError(t, srvr.Close()) + require.NoError(t, srvr.Close()) wg.Wait() - assert.Error(t, err) - assert.False(t, gs.Conn == conn) - assert.Equal(t, (gs.Retries+1)*2, reqCount) + require.Error(t, err) + require.NotEqual(t, gs.Conn, conn) + require.Equal(t, (gs.Retries+1)*2, reqCount) } func TestGosnmpWrapper_get_retry(t *testing.T) { @@ -609,12 +638,12 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { _, err = gsw.Get([]string{".1.0.0"}) require.NoError(t, srvr.Close()) wg.Wait() - assert.Error(t, err) - assert.False(t, gs.Conn == conn) - assert.Equal(t, (gs.Retries+1)*2, reqCount) + require.Error(t, err) + require.NotEqual(t, gs.Conn, conn) + require.Equal(t, (gs.Retries+1)*2, reqCount) } -func TestTableBuild_walk(t *testing.T) { +func TestTableBuild_walk_noTranslate(t *testing.T) { tbl := Table{ Name: "mytable", IndexAsTag: true, @@ -643,23 +672,12 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.2.1.5", OidIndexLength: 1, }, - { - Name: "myfield6", - Oid: ".1.0.0.0.1.6", - Translate: true, - }, - { - Name: "myfield7", - Oid: ".1.0.0.0.1.6", - Translate: false, - }, }, } tb, err := tbl.Build(tsc, true) require.NoError(t, err) - - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "foo", @@ -670,8 +688,6 
@@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(0.123), "myfield4": 11, "myfield5": 11, - "myfield6": "testTableEntry.7", - "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ @@ -703,11 +719,85 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(9.999), }, } - assert.Len(t, tb.Rows, 4) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) - assert.Contains(t, tb.Rows, rtr4) + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) +} + +func TestTableBuild_walk_Translate(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + }, + } + err = s.Init() + require.NoError(t, err) + + tbl := Table{ + Name: "atTable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "ifIndex", + Oid: "1.3.6.1.2.1.3.1.1.1", + IsTag: true, + }, + { + Name: "atPhysAddress", + Oid: "1.3.6.1.2.1.3.1.1.2", + Translate: false, + }, + { + Name: "atNetAddress", + Oid: "1.3.6.1.2.1.3.1.1.3", + Translate: true, + }, + }, + } + + err = tbl.Init() + require.NoError(t, err) + tb, err := tbl.Build(tsc, true) + require.NoError(t, err) + + require.Equal(t, tb.Name, "atTable") + + rtr1 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "foo", + "index": "0", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 1, + "atNetAddress": "atNetAddress", + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "bar", + "index": "1", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 2, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "index": "2", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 0, + }, + } + + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) } func TestTableBuild_noWalk(t *testing.T) { @@ -746,8 +836,8 @@ func TestTableBuild_noWalk(t *testing.T) { Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, Fields: map[string]interface{}{"myfield2": 234}, } - assert.Len(t, tb.Rows, 1) - assert.Contains(t, tb.Rows, rtr) + require.Len(t, tb.Rows, 1) + require.Contains(t, tb.Rows, rtr) } func TestGather(t *testing.T) { @@ -785,7 +875,6 @@ func TestGather(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} @@ -796,21 +885,21 @@ func TestGather(t *testing.T) { require.Len(t, acc.Metrics, 2) m := acc.Metrics[0] - assert.Equal(t, "mytable", m.Measurement) - assert.Equal(t, "tsc", m.Tags[s.AgentHostTag]) - assert.Equal(t, "baz", m.Tags["myfield1"]) - assert.Len(t, m.Fields, 2) - assert.Equal(t, 234, m.Fields["myfield2"]) - assert.Equal(t, "baz", m.Fields["myfield3"]) - assert.True(t, !tstart.After(m.Time)) - assert.True(t, !tstop.Before(m.Time)) + require.Equal(t, "mytable", m.Measurement) + require.Equal(t, "tsc", m.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m.Tags["myfield1"]) + require.Len(t, m.Fields, 2) + require.Equal(t, 234, m.Fields["myfield2"]) + require.Equal(t, "baz", m.Fields["myfield3"]) + require.False(t, tstart.After(m.Time)) + require.False(t, tstop.Before(m.Time)) m2 := acc.Metrics[1] - assert.Equal(t, "myOtherTable", m2.Measurement) - assert.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) - assert.Equal(t, "baz", m2.Tags["myfield1"]) - assert.Len(t, m2.Fields, 1) - assert.Equal(t, 123456, 
m2.Fields["myOtherField"]) + require.Equal(t, "myOtherTable", m2.Measurement) + require.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) + require.Equal(t, "baz", m2.Tags["myfield1"]) + require.Len(t, m2.Fields, 1) + require.Equal(t, 123456, m2.Fields["myOtherField"]) } func TestGather_host(t *testing.T) { @@ -832,7 +921,6 @@ func TestGather_host(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} @@ -841,7 +929,7 @@ func TestGather_host(t *testing.T) { require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] - assert.Equal(t, "baz", m.Tags["host"]) + require.Equal(t, "baz", m.Tags["host"]) } func TestFieldConvert(t *testing.T) { @@ -850,11 +938,12 @@ func TestFieldConvert(t *testing.T) { conv string expected interface{} }{ - {[]byte("foo"), "", string("foo")}, + {[]byte("foo"), "", "foo"}, {"0.123", "float", float64(0.123)}, {[]byte("0.123"), "float", float64(0.123)}, {float32(0.123), "float", float64(float32(0.123))}, {float64(0.123), "float", float64(0.123)}, + {float64(0.123123123123), "float", float64(0.123123123123)}, {123, "float", float64(123)}, {123, "float(0)", float64(123)}, {123, "float(4)", float64(0.0123)}, @@ -873,7 +962,7 @@ func TestFieldConvert(t *testing.T) { {[]byte("123123123123"), "int", int64(123123123123)}, {float32(12.3), "int", int64(12)}, {float64(12.3), "int", int64(12)}, - {int(123), "int", int64(123)}, + {123, "int", int64(123)}, {int8(123), "int", int64(123)}, {int16(123), "int", int64(123)}, {int32(123), "int", int64(123)}, @@ -898,10 +987,8 @@ func TestFieldConvert(t *testing.T) { for _, tc := range testTable { act, err := fieldConvert(tc.conv, tc.input) - if !assert.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) { - continue - } - assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + require.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + require.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) } } @@ -909,14 +996,14 @@ func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) - assert.Len(t, snmpTranslateCaches, 1) + require.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) - assert.Equal(t, mibName, stc.mibName) - assert.Equal(t, oidNum, stc.oidNum) - assert.Equal(t, oidText, stc.oidText) - assert.Equal(t, conversion, stc.conversion) - assert.Equal(t, err, stc.err) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + require.Equal(t, conversion, stc.conversion) + require.Equal(t, err, stc.err) } func TestSnmpTranslateCache_hit(t *testing.T) { @@ -930,11 +1017,11 @@ func TestSnmpTranslateCache_hit(t *testing.T) { }, } mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") - assert.Equal(t, "a", mibName) - assert.Equal(t, "b", oidNum) - assert.Equal(t, "c", oidText) - assert.Equal(t, "d", conversion) - assert.Equal(t, fmt.Errorf("e"), err) + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, "d", conversion) + require.Equal(t, fmt.Errorf("e"), err) snmpTranslateCaches = nil } @@ -942,14 +1029,14 @@ func TestSnmpTableCache_miss(t 
*testing.T) { snmpTableCaches = nil oid := ".1.0.0.0" mibName, oidNum, oidText, fields, err := snmpTable(oid) - assert.Len(t, snmpTableCaches, 1) + require.Len(t, snmpTableCaches, 1) stc := snmpTableCaches[oid] require.NotNil(t, stc) - assert.Equal(t, mibName, stc.mibName) - assert.Equal(t, oidNum, stc.oidNum) - assert.Equal(t, oidText, stc.oidText) - assert.Equal(t, fields, stc.fields) - assert.Equal(t, err, stc.err) + require.Equal(t, mibName, stc.mibName) + require.Equal(t, oidNum, stc.oidNum) + require.Equal(t, oidText, stc.oidText) + require.Equal(t, fields, stc.fields) + require.Equal(t, err, stc.err) } func TestSnmpTableCache_hit(t *testing.T) { @@ -963,11 +1050,11 @@ func TestSnmpTableCache_hit(t *testing.T) { }, } mibName, oidNum, oidText, fields, err := snmpTable("foo") - assert.Equal(t, "a", mibName) - assert.Equal(t, "b", oidNum) - assert.Equal(t, "c", oidText) - assert.Equal(t, []Field{{Name: "d"}}, fields) - assert.Equal(t, fmt.Errorf("e"), err) + require.Equal(t, "a", mibName) + require.Equal(t, "b", oidNum) + require.Equal(t, "c", oidText) + require.Equal(t, []Field{{Name: "d"}}, fields) + require.Equal(t, fmt.Errorf("e"), err) } func TestTableJoin_walk(t *testing.T) { @@ -1006,7 +1093,7 @@ func TestTableJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1040,10 +1127,10 @@ func TestTableJoin_walk(t *testing.T) { "myfield3": 3, }, } - assert.Len(t, tb.Rows, 3) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) } func TestTableOuterJoin_walk(t *testing.T) { @@ -1083,7 +1170,7 @@ func TestTableOuterJoin_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1126,11 +1213,11 @@ func TestTableOuterJoin_walk(t *testing.T) { "myfield5": 1, }, } - assert.Len(t, tb.Rows, 4) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) - assert.Contains(t, tb.Rows, rtr4) + require.Len(t, tb.Rows, 4) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) + require.Contains(t, tb.Rows, rtr4) } func TestTableJoinNoIndexAsTag_walk(t *testing.T) { @@ -1169,7 +1256,7 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { tb, err := tbl.Build(tsc, true) require.NoError(t, err) - assert.Equal(t, tb.Name, "mytable") + require.Equal(t, tb.Name, "mytable") rtr1 := RTableRow{ Tags: map[string]string{ "myfield1": "instance", @@ -1203,8 +1290,17 @@ func TestTableJoinNoIndexAsTag_walk(t *testing.T) { "myfield3": 3, }, } - assert.Len(t, tb.Rows, 3) - assert.Contains(t, tb.Rows, rtr1) - assert.Contains(t, tb.Rows, rtr2) - assert.Contains(t, tb.Rows, rtr3) + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) +} + +func BenchmarkMibLoading(b *testing.B) { + log := testutil.Logger{} + path := []string{"testdata"} + for i := 0; i < b.N; i++ { + err := snmp.LoadMibsFromPath(path, log) + require.NoError(b, err) + } } diff --git a/plugins/inputs/snmp/testdata/bridgeMib 
b/plugins/inputs/snmp/testdata/bridgeMib new file mode 100644 index 0000000000000..96f562732fd6a --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMib @@ -0,0 +1,1467 @@ +BRIDGE-MIB DEFINITIONS ::= BEGIN + +-- ---------------------------------------------------------- -- +-- MIB for IEEE 802.1D devices +-- ---------------------------------------------------------- -- +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, NOTIFICATION-TYPE, + Counter32, Integer32, TimeTicks, mib-2, TEXTUAL-CONVENTION, MacAddress, + MODULE-COMPLIANCE, NOTIFICATION-GROUP, OBJECT-GROUP, InterfaceIndex + FROM bridgeMibImports; + +dot1dBridge MODULE-IDENTITY + LAST-UPDATED "200509190000Z" + ORGANIZATION "IETF Bridge MIB Working Group" + CONTACT-INFO + "Email: bridge-mib@ietf.org + + K.C. Norseth (Editor) + L-3 Communications + Tel: +1 801-594-2809 + Email: kenyon.c.norseth@L-3com.com + Postal: 640 N. 2200 West. + Salt Lake City, Utah 84116-0850 + + Les Bell (Editor) + 3Com Europe Limited + Phone: +44 1442 438025 + Email: elbell@ntlworld.com + Postal: 3Com Centre, Boundary Way + Hemel Hempstead + Herts. HP2 7YU + UK + + Send comments to " + DESCRIPTION + "The Bridge MIB module for managing devices that support + IEEE 802.1D. + + Copyright (C) The Internet Society (2005). This version of + this MIB module is part of RFC 4188; see the RFC itself for + full legal notices." + REVISION "200509190000Z" + DESCRIPTION + "Third revision, published as part of RFC 4188. + + The MIB module has been converted to SMIv2 format. + Conformance statements have been added and some + description and reference clauses have been updated. + + The object dot1dStpPortPathCost32 was added to + support IEEE 802.1t and the permissible values of + dot1dStpPriority and dot1dStpPortPriority have been + clarified for bridges supporting IEEE 802.1t or + IEEE 802.1w. + + The interpretation of dot1dStpTimeSinceTopologyChange + has been clarified for bridges supporting the Rapid + Spanning Tree Protocol (RSTP)." + REVISION "199307310000Z" + DESCRIPTION + "Second revision, published as part of RFC 1493." + REVISION "199112310000Z" + DESCRIPTION + "Initial revision, published as part of RFC 1286." + ::= { mib-2 17 } + +-- ---------------------------------------------------------- -- +-- Textual Conventions +-- ---------------------------------------------------------- -- + +BridgeId ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "The Bridge-Identifier, as used in the Spanning Tree + Protocol, to uniquely identify a bridge. Its first two + octets (in network byte order) contain a priority value, + and its last 6 octets contain the MAC address used to + refer to a bridge in a unique fashion (typically, the + numerically smallest MAC address of all ports on the + bridge)." + SYNTAX OCTET STRING (SIZE (8)) + +Timeout ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A Spanning Tree Protocol (STP) timer in units of 1/100 + seconds. Several objects in this MIB module represent + values of timers used by the Spanning Tree Protocol. + In this MIB, these timers have values in units of + hundredths of a second (i.e., 1/100 secs). + + These timers, when stored in a Spanning Tree Protocol's + BPDU, are in units of 1/256 seconds. Note, however, that + 802.1D-1998 specifies a settable granularity of no more + than one second for these timers. To avoid ambiguity, + a conversion algorithm is defined below for converting + between the different units, which ensures a timer's + value is not distorted by multiple conversions. 
+ + To convert a Timeout value into a value in units of + 1/256 seconds, the following algorithm should be used: + + b = floor( (n * 256) / 100) + + where: + floor = quotient [ignore remainder] + n is the value in 1/100 second units + b is the value in 1/256 second units + + To convert the value from 1/256 second units back to + 1/100 seconds, the following algorithm should be used: + + n = ceiling( (b * 100) / 256) + + where: + ceiling = quotient [if remainder is 0], or + quotient + 1 [if remainder is nonzero] + n is the value in 1/100 second units + + b is the value in 1/256 second units + + Note: it is important that the arithmetic operations are + done in the order specified (i.e., multiply first, + divide second)." + SYNTAX Integer32 + +-- ---------------------------------------------------------- -- +-- subtrees in the Bridge MIB +-- ---------------------------------------------------------- -- + +dot1dNotifications OBJECT IDENTIFIER ::= { dot1dBridge 0 } + +dot1dBase OBJECT IDENTIFIER ::= { dot1dBridge 1 } +dot1dStp OBJECT IDENTIFIER ::= { dot1dBridge 2 } + +dot1dSr OBJECT IDENTIFIER ::= { dot1dBridge 3 } +-- documented in RFC 1525 + +dot1dTp OBJECT IDENTIFIER ::= { dot1dBridge 4 } +dot1dStatic OBJECT IDENTIFIER ::= { dot1dBridge 5 } + +-- Subtrees used by Bridge MIB Extensions: +-- pBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 6 } +-- qBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 7 } +-- Note that the practice of registering related MIB modules +-- below dot1dBridge has been discouraged since there is no +-- robust mechanism to track such registrations. + +dot1dConformance OBJECT IDENTIFIER ::= { dot1dBridge 8 } + +-- ---------------------------------------------------------- -- +-- the dot1dBase subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dBase subtree is mandatory for all +-- bridges. +-- ---------------------------------------------------------- -- + +dot1dBaseBridgeAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The MAC address used by this bridge when it must be + referred to in a unique fashion. It is recommended + that this be the numerically smallest MAC address of + all ports that belong to this bridge. However, it is only + + required to be unique. When concatenated with + dot1dStpPriority, a unique BridgeIdentifier is formed, + which is used in the Spanning Tree Protocol." + REFERENCE + "IEEE 802.1D-1998: clauses 14.4.1.1.3 and 7.12.5" + ::= { dot1dBase 1 } + +dot1dBaseNumPorts OBJECT-TYPE + SYNTAX Integer32 + UNITS "ports" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of ports controlled by this bridging + entity." + REFERENCE + "IEEE 802.1D-1998: clause 14.4.1.1.3" + ::= { dot1dBase 2 } + +dot1dBaseType OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + transparent-only(2), + sourceroute-only(3), + srt(4) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Indicates what type of bridging this bridge can + perform. If a bridge is actually performing a + certain type of bridging, this will be indicated by + entries in the port table for the given type." 
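A minimal Go sketch (illustration only, not part of this change or of the MIB itself) of the Timeout unit conversion spelled out above, using hypothetical helper names; it converts the MIB's 1/100-second units to the 1/256-second units carried in BPDUs and back:

package main

import "fmt"

// toBPDUUnits implements b = floor((n * 256) / 100): centiseconds -> 1/256 s.
func toBPDUUnits(n int) int {
	return (n * 256) / 100 // integer division is floor for non-negative values
}

// toCentiseconds implements n = ceiling((b * 100) / 256): 1/256 s -> centiseconds.
func toCentiseconds(b int) int {
	return (b*100 + 255) / 256 // adding 255 before dividing yields the ceiling
}

func main() {
	n := 2000 // e.g. a 20 s hello time expressed in hundredths of a second
	b := toBPDUUnits(n)
	fmt.Println(b, toCentiseconds(b)) // prints 5120 2000 — the round trip is lossless
}

Pairing floor in one direction with ceiling in the other is what keeps a value from drifting across repeated conversions, which is exactly the property the convention text calls out.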
+ ::= { dot1dBase 3 } + +-- ---------------------------------------------------------- -- +-- The Generic Bridge Port Table +-- ---------------------------------------------------------- -- +dot1dBasePortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains generic information about every + port that is associated with this bridge. Transparent, + source-route, and srt ports are included." + ::= { dot1dBase 4 } + +dot1dBasePortEntry OBJECT-TYPE + SYNTAX Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information for each port of the bridge." + REFERENCE + "IEEE 802.1D-1998: clause 14.4.2, 14.6.1" + INDEX { dot1dBasePort } + ::= { dot1dBasePortTable 1 } + +Dot1dBasePortEntry ::= + SEQUENCE { + dot1dBasePort + Integer32, + dot1dBasePortIfIndex + InterfaceIndex, + dot1dBasePortCircuit + OBJECT IDENTIFIER, + dot1dBasePortDelayExceededDiscards + Counter32, + dot1dBasePortMtuExceededDiscards + Counter32 + } + +dot1dBasePort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains bridge management information." + ::= { dot1dBasePortEntry 1 } + +dot1dBasePortIfIndex OBJECT-TYPE + SYNTAX InterfaceIndex + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of the instance of the ifIndex object, + defined in IF-MIB, for the interface corresponding + to this port." + ::= { dot1dBasePortEntry 2 } + +dot1dBasePortCircuit OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "For a port that (potentially) has the same value of + dot1dBasePortIfIndex as another port on the same bridge. + This object contains the name of an object instance + unique to this port. For example, in the case where + multiple ports correspond one-to-one with multiple X.25 + virtual circuits, this value might identify an (e.g., + the first) object instance associated with the X.25 + virtual circuit corresponding to this port. + + For a port which has a unique value of + dot1dBasePortIfIndex, this object can have the value + { 0 0 }." + ::= { dot1dBasePortEntry 3 } + +dot1dBasePortDelayExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to excessive transit delay through the bridge. It + is incremented by both transparent and source + route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 4 } + +dot1dBasePortMtuExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to an excessive size. It is incremented by both + transparent and source route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 5 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dStp subtree is optional. It is +-- implemented by those bridges that support the Spanning Tree +-- Protocol. 
+-- ---------------------------------------------------------- -- + +dot1dStpProtocolSpecification OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + decLb100(2), + ieee8021d(3) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An indication of what version of the Spanning Tree + Protocol is being run. The value 'decLb100(2)' + indicates the DEC LANbridge 100 Spanning Tree protocol. + IEEE 802.1D implementations will return 'ieee8021d(3)'. + If future versions of the IEEE Spanning Tree Protocol + that are incompatible with the current version + are released a new value will be defined." + ::= { dot1dStp 1 } + +dot1dStpPriority OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the write-able portion of the Bridge ID + (i.e., the first two octets of the (8 octet long) Bridge + ID). The other (last) 6 octets of the Bridge ID are + given by the value of dot1dBaseBridgeAddress. + On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-61440, in steps of 4096." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStp 2 } + +dot1dStpTimeSinceTopologyChange OBJECT-TYPE + SYNTAX TimeTicks + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The time (in hundredths of a second) since the + last time a topology change was detected by the + bridge entity. + For RSTP, this reports the time since the tcWhile + timer for any port on this Bridge was nonzero." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1., + IEEE 802.1w clause 14.8.1.1." + ::= { dot1dStp 3 } + +dot1dStpTopChanges OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of topology changes detected by + this bridge since the management entity was last + reset or initialized." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1." + ::= { dot1dStp 4 } + +dot1dStpDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The bridge identifier of the root of the spanning + tree, as determined by the Spanning Tree Protocol, + as executed by this node. This value is used as + the Root Identifier parameter in all Configuration + Bridge PDUs originated by this node." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.1" + ::= { dot1dStp 5 } + +dot1dStpRootCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The cost of the path to the root as seen from + this bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.2" + ::= { dot1dStp 6 } + +dot1dStpRootPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port that offers the lowest + cost path from this bridge to the root bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.3" + ::= { dot1dStp 7 } + +dot1dStpMaxAge OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum age of Spanning Tree Protocol information + learned from the network on any port before it is + discarded, in units of hundredths of a second. This is + the actual value that this bridge is currently using." 
+ REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.4" + ::= { dot1dStp 8 } + +dot1dStpHelloTime OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The amount of time between the transmission of + Configuration bridge PDUs by this node on any port when + it is the root of the spanning tree, or trying to become + so, in units of hundredths of a second. This is the + actual value that this bridge is currently using." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.5" + ::= { dot1dStp 9 } + +dot1dStpHoldTime OBJECT-TYPE + SYNTAX Integer32 + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value determines the interval length + during which no more than two Configuration bridge + PDUs shall be transmitted by this node, in units + of hundredths of a second." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.14" + ::= { dot1dStp 10 } + +dot1dStpForwardDelay OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value, measured in units of hundredths of a + second, controls how fast a port changes its spanning + state when moving towards the Forwarding state. The + value determines how long the port stays in each of the + Listening and Learning states, which precede the + Forwarding state. This value is also used when a + topology change has been detected and is underway, to + age all dynamic entries in the Forwarding Database. + [Note that this value is the one that this bridge is + currently using, in contrast to + dot1dStpBridgeForwardDelay, which is the value that this + bridge and all others would start using if/when this + bridge were to become the root.]" + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.6" + ::= { dot1dStp 11 } + +dot1dStpBridgeMaxAge OBJECT-TYPE + SYNTAX Timeout (600..4000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for MaxAge when this + bridge is acting as the root. Note that 802.1D-1998 + specifies that the range for this parameter is related + to the value of dot1dStpBridgeHelloTime. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.8" + ::= { dot1dStp 12 } + +dot1dStpBridgeHelloTime OBJECT-TYPE + SYNTAX Timeout (100..1000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for HelloTime when this + bridge is acting as the root. The granularity of this + timer is specified by 802.1D-1998 to be 1 second. An + agent may return a badValue error if a set is attempted + + to a value that is not a whole number of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.9" + ::= { dot1dStp 13 } + +dot1dStpBridgeForwardDelay OBJECT-TYPE + SYNTAX Timeout (400..3000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for ForwardDelay when + this bridge is acting as the root. Note that + 802.1D-1998 specifies that the range for this parameter + is related to the value of dot1dStpBridgeMaxAge. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." 
+ REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.10" + ::= { dot1dStp 14 } + +-- ---------------------------------------------------------- -- +-- The Spanning Tree Port Table +-- ---------------------------------------------------------- -- + +dot1dStpPortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains port-specific information + for the Spanning Tree Protocol." + ::= { dot1dStp 15 } + +dot1dStpPortEntry OBJECT-TYPE + SYNTAX Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information maintained by every port about + the Spanning Tree Protocol state for that port." + INDEX { dot1dStpPort } + ::= { dot1dStpPortTable 1 } + +Dot1dStpPortEntry ::= + SEQUENCE { + + dot1dStpPort + Integer32, + dot1dStpPortPriority + Integer32, + dot1dStpPortState + INTEGER, + dot1dStpPortEnable + INTEGER, + dot1dStpPortPathCost + Integer32, + dot1dStpPortDesignatedRoot + BridgeId, + dot1dStpPortDesignatedCost + Integer32, + dot1dStpPortDesignatedBridge + BridgeId, + dot1dStpPortDesignatedPort + OCTET STRING, + dot1dStpPortForwardTransitions + Counter32, + dot1dStpPortPathCost32 + Integer32 + } + +dot1dStpPort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains Spanning Tree Protocol management information." + REFERENCE + "IEEE 802.1D-1998: clause 14.8.2.1.2" + ::= { dot1dStpPortEntry 1 } + +dot1dStpPortPriority OBJECT-TYPE + SYNTAX Integer32 (0..255) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the priority field that is contained in + the first (in network byte order) octet of the (2 octet + long) Port ID. The other octet of the Port ID is given + by the value of dot1dStpPort. + On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-240, in steps of 16." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStpPortEntry 2 } + +dot1dStpPortState OBJECT-TYPE + SYNTAX INTEGER { + disabled(1), + blocking(2), + listening(3), + learning(4), + forwarding(5), + broken(6) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port's current state, as defined by application of + the Spanning Tree Protocol. This state controls what + action a port takes on reception of a frame. If the + bridge has detected a port that is malfunctioning, it + will place that port into the broken(6) state. For + ports that are disabled (see dot1dStpPortEnable), this + object will have a value of disabled(1)." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 3 } + +dot1dStpPortEnable OBJECT-TYPE + SYNTAX INTEGER { + enabled(1), + disabled(2) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The enabled/disabled status of the port." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 4 } + +dot1dStpPortPathCost OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + + the speed of the attached LAN. + + New implementations should support dot1dStpPortPathCost32. 
+ If the port path costs exceeds the maximum value of this + object then this object should report the maximum value, + namely 65535. Applications should try to read the + dot1dStpPortPathCost32 object if this object reports + the maximum value." + REFERENCE "IEEE 802.1D-1998: clause 8.5.5.3" + ::= { dot1dStpPortEntry 5 } + +dot1dStpPortDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The unique Bridge Identifier of the Bridge + recorded as the Root in the Configuration BPDUs + transmitted by the Designated Bridge for the + segment to which the port is attached." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.4" + ::= { dot1dStpPortEntry 6 } + +dot1dStpPortDesignatedCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The path cost of the Designated Port of the segment + connected to this port. This value is compared to the + Root Path Cost field in received bridge PDUs." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.5" + ::= { dot1dStpPortEntry 7 } + +dot1dStpPortDesignatedBridge OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Bridge Identifier of the bridge that this + port considers to be the Designated Bridge for + this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.6" + ::= { dot1dStpPortEntry 8 } + +dot1dStpPortDesignatedPort OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (2)) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Port Identifier of the port on the Designated + Bridge for this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.7" + ::= { dot1dStpPortEntry 9 } + +dot1dStpPortForwardTransitions OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times this port has transitioned + from the Learning state to the Forwarding state." + ::= { dot1dStpPortEntry 10 } + +dot1dStpPortPathCost32 OBJECT-TYPE + SYNTAX Integer32 (1..200000000) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + the speed of the attached LAN. + + This object replaces dot1dStpPortPathCost to support + IEEE 802.1t." + REFERENCE + "IEEE 802.1t clause 8.10.2, Table 8-5." + ::= { dot1dStpPortEntry 11 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dTp subtree is optional. It is +-- implemented by those bridges that support the transparent +-- bridging mode. A transparent or SRT bridge will implement +-- this subtree. +-- ---------------------------------------------------------- -- + +dot1dTpLearnedEntryDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of Forwarding Database entries that + have been or would have been learned, but have been + discarded due to a lack of storage space in the + Forwarding Database. If this counter is increasing, it + indicates that the Forwarding Database is regularly + becoming full (a condition that has unpleasant + performance effects on the subnetwork). If this counter + has a significant value but is not presently increasing, + it indicates that the problem has been occurring but is + not persistent." 
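As an aside on the dot1dStpPortPathCost guidance above: when the 16-bit object reports its ceiling of 65535, an application is expected to read dot1dStpPortPathCost32 instead. A hedged Go sketch of that fallback (the function and both arguments are hypothetical stand-ins for values already fetched over SNMP, not anything defined by this PR):

package main

import "fmt"

// effectivePathCost follows the guidance above: a dot1dStpPortPathCost of
// 65535 means the 16-bit object is saturated, so prefer dot1dStpPortPathCost32.
func effectivePathCost(cost16, cost32 int64) int64 {
	if cost16 == 65535 && cost32 > 0 {
		return cost32
	}
	return cost16
}

func main() {
	fmt.Println(effectivePathCost(65535, 2000000)) // saturated: fall back to the 32-bit value
	fmt.Println(effectivePathCost(19, 19))         // normal case: the 16-bit value is usable
}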
+ REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 1 } + +dot1dTpAgingTime OBJECT-TYPE + SYNTAX Integer32 (10..1000000) + UNITS "seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The timeout period in seconds for aging out + dynamically-learned forwarding information. + 802.1D-1998 recommends a default of 300 seconds." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 2 } + +-- ---------------------------------------------------------- -- +-- The Forwarding Database for Transparent Bridges +-- ---------------------------------------------------------- -- + +dot1dTpFdbTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains information about unicast + entries for which the bridge has forwarding and/or + filtering information. This information is used + by the transparent bridging function in + determining how to propagate a received frame." + ::= { dot1dTp 3 } + +dot1dTpFdbEntry OBJECT-TYPE + SYNTAX Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Information about a specific unicast MAC address + for which the bridge has some forwarding and/or + filtering information." + INDEX { dot1dTpFdbAddress } + ::= { dot1dTpFdbTable 1 } + +Dot1dTpFdbEntry ::= + SEQUENCE { + dot1dTpFdbAddress + MacAddress, + dot1dTpFdbPort + Integer32, + dot1dTpFdbStatus + INTEGER + } + +dot1dTpFdbAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "A unicast MAC address for which the bridge has + forwarding and/or filtering information." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dTpFdbEntry 1 } + +dot1dTpFdbPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port on + which a frame having a source address equal to the value + of the corresponding instance of dot1dTpFdbAddress has + been seen. A value of '0' indicates that the port + number has not been learned, but that the bridge does + have some forwarding/filtering information about this + address (e.g., in the dot1dStaticTable). Implementors + are encouraged to assign the port value to this object + whenever it is learned, even for addresses for which the + corresponding value of dot1dTpFdbStatus is not + learned(3)." + ::= { dot1dTpFdbEntry 2 } + +dot1dTpFdbStatus OBJECT-TYPE + SYNTAX INTEGER { + other(1), + invalid(2), + learned(3), + self(4), + mgmt(5) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The status of this entry. The meanings of the + values are: + other(1) - none of the following. This would + include the case where some other MIB object + (not the corresponding instance of + dot1dTpFdbPort, nor an entry in the + dot1dStaticTable) is being used to determine if + and how frames addressed to the value of the + corresponding instance of dot1dTpFdbAddress are + being forwarded. + invalid(2) - this entry is no longer valid (e.g., + it was learned but has since aged out), but has + not yet been flushed from the table. + learned(3) - the value of the corresponding instance + of dot1dTpFdbPort was learned, and is being + used. + self(4) - the value of the corresponding instance of + dot1dTpFdbAddress represents one of the bridge's + addresses. The corresponding instance of + dot1dTpFdbPort indicates which of the bridge's + ports has this address. 
+ mgmt(5) - the value of the corresponding instance of + dot1dTpFdbAddress is also the value of an + existing instance of dot1dStaticAddress." + ::= { dot1dTpFdbEntry 3 } + +-- ---------------------------------------------------------- -- +-- Port Table for Transparent Bridges +-- ---------------------------------------------------------- -- + +dot1dTpPortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dTpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains information about every port that + is associated with this transparent bridge." + ::= { dot1dTp 4 } + +dot1dTpPortEntry OBJECT-TYPE + SYNTAX Dot1dTpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information for each port of a transparent + bridge." + INDEX { dot1dTpPort } + ::= { dot1dTpPortTable 1 } + +Dot1dTpPortEntry ::= + SEQUENCE { + dot1dTpPort + Integer32, + dot1dTpPortMaxInfo + Integer32, + dot1dTpPortInFrames + Counter32, + dot1dTpPortOutFrames + Counter32, + dot1dTpPortInDiscards + Counter32 + } + +dot1dTpPort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains Transparent bridging management information." + ::= { dot1dTpPortEntry 1 } + +-- It would be nice if we could use ifMtu as the size of the +-- largest INFO field, but we can't because ifMtu is defined +-- to be the size that the (inter-)network layer can use, which +-- can differ from the MAC layer (especially if several layers +-- of encapsulation are used). + +dot1dTpPortMaxInfo OBJECT-TYPE + SYNTAX Integer32 + UNITS "bytes" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum size of the INFO (non-MAC) field that + + this port will receive or transmit." + ::= { dot1dTpPortEntry 2 } + +dot1dTpPortInFrames OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames that have been received by this + port from its segment. Note that a frame received on the + interface corresponding to this port is only counted by + this object if and only if it is for a protocol being + processed by the local bridging function, including + bridge management frames." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 3 } + +dot1dTpPortOutFrames OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames that have been transmitted by this + port to its segment. Note that a frame transmitted on + the interface corresponding to this port is only counted + by this object if and only if it is for a protocol being + processed by the local bridging function, including + bridge management frames." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 4 } + +dot1dTpPortInDiscards OBJECT-TYPE + SYNTAX Counter32 + UNITS "frames" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Count of received valid frames that were discarded + (i.e., filtered) by the Forwarding Process." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dTpPortEntry 5 } + +-- ---------------------------------------------------------- -- + +-- The Static (Destination-Address Filtering) Database +-- ---------------------------------------------------------- -- +-- Implementation of this subtree is optional. 
+-- ---------------------------------------------------------- -- + +dot1dStaticTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing filtering information configured + into the bridge by (local or network) management + specifying the set of ports to which frames received + from specific ports and containing specific destination + addresses are allowed to be forwarded. The value of + zero in this table, as the port number from which frames + with a specific destination address are received, is + used to specify all ports for which there is no specific + entry in this table for that particular destination + address. Entries are valid for unicast and for + group/broadcast addresses." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + ::= { dot1dStatic 1 } + +dot1dStaticEntry OBJECT-TYPE + SYNTAX Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Filtering information configured into the bridge by + (local or network) management specifying the set of + ports to which frames received from a specific port and + containing a specific destination address are allowed to + be forwarded." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + INDEX { dot1dStaticAddress, dot1dStaticReceivePort } + ::= { dot1dStaticTable 1 } + +Dot1dStaticEntry ::= + SEQUENCE { + dot1dStaticAddress MacAddress, + dot1dStaticReceivePort Integer32, + dot1dStaticAllowedToGoTo OCTET STRING, + dot1dStaticStatus INTEGER + } + +dot1dStaticAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The destination MAC address in a frame to which this + entry's filtering information applies. This object can + take the value of a unicast address, a group address, or + the broadcast address." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dStaticEntry 1 } + +dot1dStaticReceivePort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port + from which a frame must be received in order for this + entry's filtering information to apply. A value of zero + indicates that this entry applies on all ports of the + bridge for which there is no other applicable entry." + ::= { dot1dStaticEntry 2 } + +dot1dStaticAllowedToGoTo OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..512)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The set of ports to which frames received from a + specific port and destined for a specific MAC address, + are allowed to be forwarded. Each octet within the + value of this object specifies a set of eight ports, + with the first octet specifying ports 1 through 8, the + second octet specifying ports 9 through 16, etc. Within + each octet, the most significant bit represents the + lowest numbered port, and the least significant bit + represents the highest numbered port. Thus, each port + of the bridge is represented by a single bit within the + value of this object. If that bit has a value of '1', + then that port is included in the set of ports; the port + is not included if its bit has a value of '0'. (Note + that the setting of the bit corresponding to the port + from which a frame is received is irrelevant.) The + default value of this object is a string of ones of + appropriate length. 
+ + The value of this object may exceed the required minimum + maximum message size of some SNMP transport (484 bytes, + in the case of SNMP over UDP, see RFC 3417, section 3.2). + SNMP engines on bridges supporting a large number of + ports must support appropriate maximum message sizes." + ::= { dot1dStaticEntry 3 } + +dot1dStaticStatus OBJECT-TYPE + SYNTAX INTEGER { + other(1), + invalid(2), + permanent(3), + deleteOnReset(4), + deleteOnTimeout(5) + } + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "This object indicates the status of this entry. + The default value is permanent(3). + other(1) - this entry is currently in use but the + conditions under which it will remain so are + different from each of the following values. + invalid(2) - writing this value to the object + removes the corresponding entry. + permanent(3) - this entry is currently in use and + will remain so after the next reset of the + bridge. + deleteOnReset(4) - this entry is currently in use + and will remain so until the next reset of the + bridge. + deleteOnTimeout(5) - this entry is currently in use + and will remain so until it is aged out." + ::= { dot1dStaticEntry 4 } + +-- ---------------------------------------------------------- -- +-- Notifications for use by Bridges +-- ---------------------------------------------------------- -- +-- Notifications for the Spanning Tree Protocol +-- ---------------------------------------------------------- -- + +newRoot NOTIFICATION-TYPE + -- OBJECTS { } + STATUS current + DESCRIPTION + "The newRoot trap indicates that the sending agent has + become the new root of the Spanning Tree; the trap is + sent by a bridge soon after its election as the new + + root, e.g., upon expiration of the Topology Change Timer, + immediately subsequent to its election. Implementation + of this trap is optional." + ::= { dot1dNotifications 1 } + +topologyChange NOTIFICATION-TYPE + -- OBJECTS { } + STATUS current + DESCRIPTION + "A topologyChange trap is sent by a bridge when any of + its configured ports transitions from the Learning state + to the Forwarding state, or from the Forwarding state to + the Blocking state. The trap is not sent if a newRoot + trap is sent for the same transition. Implementation of + this trap is optional." + ::= { dot1dNotifications 2 } + +-- ---------------------------------------------------------- -- +-- IEEE 802.1D MIB - Conformance Information +-- ---------------------------------------------------------- -- + +dot1dGroups OBJECT IDENTIFIER ::= { dot1dConformance 1 } +dot1dCompliances OBJECT IDENTIFIER ::= { dot1dConformance 2 } + +-- ---------------------------------------------------------- -- +-- units of conformance +-- ---------------------------------------------------------- -- + +-- ---------------------------------------------------------- -- +-- the dot1dBase group +-- ---------------------------------------------------------- -- + +dot1dBaseBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dBaseBridgeAddress, + dot1dBaseNumPorts, + dot1dBaseType + } + STATUS current + DESCRIPTION + "Bridge level information for this device." + ::= { dot1dGroups 1 } + +dot1dBasePortGroup OBJECT-GROUP + OBJECTS { + dot1dBasePort, + dot1dBasePortIfIndex, + dot1dBasePortCircuit, + dot1dBasePortDelayExceededDiscards, + dot1dBasePortMtuExceededDiscards + } + STATUS current + DESCRIPTION + "Information for each port on this device." 
+ ::= { dot1dGroups 2 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp group +-- ---------------------------------------------------------- -- + +dot1dStpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dStpProtocolSpecification, + dot1dStpPriority, + dot1dStpTimeSinceTopologyChange, + dot1dStpTopChanges, + dot1dStpDesignatedRoot, + dot1dStpRootCost, + dot1dStpRootPort, + dot1dStpMaxAge, + dot1dStpHelloTime, + dot1dStpHoldTime, + dot1dStpForwardDelay, + dot1dStpBridgeMaxAge, + dot1dStpBridgeHelloTime, + dot1dStpBridgeForwardDelay + } + STATUS current + DESCRIPTION + "Bridge level Spanning Tree data for this device." + ::= { dot1dGroups 3 } + +dot1dStpPortGroup OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortPathCost, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 4 } + +dot1dStpPortGroup2 OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions, + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 5 } + +dot1dStpPortGroup3 OBJECT-GROUP + OBJECTS { + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for devices supporting 32-bit + path costs." + ::= { dot1dGroups 6 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp group +-- ---------------------------------------------------------- -- + +dot1dTpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dTpLearnedEntryDiscards, + dot1dTpAgingTime + } + STATUS current + DESCRIPTION + "Bridge level Transparent Bridging data." + ::= { dot1dGroups 7 } + +dot1dTpFdbGroup OBJECT-GROUP + OBJECTS { + + dot1dTpFdbAddress, + dot1dTpFdbPort, + dot1dTpFdbStatus + } + STATUS current + DESCRIPTION + "Filtering Database information for the Bridge." + ::= { dot1dGroups 8 } + +dot1dTpGroup OBJECT-GROUP + OBJECTS { + dot1dTpPort, + dot1dTpPortMaxInfo, + dot1dTpPortInFrames, + dot1dTpPortOutFrames, + dot1dTpPortInDiscards + } + STATUS current + DESCRIPTION + "Dynamic Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 9 } + +-- ---------------------------------------------------------- -- +-- The Static (Destination-Address Filtering) Database +-- ---------------------------------------------------------- -- + +dot1dStaticGroup OBJECT-GROUP + OBJECTS { + dot1dStaticAddress, + dot1dStaticReceivePort, + dot1dStaticAllowedToGoTo, + dot1dStaticStatus + } + STATUS current + DESCRIPTION + "Static Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 10 } + +-- ---------------------------------------------------------- -- +-- The Trap Notification Group +-- ---------------------------------------------------------- -- + +dot1dNotificationGroup NOTIFICATION-GROUP + NOTIFICATIONS { + newRoot, + topologyChange + } + STATUS current + DESCRIPTION + "Group of objects describing notifications (traps)." 
+ ::= { dot1dGroups 11 } + +-- ---------------------------------------------------------- -- +-- compliance statements +-- ---------------------------------------------------------- -- + +bridgeCompliance1493 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for device support of bridging + services, as per RFC1493." + + MODULE + MANDATORY-GROUPS { + dot1dBaseBridgeGroup, + dot1dBasePortGroup + } + + GROUP dot1dStpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol." + + GROUP dot1dStpPortGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol." + + GROUP dot1dTpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dTpFdbGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dTpGroup + DESCRIPTION + "Implementation of this group is mandatory for bridges + + that support the transparent bridging mode. A + transparent or SRT bridge will implement this group." + + GROUP dot1dStaticGroup + DESCRIPTION + "Implementation of this group is optional." + + GROUP dot1dNotificationGroup + DESCRIPTION + "Implementation of this group is optional." + ::= { dot1dCompliances 1 } + +bridgeCompliance4188 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for device support of bridging + services. This supports 32-bit Path Cost values and the + more restricted bridge and port priorities, as per IEEE + 802.1t. + + Full support for the 802.1D management objects requires that + the SNMPv2-MIB [RFC3418] objects sysDescr, and sysUpTime, as + well as the IF-MIB [RFC2863] objects ifIndex, ifType, + ifDescr, ifPhysAddress, and ifLastChange are implemented." + + MODULE + MANDATORY-GROUPS { + dot1dBaseBridgeGroup, + dot1dBasePortGroup + } + + GROUP dot1dStpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the Spanning Tree Protocol." + + OBJECT dot1dStpPriority + SYNTAX Integer32 (0|4096|8192|12288|16384|20480|24576 + |28672|32768|36864|40960|45056|49152 + |53248|57344|61440) + DESCRIPTION + "The possible values defined by IEEE 802.1t." + + GROUP dot1dStpPortGroup2 + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the Spanning Tree Protocol." + + GROUP dot1dStpPortGroup3 + DESCRIPTION + "Implementation of this group is mandatory for bridges + that support the Spanning Tree Protocol and 32-bit path + costs. In particular, this includes devices supporting + IEEE 802.1t and IEEE 802.1w." + + OBJECT dot1dStpPortPriority + SYNTAX Integer32 (0|16|32|48|64|80|96|112|128 + |144|160|176|192|208|224|240) + DESCRIPTION + "The possible values defined by IEEE 802.1t." + + GROUP dot1dTpBridgeGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." + + GROUP dot1dTpFdbGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." 
+ + GROUP dot1dTpGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." + + GROUP dot1dStaticGroup + DESCRIPTION + "Implementation of this group is optional." + + GROUP dot1dNotificationGroup + DESCRIPTION + "Implementation of this group is optional." + ::= { dot1dCompliances 2 } + +END diff --git a/plugins/inputs/snmp/testdata/bridgeMibImports b/plugins/inputs/snmp/testdata/bridgeMibImports new file mode 100644 index 0000000000000..8f6a52bd36058 --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMibImports @@ -0,0 +1,554 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. + +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) 
+ +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + 
value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." + ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as 
defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InterfaceIndex ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A unique value, greater than zero, for each interface or + interface sub-layer in the managed system. It is + recommended that values are assigned contiguously starting + from 1. The value for each interface sub-layer must remain + constant at least from one re-initialization of the entity's + network management system to the next re-initialization." + SYNTAX Integer32 (1..2147483647) + + + +MacAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents an 802 MAC address represented in the + `canonical' order defined by IEEE 802.1a, i.e., as if it + were transmitted least significant bit first, even though + 802.5 (in contrast to other 802.x protocols) requires MAC + addresses to be transmitted most significant bit first." + SYNTAX OCTET STRING (SIZE (6)) + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/foo b/plugins/inputs/snmp/testdata/foo new file mode 100644 index 0000000000000..4e9bf7f9d16f9 --- /dev/null +++ b/plugins/inputs/snmp/testdata/foo @@ -0,0 +1,30 @@ +FOOTEST-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +fooTestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +fooMIBObjects OBJECT IDENTIFIER ::= { iso 2 } +fooOne OBJECT IDENTIFIER ::= { iso 1 } +six OBJECT IDENTIFIER ::= { fooOne 1 } +three OBJECT IDENTIFIER ::= { six 3 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { fooMIBObjects 3 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/fooImports b/plugins/inputs/snmp/testdata/fooImports new file mode 100644 index 0000000000000..6cbed24de4b95 --- /dev/null +++ b/plugins/inputs/snmp/testdata/fooImports @@ -0,0 +1,169 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 2 } +internet OBJECT IDENTIFIER ::= { dod 3 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/ifPhysAddress b/plugins/inputs/snmp/testdata/ifPhysAddress new file mode 100644 index 0000000000000..8ac5b5a2e9489 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddress @@ -0,0 +1,84 @@ +IF-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, mib-2, + PhysAddress FROM ifPhysAddressImports; + +ifMIB MODULE-IDENTITY + LAST-UPDATED "200006140000Z" + ORGANIZATION "IETF Interfaces MIB Working Group" + CONTACT-INFO + " Keith McCloghrie + Cisco Systems, Inc. + 170 West Tasman Drive + San Jose, CA 95134-1706 + US + + 408-526-5260 + kzm@cisco.com" + DESCRIPTION + "The MIB module to describe generic objects for network + interface sub-layers. This MIB is an updated version of + MIB-II's ifTable, and incorporates the extensions defined in + RFC 1229." + + REVISION "200006140000Z" + DESCRIPTION + "Clarifications agreed upon by the Interfaces MIB WG, and + published as RFC 2863." 
+ REVISION "199602282155Z" + DESCRIPTION + "Revisions made by the Interfaces MIB WG, and published in + RFC 2233." + REVISION "199311082155Z" + DESCRIPTION + "Initial revision, published as part of RFC 1573." + ::= { mib-2 31 } + +ifMIBObjects OBJECT IDENTIFIER ::= { ifMIB 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of interface entries. The number of entries is + given by the value of ifNumber." + ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An entry containing management information applicable to a + particular interface." + INDEX { ifIndex } + ::= { ifTable 1 } + + + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. For interfaces which do not have + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { ifEntry 9 } + +END diff --git a/plugins/inputs/snmp/testdata/ifPhysAddressImports b/plugins/inputs/snmp/testdata/ifPhysAddressImports new file mode 100644 index 0000000000000..316f665b4f916 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddressImports @@ -0,0 +1,254 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + 
| empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +PhysAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents media- or physical-level addresses." + SYNTAX OCTET STRING + + +END diff --git a/plugins/inputs/snmp/testdata/server b/plugins/inputs/snmp/testdata/server new file mode 100644 index 0000000000000..4f97618d62ef3 --- /dev/null +++ b/plugins/inputs/snmp/testdata/server @@ -0,0 +1,57 @@ +TEST DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +server OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +connections OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +latency OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +description OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/serverImports b/plugins/inputs/snmp/testdata/serverImports new file mode 100644 index 0000000000000..6bfb238234f07 --- /dev/null +++ b/plugins/inputs/snmp/testdata/serverImports @@ -0,0 +1,174 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 1 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 1 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/snmpd.conf b/plugins/inputs/snmp/testdata/snmpd.conf deleted file mode 100644 index 3f3151a6550c0..0000000000000 --- a/plugins/inputs/snmp/testdata/snmpd.conf +++ /dev/null @@ -1,17 +0,0 @@ -# This config provides the data represented in the plugin documentation -# Requires net-snmp >= 5.7 - -#agentaddress UDP:127.0.0.1:1161 -rocommunity public - -override .1.0.0.0.1.1.0 octet_str "foo" -override .1.0.0.0.1.1.1 octet_str "bar" -override .1.0.0.0.1.102 octet_str "bad" -override .1.0.0.0.1.2.0 integer 1 -override .1.0.0.0.1.2.1 integer 2 -override .1.0.0.0.1.3.0 octet_str "0.123" -override .1.0.0.0.1.3.1 octet_str "0.456" -override .1.0.0.0.1.3.2 octet_str "9.999" -override .1.0.0.1.1 octet_str "baz" -override .1.0.0.1.2 uinteger 54321 -override .1.0.0.1.3 uinteger 234 diff --git a/plugins/inputs/snmp/testdata/tableBuild b/plugins/inputs/snmp/testdata/tableBuild new file mode 100644 index 0000000000000..0551bfd6dd1d4 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableBuild @@ -0,0 +1,57 @@ +TEST DEFINITIONS 
::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +myfield1 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +myfield2 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +myfield3 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +myfield4 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMib b/plugins/inputs/snmp/testdata/tableMib new file mode 100644 index 0000000000000..be13c1c1cc510 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMib @@ -0,0 +1,2613 @@ +RFC1213-MIB DEFINITIONS ::= BEGIN + +IMPORTS + mgmt, NetworkAddress, IpAddress, Counter, Gauge, + TimeTicks + FROM RFC1155-SMI + OBJECT-TYPE + FROM fooImports; + +-- This MIB module uses the extended OBJECT-TYPE macro as +-- defined in [14]; + +-- MIB-II (same prefix as MIB-I) + +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +-- textual conventions + +DisplayString ::= + OCTET STRING +-- This data type is used to model textual information taken +-- from the NVT ASCII character set. By convention, objects +-- with this syntax are declared as having + +-- +-- SIZE (0..255) + +PhysAddress ::= + OCTET STRING +-- This data type is used to model media addresses. For many +-- types of media, this will be in a binary representation. +-- For example, an ethernet address would be represented as +-- a string of 6 octets. + +-- groups in MIB-II + +system OBJECT IDENTIFIER ::= { mib-2 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + +at OBJECT IDENTIFIER ::= { mib-2 3 } + +ip OBJECT IDENTIFIER ::= { mib-2 4 } + +icmp OBJECT IDENTIFIER ::= { mib-2 5 } + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +udp OBJECT IDENTIFIER ::= { mib-2 7 } + +egp OBJECT IDENTIFIER ::= { mib-2 8 } + +-- historical (some say hysterical) +-- cmot OBJECT IDENTIFIER ::= { mib-2 9 } + +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +snmp OBJECT IDENTIFIER ::= { mib-2 11 } + +-- the System group + +-- Implementation of the System group is mandatory for all +-- systems. If an agent is not configured to have a value +-- for any of these variables, a string of length 0 is +-- returned. + +sysDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual description of the entity. This value + should include the full name and version + identification of the system's hardware type, + software operating-system, and networking + software. It is mandatory that this only contain + printable ASCII characters." 
+ ::= { system 1 } + +sysObjectID OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The vendor's authoritative identification of the + network management subsystem contained in the + entity. This value is allocated within the SMI + enterprises subtree (1.3.6.1.4.1) and provides an + easy and unambiguous means for determining `what + kind of box' is being managed. For example, if + vendor `Flintstones, Inc.' was assigned the + subtree 1.3.6.1.4.1.4242, it could assign the + identifier 1.3.6.1.4.1.4242.1.1 to its `Fred + Router'." + ::= { system 2 } + +sysUpTime OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The time (in hundredths of a second) since the + network management portion of the system was last + re-initialized." + ::= { system 3 } + +sysContact OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The textual identification of the contact person + for this managed node, together with information + on how to contact this person." + ::= { system 4 } + +sysName OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An administratively-assigned name for this + managed node. By convention, this is the node's + fully-qualified domain name." + ::= { system 5 } + +sysLocation OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The physical location of this node (e.g., + `telephone closet, 3rd floor')." + ::= { system 6 } + +sysServices OBJECT-TYPE + SYNTAX INTEGER (0..127) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A value which indicates the set of services that + this entity primarily offers. + + The value is a sum. This sum initially takes the + value zero, Then, for each layer, L, in the range + 1 through 7, that this node performs transactions + for, 2 raised to (L - 1) is added to the sum. For + example, a node which performs primarily routing + functions would have a value of 4 (2^(3-1)). In + contrast, a node which is a host offering + application services would have a value of 72 + (2^(4-1) + 2^(7-1)). Note that in the context of + the Internet suite of protocols, values should be + calculated accordingly: + + layer functionality + 1 physical (e.g., repeaters) + 2 datalink/subnetwork (e.g., bridges) + 3 internet (e.g., IP gateways) + 4 end-to-end (e.g., IP hosts) + 7 applications (e.g., mail relays) + + For systems including OSI protocols, layers 5 and + 6 may also be counted." + ::= { system 7 } + +-- the Interfaces group + +-- Implementation of the Interfaces group is mandatory for +-- all systems. + +ifNumber OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of network interfaces (regardless of + their current state) present on this system." + ::= { interfaces 1 } + +-- the Interfaces table + +-- The Interfaces table contains information on the entity's +-- interfaces. Each interface is thought of as being +-- attached to a `subnetwork'. Note that this term should +-- not be confused with `subnet' which refers to an +-- addressing partitioning scheme used in the Internet suite +-- of protocols. + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A list of interface entries. The number of + entries is given by the value of ifNumber." 
+ ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "An interface entry containing objects at the + subnetwork layer and below for a particular + interface." + INDEX { ifIndex } + ::= { ifTable 1 } + +IfEntry ::= + SEQUENCE { + ifIndex + INTEGER, + ifDescr + DisplayString, + ifType + INTEGER, + ifMtu + INTEGER, + ifSpeed + Gauge, + ifPhysAddress + PhysAddress, + ifAdminStatus + INTEGER, + ifOperStatus + INTEGER, + ifLastChange + TimeTicks, + ifInOctets + Counter, + ifInUcastPkts + Counter, + ifInNUcastPkts + Counter, + ifInDiscards + Counter, + ifInErrors + Counter, + ifInUnknownProtos + Counter, + ifOutOctets + Counter, + ifOutUcastPkts + Counter, + ifOutNUcastPkts + Counter, + ifOutDiscards + Counter, + ifOutErrors + Counter, + ifOutQLen + Gauge, + ifSpecific + OBJECT IDENTIFIER + } + +ifIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A unique value for each interface. Its value + ranges between 1 and the value of ifNumber. The + value for each interface must remain constant at + least from one re-initialization of the entity's + network management system to the next re- + initialization." + ::= { ifEntry 1 } + +ifDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual string containing information about the + interface. This string should include the name of + the manufacturer, the product name and the version + of the hardware interface." + ::= { ifEntry 2 } + +ifType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + regular1822(2), + hdh1822(3), + ddn-x25(4), + rfc877-x25(5), + ethernet-csmacd(6), + iso88023-csmacd(7), + iso88024-tokenBus(8), + iso88025-tokenRing(9), + iso88026-man(10), + starLan(11), + proteon-10Mbit(12), + proteon-80Mbit(13), + hyperchannel(14), + fddi(15), + lapb(16), + sdlc(17), + ds1(18), -- T-1 + e1(19), -- european equiv. of T-1 + basicISDN(20), + primaryISDN(21), -- proprietary serial + propPointToPointSerial(22), + ppp(23), + softwareLoopback(24), + eon(25), -- CLNP over IP [11] + ethernet-3Mbit(26), + nsip(27), -- XNS over IP + slip(28), -- generic SLIP + ultra(29), -- ULTRA technologies + ds3(30), -- T-3 + sip(31), -- SMDS + frame-relay(32) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The type of interface, distinguished according to + the physical/link protocol(s) immediately `below' + the network layer in the protocol stack." + ::= { ifEntry 3 } + +ifMtu OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest datagram which can be + sent/received on the interface, specified in + octets. For interfaces that are used for + transmitting network datagrams, this is the size + of the largest network datagram that can be sent + on the interface." + ::= { ifEntry 4 } + +ifSpeed OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "An estimate of the interface's current bandwidth + in bits per second. For interfaces which do not + vary in bandwidth or for those where no accurate + estimation can be made, this object should contain + the nominal bandwidth." + ::= { ifEntry 5 } + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. 
For interfaces which do not have + + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +ifAdminStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The desired state of the interface. The + testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 7 } + +ifOperStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The current operational state of the interface. + The testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 8 } + +ifLastChange OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of sysUpTime at the time the interface + entered its current operational state. If the + current state was entered prior to the last re- + initialization of the local network management + subsystem, then this object contains a zero + value." + ::= { ifEntry 9 } + +ifInOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets received on the + interface, including framing characters." + ::= { ifEntry 10 } + +ifInUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of subnetwork-unicast packets + delivered to a higher-layer protocol." + ::= { ifEntry 11 } + +ifInNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of non-unicast (i.e., subnetwork- + broadcast or subnetwork-multicast) packets + delivered to a higher-layer protocol." + ::= { ifEntry 12 } + +ifInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being deliverable to a + higher-layer protocol. One possible reason for + discarding such a packet could be to free up + buffer space." + ::= { ifEntry 13 } + +ifInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets that contained + errors preventing them from being deliverable to a + higher-layer protocol." + ::= { ifEntry 14 } + +ifInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of packets received via the interface + which were discarded because of an unknown or + unsupported protocol." + ::= { ifEntry 15 } + +ifOutOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets transmitted out of the + interface, including framing characters." + ::= { ifEntry 16 } + +ifOutUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a + subnetwork-unicast address, including those that + were discarded or not sent." + ::= { ifEntry 17 } + +ifOutNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a non- + unicast (i.e., a subnetwork-broadcast or + subnetwork-multicast) address, including those + that were discarded or not sent." 
+ ::= { ifEntry 18 } + +ifOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets which were chosen + + to be discarded even though no errors had been + detected to prevent their being transmitted. One + possible reason for discarding such a packet could + be to free up buffer space." + ::= { ifEntry 19 } + +ifOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of outbound packets that could not be + transmitted because of errors." + ::= { ifEntry 20 } + +ifOutQLen OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The length of the output packet queue (in + packets)." + ::= { ifEntry 21 } + +ifSpecific OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular media being used to realize the + interface. For example, if the interface is + realized by an ethernet, then the value of this + object refers to a document defining objects + specific to ethernet. If this information is not + present, its value should be set to the OBJECT + IDENTIFIER { 0 0 }, which is a syntatically valid + object identifier, and any conformant + implementation of ASN.1 and BER must be able to + generate and recognize this value." + ::= { ifEntry 22 } + +-- the Address Translation group + +-- Implementation of the Address Translation group is +-- mandatory for all systems. Note however that this group +-- is deprecated by MIB-II. That is, it is being included + +-- solely for compatibility with MIB-I nodes, and will most +-- likely be excluded from MIB-III nodes. From MIB-II and +-- onwards, each network protocol group contains its own +-- address translation tables. + +-- The Address Translation group contains one table which is +-- the union across all interfaces of the translation tables +-- for converting a NetworkAddress (e.g., an IP address) into +-- a subnetwork-specific address. For lack of a better term, +-- this document refers to such a subnetwork-specific address +-- as a `physical' address. + +-- Examples of such translation tables are: for broadcast +-- media where ARP is in use, the translation table is +-- equivalent to the ARP cache; or, on an X.25 network where +-- non-algorithmic translation to X.121 addresses is +-- required, the translation table contains the +-- NetworkAddress to X.121 address equivalences. + +atTable OBJECT-TYPE + SYNTAX SEQUENCE OF AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "The Address Translation tables contain the + NetworkAddress to `physical' address equivalences. + Some interfaces do not use translation tables for + determining address equivalences (e.g., DDN-X.25 + has an algorithmic method); if all interfaces are + of this type, then the Address Translation table + is empty, i.e., has zero entries." + ::= { at 1 } + +atEntry OBJECT-TYPE + SYNTAX AtEntry + ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "Each entry contains one NetworkAddress to + `physical' address equivalence." + INDEX { atIfIndex, + atNetAddress } + ::= { atTable 1 } + +AtEntry ::= + SEQUENCE { + atIfIndex + INTEGER, + atPhysAddress + PhysAddress, + atNetAddress + NetworkAddress + } + +atIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The interface on which this entry's equivalence + is effective. 
The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { atEntry 1 } + +atPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The media-dependent `physical' address. + + Setting this object to a null string (one of zero + length) has the effect of invaliding the + corresponding entry in the atTable object. That + is, it effectively dissasociates the interface + identified with said entry from the mapping + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant atPhysAddress object." + ::= { atEntry 2 } + +atNetAddress OBJECT-TYPE + SYNTAX NetworkAddress + ACCESS read-write + STATUS deprecated + DESCRIPTION + "The NetworkAddress (e.g., the IP address) + corresponding to the media-dependent `physical' + address." + ::= { atEntry 3 } + +-- the IP group + +-- Implementation of the IP group is mandatory for all +-- systems. + +ipForwarding OBJECT-TYPE + SYNTAX INTEGER { + forwarding(1), -- acting as a gateway + not-forwarding(2) -- NOT acting as a gateway + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The indication of whether this entity is acting + as an IP gateway in respect to the forwarding of + datagrams received by, but not addressed to, this + entity. IP gateways forward datagrams. IP hosts + do not (except those source-routed via the host). + + Note that for some managed nodes, this object may + take on only a subset of the values possible. + Accordingly, it is appropriate for an agent to + return a `badValue' response if a management + station attempts to change this object to an + inappropriate value." + ::= { ip 1 } + +ipDefaultTTL OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The default value inserted into the Time-To-Live + field of the IP header of datagrams originated at + this entity, whenever a TTL value is not supplied + by the transport layer protocol." + ::= { ip 2 } + +ipInReceives OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams received from + interfaces, including those received in error." + ::= { ip 3 } + +ipInHdrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded due to + errors in their IP headers, including bad + checksums, version number mismatch, other format + errors, time-to-live exceeded, errors discovered + in processing their IP options, etc." + ::= { ip 4 } + +ipInAddrErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams discarded because + the IP address in their IP header's destination + field was not a valid address to be received at + this entity. This count includes invalid + addresses (e.g., 0.0.0.0) and addresses of + unsupported Classes (e.g., Class E). For entities + which are not IP Gateways and therefore do not + forward datagrams, this counter includes datagrams + discarded because the destination address was not + a local address." 
+ ::= { ip 5 } + +ipForwDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input datagrams for which this + entity was not their final IP destination, as a + result of which an attempt was made to find a + route to forward them to that final destination. + In entities which do not act as IP Gateways, this + counter will include only those packets which were + Source-Routed via this entity, and the Source- + Route option processing was successful." + ::= { ip 6 } + +ipInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally-addressed datagrams + received successfully but discarded because of an + unknown or unsupported protocol." + ::= { ip 7 } + +ipInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of input IP datagrams for which no + problems were encountered to prevent their + continued processing, but which were discarded + (e.g., for lack of buffer space). Note that this + counter does not include any datagrams discarded + while awaiting re-assembly." + ::= { ip 8 } + +ipInDelivers OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of input datagrams successfully + delivered to IP user-protocols (including ICMP)." + ::= { ip 9 } + +ipOutRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of IP datagrams which local IP + user-protocols (including ICMP) supplied to IP in + requests for transmission. Note that this counter + does not include any datagrams counted in + ipForwDatagrams." + ::= { ip 10 } + +ipOutDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of output IP datagrams for which no + + problem was encountered to prevent their + transmission to their destination, but which were + discarded (e.g., for lack of buffer space). Note + that this counter would include datagrams counted + in ipForwDatagrams if any such packets met this + (discretionary) discard criterion." + ::= { ip 11 } + +ipOutNoRoutes OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams discarded because no + route could be found to transmit them to their + destination. Note that this counter includes any + packets counted in ipForwDatagrams which meet this + `no-route' criterion. Note that this includes any + datagarms which a host cannot route because all of + its default gateways are down." + ::= { ip 12 } + +ipReasmTimeout OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum number of seconds which received + fragments are held while they are awaiting + reassembly at this entity." + ::= { ip 13 } + +ipReasmReqds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP fragments received which needed + to be reassembled at this entity." + ::= { ip 14 } + +ipReasmOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams successfully re- + assembled." + ::= { ip 15 } + +ipReasmFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of failures detected by the IP re- + assembly algorithm (for whatever reason: timed + out, errors, etc). 
Note that this is not + necessarily a count of discarded IP fragments + since some algorithms (notably the algorithm in + RFC 815) can lose track of the number of fragments + by combining them as they are received." + ::= { ip 16 } + +ipFragOKs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + successfully fragmented at this entity." + ::= { ip 17 } + +ipFragFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagrams that have been + discarded because they needed to be fragmented at + this entity but could not be, e.g., because their + Don't Fragment flag was set." + ::= { ip 18 } + +ipFragCreates OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of IP datagram fragments that have + been generated as a result of fragmentation at + this entity." + ::= { ip 19 } + +-- the IP address table + +-- The IP address table contains this entity's IP addressing +-- information. + +ipAddrTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The table of addressing information relevant to + this entity's IP addresses." + ::= { ip 20 } + +ipAddrEntry OBJECT-TYPE + SYNTAX IpAddrEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The addressing information for one of this + entity's IP addresses." + INDEX { ipAdEntAddr } + ::= { ipAddrTable 1 } + +IpAddrEntry ::= + SEQUENCE { + ipAdEntAddr + IpAddress, + ipAdEntIfIndex + INTEGER, + ipAdEntNetMask + IpAddress, + ipAdEntBcastAddr + INTEGER, + ipAdEntReasmMaxSize + INTEGER (0..65535) + } + +ipAdEntAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address to which this entry's addressing + information pertains." + ::= { ipAddrEntry 1 } + +ipAdEntIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + interface to which this entry is applicable. The + interface identified by a particular value of this + index is the same interface as identified by the + same value of ifIndex." + ::= { ipAddrEntry 2 } + +ipAdEntNetMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The subnet mask associated with the IP address of + this entry. The value of the mask is an IP + address with all the network bits set to 1 and all + the hosts bits set to 0." + ::= { ipAddrEntry 3 } + +ipAdEntBcastAddr OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of the least-significant bit in the IP + broadcast address used for sending datagrams on + the (logical) interface associated with the IP + address of this entry. For example, when the + Internet standard all-ones broadcast address is + used, the value will be 1. This value applies to + both the subnet and network broadcasts addresses + used by the entity on this (logical) interface." + ::= { ipAddrEntry 4 } + +ipAdEntReasmMaxSize OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest IP datagram which this + entity can re-assemble from incoming IP fragmented + datagrams received on this interface." + ::= { ipAddrEntry 5 } + +-- the IP routing table + +-- The IP routing table contains an entry for each route +-- presently known to this entity. 
+ +ipRouteTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "This entity's IP Routing table." + ::= { ip 21 } + +ipRouteEntry OBJECT-TYPE + SYNTAX IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A route to a particular destination." + INDEX { ipRouteDest } + ::= { ipRouteTable 1 } + +IpRouteEntry ::= + SEQUENCE { + ipRouteDest + IpAddress, + ipRouteIfIndex + INTEGER, + ipRouteMetric1 + INTEGER, + ipRouteMetric2 + INTEGER, + ipRouteMetric3 + INTEGER, + ipRouteMetric4 + INTEGER, + ipRouteNextHop + IpAddress, + ipRouteType + INTEGER, + ipRouteProto + INTEGER, + ipRouteAge + INTEGER, + ipRouteMask + IpAddress, + ipRouteMetric5 + INTEGER, + ipRouteInfo + OBJECT IDENTIFIER + } + +ipRouteDest OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The destination IP address of this route. An + entry with a value of 0.0.0.0 is considered a + default route. Multiple routes to a single + destination can appear in the table, but access to + such multiple entries is dependent on the table- + access mechanisms defined by the network + management protocol in use." + ::= { ipRouteEntry 1 } + +ipRouteIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + local interface through which the next hop of this + route should be reached. The interface identified + by a particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipRouteEntry 2 } + +ipRouteMetric1 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The primary routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 3 } + +ipRouteMetric2 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 4 } + +ipRouteMetric3 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 5 } + +ipRouteMetric4 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 6 } + +ipRouteNextHop OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IP address of the next hop of this route. 
+ (In the case of a route bound to an interface + which is realized via a broadcast media, the value + of this field is the agent's IP address on that + interface.)" + ::= { ipRouteEntry 7 } + +ipRouteType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + invalid(2), -- an invalidated route + + -- route to directly + direct(3), -- connected (sub-)network + + -- route to a non-local + indirect(4) -- host/network/sub-network + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of route. Note that the values + direct(3) and indirect(4) refer to the notion of + direct and indirect routing in the IP + architecture. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipRouteTable object. That is, it + effectively dissasociates the destination + identified with said entry from the route + identified with said entry. It is an + implementation-specific matter as to whether the + agent removes an invalidated entry from the table. + Accordingly, management stations must be prepared + to receive tabular information from agents that + corresponds to entries not currently in use. + Proper interpretation of such entries requires + examination of the relevant ipRouteType object." + ::= { ipRouteEntry 8 } + +ipRouteProto OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + -- non-protocol information, + -- e.g., manually configured + local(2), -- entries + + -- set via a network + netmgmt(3), -- management protocol + + -- obtained via ICMP, + icmp(4), -- e.g., Redirect + + -- the remaining values are + -- all gateway routing + -- protocols + egp(5), + ggp(6), + hello(7), + rip(8), + is-is(9), + es-is(10), + ciscoIgrp(11), + bbnSpfIgp(12), + ospf(13), + bgp(14) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The routing mechanism via which this route was + learned. Inclusion of values for gateway routing + protocols is not intended to imply that hosts + should support those protocols." + ::= { ipRouteEntry 9 } + +ipRouteAge OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The number of seconds since this route was last + updated or otherwise determined to be correct. + Note that no semantics of `too old' can be implied + except through knowledge of the routing protocol + by which the route was learned." + ::= { ipRouteEntry 10 } + +ipRouteMask OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicate the mask to be logical-ANDed with the + destination address before being compared to the + value in the ipRouteDest field. For those systems + that do not support arbitrary subnet masks, an + agent constructs the value of the ipRouteMask by + determining whether the value of the correspondent + ipRouteDest field belong to a class-A, B, or C + network, and then using one of: + + mask network + 255.0.0.0 class-A + 255.255.0.0 class-B + 255.255.255.0 class-C + + If the value of the ipRouteDest is 0.0.0.0 (a + default route), then the mask value is also + 0.0.0.0. It should be noted that all IP routing + subsystems implicitly use this mechanism." + ::= { ipRouteEntry 11 } + +ipRouteMetric5 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." 
+ ::= { ipRouteEntry 12 } + +ipRouteInfo OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A reference to MIB definitions specific to the + particular routing protocol which is responsible + for this route, as determined by the value + specified in the route's ipRouteProto value. If + this information is not present, its value should + be set to the OBJECT IDENTIFIER { 0 0 }, which is + a syntatically valid object identifier, and any + conformant implementation of ASN.1 and BER must be + able to generate and recognize this value." + ::= { ipRouteEntry 13 } + +-- the IP Address Translation table + +-- The IP address translation table contain the IpAddress to +-- `physical' address equivalences. Some interfaces do not +-- use translation tables for determining address +-- equivalences (e.g., DDN-X.25 has an algorithmic method); +-- if all interfaces are of this type, then the Address +-- Translation table is empty, i.e., has zero entries. + +ipNetToMediaTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The IP Address Translation table used for mapping + from IP addresses to physical addresses." + ::= { ip 22 } + +ipNetToMediaEntry OBJECT-TYPE + SYNTAX IpNetToMediaEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Each entry contains one IpAddress to `physical' + address equivalence." + INDEX { ipNetToMediaIfIndex, + ipNetToMediaNetAddress } + ::= { ipNetToMediaTable 1 } + +IpNetToMediaEntry ::= + SEQUENCE { + ipNetToMediaIfIndex + INTEGER, + ipNetToMediaPhysAddress + PhysAddress, + ipNetToMediaNetAddress + IpAddress, + ipNetToMediaType + INTEGER + } + +ipNetToMediaIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The interface on which this entry's equivalence + is effective. The interface identified by a + particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipNetToMediaEntry 1 } + +ipNetToMediaPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The media-dependent `physical' address." + ::= { ipNetToMediaEntry 2 } + +ipNetToMediaNetAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IpAddress corresponding to the media- + dependent `physical' address." + ::= { ipNetToMediaEntry 3 } + +ipNetToMediaType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + invalid(2), -- an invalidated mapping + dynamic(3), + static(4) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The type of mapping. + + Setting this object to the value invalid(2) has + the effect of invalidating the corresponding entry + in the ipNetToMediaTable. That is, it effectively + dissasociates the interface identified with said + entry from the mapping identified with said entry. + It is an implementation-specific matter as to + whether the agent removes an invalidated entry + from the table. Accordingly, management stations + must be prepared to receive tabular information + from agents that corresponds to entries not + currently in use. Proper interpretation of such + entries requires examination of the relevant + ipNetToMediaType object." 
+ ::= { ipNetToMediaEntry 4 } + +-- additional IP objects + +ipRoutingDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of routing entries which were chosen + to be discarded even though they are valid. One + possible reason for discarding such an entry could + be to free-up buffer space for other routing + + entries." + ::= { ip 23 } + +-- the ICMP group + +-- Implementation of the ICMP group is mandatory for all +-- systems. + +icmpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which the + entity received. Note that this counter includes + all those counted by icmpInErrors." + ::= { icmp 1 } + +icmpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which the entity + received but determined as having ICMP-specific + errors (bad ICMP checksums, bad length, etc.)." + ::= { icmp 2 } + +icmpInDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages received." + ::= { icmp 3 } + +icmpInTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages + received." + ::= { icmp 4 } + +icmpInParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + received." + ::= { icmp 5 } + +icmpInSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages + received." + ::= { icmp 6 } + +icmpInRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages received." + ::= { icmp 7 } + +icmpInEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages + received." + ::= { icmp 8 } + +icmpInEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages received." + ::= { icmp 9 } + +icmpInTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + received." + ::= { icmp 10 } + +icmpInTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + received." + ::= { icmp 11 } + +icmpInAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + received." + ::= { icmp 12 } + +icmpInAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + received." + ::= { icmp 13 } + +icmpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which this + entity attempted to send. Note that this counter + includes all those counted by icmpOutErrors." + ::= { icmp 14 } + +icmpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which this entity did + not send due to problems discovered within ICMP + + such as a lack of buffers. 
This value should not + include errors discovered outside the ICMP layer + such as the inability of IP to route the resultant + datagram. In some implementations there may be no + types of error which contribute to this counter's + value." + ::= { icmp 15 } + +icmpOutDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages sent." + ::= { icmp 16 } + +icmpOutTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages sent." + ::= { icmp 17 } + +icmpOutParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + sent." + ::= { icmp 18 } + +icmpOutSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages sent." + ::= { icmp 19 } + +icmpOutRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages sent. For a + + host, this object will always be zero, since hosts + do not send redirects." + ::= { icmp 20 } + +icmpOutEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages sent." + ::= { icmp 21 } + +icmpOutEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages sent." + ::= { icmp 22 } + +icmpOutTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + sent." + ::= { icmp 23 } + +icmpOutTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + sent." + ::= { icmp 24 } + +icmpOutAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + sent." + ::= { icmp 25 } + +icmpOutAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + sent." + ::= { icmp 26 } + +-- the TCP group + +-- Implementation of the TCP group is mandatory for all +-- systems that implement the TCP. + +-- Note that instances of object types that represent +-- information about a particular TCP connection are +-- transient; they persist only as long as the connection +-- in question. + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4) -- Van Jacobson's algorithm [10] + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The algorithm used to determine the timeout value + used for retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The minimum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + LBOUND quantity described in RFC 793." 
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + UBOUND quantity described in RFC 793." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The limit on the total number of TCP connections + the entity can support. In entities where the + maximum number of connections is dynamic, this + object should contain the value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-SENT state from the + CLOSED state." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-RCVD state from the + LISTEN state." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the SYN-SENT state or the SYN-RCVD state, plus the + number of times TCP connections have made a direct + transition to the LISTEN state from the SYN-RCVD + state." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the ESTABLISHED state or the CLOSE-WAIT state." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP connections for which the + current state is either ESTABLISHED or CLOSE- + WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received, including + those received in error. This count includes + segments received on currently established + connections." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments sent, including + those on current connections but excluding those + containing only retransmitted octets." + ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments retransmitted - that + is, the number of TCP segments transmitted + containing one or more previously transmitted + octets." + ::= { tcp 12 } + +-- the TCP Connection table + +-- The TCP connection table contains information about this +-- entity's existing TCP connections. + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing TCP connection-specific + information." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current TCP + connection. 
An object of this type is transient, + in that it ceases to exist when (or soon after) + the connection makes the transition to the CLOSED + state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= + SEQUENCE { + tcpConnState + INTEGER, + tcpConnLocalAddress + IpAddress, + tcpConnLocalPort + INTEGER (0..65535), + tcpConnRemAddress + IpAddress, + tcpConnRemPort + INTEGER (0..65535) + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The state of this TCP connection. + + The only value which may be set by a management + station is deleteTCB(12). Accordingly, it is + appropriate for an agent to return a `badValue' + response if a management station attempts to set + this object to any other value. + + If a management station sets this object to the + value deleteTCB(12), then this has the effect of + deleting the TCB (as defined in RFC 793) of the + corresponding connection on the managed node, + resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST + + segment may be sent from the managed node to the + other TCP endpoint (note however that RST segments + are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this TCP connection. In + the case of a connection in the listen state which + is willing to accept connections for any IP + interface associated with the node, the value + 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote IP address for this TCP connection." + ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- additional TCP objects + +tcpInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received in error + (e.g., bad TCP checksums)." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP segments sent containing the + RST flag." + ::= { tcp 15 } + +-- the UDP group + +-- Implementation of the UDP group is mandatory for all +-- systems which implement the UDP. + +udpInDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams delivered to + UDP users." + ::= { udp 1 } + +udpNoPorts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of received UDP datagrams for + which there was no application at the destination + port." 
+ ::= { udp 2 } + +udpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of received UDP datagrams that could + not be delivered for reasons other than the lack + of an application at the destination port." + ::= { udp 3 } + +udpOutDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams sent from this + entity." + ::= { udp 4 } + +-- the UDP Listener table + +-- The UDP listener table contains information about this +-- entity's UDP end-points on which a local application is +-- currently accepting datagrams. + +udpTable OBJECT-TYPE + SYNTAX SEQUENCE OF UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing UDP listener information." + ::= { udp 5 } + +udpEntry OBJECT-TYPE + SYNTAX UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current UDP + listener." + INDEX { udpLocalAddress, udpLocalPort } + ::= { udpTable 1 } + +UdpEntry ::= + SEQUENCE { + udpLocalAddress + IpAddress, + udpLocalPort + INTEGER (0..65535) + } + +udpLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this UDP listener. In + + the case of a UDP listener which is willing to + accept datagrams for any IP interface associated + with the node, the value 0.0.0.0 is used." + ::= { udpEntry 1 } + +udpLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this UDP listener." + ::= { udpEntry 2 } + +-- the EGP group + +-- Implementation of the EGP group is mandatory for all +-- systems which implement the EGP. + +egpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without + error." + ::= { egp 1 } + +egpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received that proved + to be in error." + ::= { egp 2 } + +egpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of locally generated EGP + messages." + ::= { egp 3 } + +egpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent due to resource limitations within an EGP + entity." + ::= { egp 4 } + +-- the EGP Neighbor table + +-- The EGP neighbor table contains information about this +-- entity's EGP neighbors. + +egpNeighTable OBJECT-TYPE + SYNTAX SEQUENCE OF EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The EGP neighbor table." + ::= { egp 5 } + +egpNeighEntry OBJECT-TYPE + SYNTAX EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about this entity's relationship with + a particular EGP neighbor." 
+ INDEX { egpNeighAddr } + ::= { egpNeighTable 1 } + +EgpNeighEntry ::= + SEQUENCE { + egpNeighState + INTEGER, + egpNeighAddr + IpAddress, + egpNeighAs + INTEGER, + egpNeighInMsgs + Counter, + egpNeighInErrs + Counter, + egpNeighOutMsgs + Counter, + egpNeighOutErrs + Counter, + egpNeighInErrMsgs + Counter, + egpNeighOutErrMsgs + Counter, + egpNeighStateUps + Counter, + egpNeighStateDowns + Counter, + egpNeighIntervalHello + INTEGER, + egpNeighIntervalPoll + INTEGER, + egpNeighMode + INTEGER, + egpNeighEventTrigger + INTEGER + } + +egpNeighState OBJECT-TYPE + SYNTAX INTEGER { + idle(1), + acquisition(2), + down(3), + up(4), + cease(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The EGP state of the local system with respect to + this entry's EGP neighbor. Each EGP state is + represented by a value that is one greater than + the numerical value associated with said state in + RFC 904." + ::= { egpNeighEntry 1 } + +egpNeighAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address of this entry's EGP neighbor." + ::= { egpNeighEntry 2 } + +egpNeighAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system of this EGP peer. Zero + should be specified if the autonomous system + number of the neighbor is not yet known." + ::= { egpNeighEntry 3 } + +egpNeighInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without error + from this EGP peer." + ::= { egpNeighEntry 4 } + +egpNeighInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received from this EGP + peer that proved to be in error (e.g., bad EGP + checksum)." + ::= { egpNeighEntry 5 } + +egpNeighOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages to + this EGP peer." + ::= { egpNeighEntry 6 } + +egpNeighOutErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent to this EGP peer due to resource limitations + within an EGP entity." + ::= { egpNeighEntry 7 } + +egpNeighInErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages received + from this EGP peer." + ::= { egpNeighEntry 8 } + +egpNeighOutErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages sent to + this EGP peer." + ::= { egpNeighEntry 9 } + +egpNeighStateUps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions to the UP + state with this EGP peer." + ::= { egpNeighEntry 10 } + +egpNeighStateDowns OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions from the UP + state to any other state with this EGP peer." + ::= { egpNeighEntry 11 } + +egpNeighIntervalHello OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP Hello command + retransmissions (in hundredths of a second). This + represents the t1 timer as defined in RFC 904." 
+ ::= { egpNeighEntry 12 } + +egpNeighIntervalPoll OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP poll command + + retransmissions (in hundredths of a second). This + represents the t3 timer as defined in RFC 904." + ::= { egpNeighEntry 13 } + +egpNeighMode OBJECT-TYPE + SYNTAX INTEGER { active(1), passive(2) } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The polling mode of this EGP entity, either + passive or active." + ::= { egpNeighEntry 14 } + +egpNeighEventTrigger OBJECT-TYPE + SYNTAX INTEGER { start(1), stop(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "A control variable used to trigger operator- + initiated Start and Stop events. When read, this + variable always returns the most recent value that + egpNeighEventTrigger was set to. If it has not + been set since the last initialization of the + network management subsystem on the node, it + returns a value of `stop'. + + When set, this variable causes a Start or Stop + event on the specified neighbor, as specified on + pages 8-10 of RFC 904. Briefly, a Start event + causes an Idle peer to begin neighbor acquisition + and a non-Idle peer to reinitiate neighbor + acquisition. A stop event causes a non-Idle peer + to return to the Idle state until a Start event + occurs, either via egpNeighEventTrigger or + otherwise." + ::= { egpNeighEntry 15 } + +-- additional EGP objects + +egpAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system number of this EGP entity." + ::= { egp 6 } + +-- the Transmission group + +-- Based on the transmission media underlying each interface +-- on a system, the corresponding portion of the Transmission +-- group is mandatory for that system. + +-- When Internet-standard definitions for managing +-- transmission media are defined, the transmission group is +-- used to provide a prefix for the names of those objects. + +-- Typically, such definitions reside in the experimental +-- portion of the MIB until they are "proven", then as a +-- part of the Internet standardization process, the +-- definitions are accordingly elevated and a new object +-- identifier, under the transmission group is defined. By +-- convention, the name assigned is: +-- +-- type OBJECT IDENTIFIER ::= { transmission number } +-- +-- where "type" is the symbolic value used for the media in +-- the ifType column of the ifTable object, and "number" is +-- the actual integer value corresponding to the symbol. + +-- the SNMP group + +-- Implementation of the SNMP group is mandatory for all +-- systems which support an SNMP protocol entity. Some of +-- the objects defined below will be zero-valued in those +-- SNMP implementations that are optimized to support only +-- those functions specific to either a management agent or +-- a management station. In particular, it should be +-- observed that the objects below refer to an SNMP entity, +-- and there may be several SNMP entities residing on a +-- managed node (e.g., if the node is hosting acting as +-- a management station). + +snmpInPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of Messages delivered to the + SNMP entity from the transport service." + ::= { snmp 1 } + +snmpOutPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + passed from the SNMP protocol entity to the + transport service." 
+ ::= { snmp 2 } + +snmpInBadVersions OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages which were + delivered to the SNMP protocol entity and were for + an unsupported SNMP version." + ::= { snmp 3 } + +snmpInBadCommunityNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which used a SNMP + community name not known to said entity." + ::= { snmp 4 } + +snmpInBadCommunityUses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Messages delivered to + the SNMP protocol entity which represented an SNMP + operation which was not allowed by the SNMP + community named in the Message." + ::= { snmp 5 } + +snmpInASNParseErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ASN.1 or BER errors + encountered by the SNMP protocol entity when + decoding received SNMP Messages." + ::= { snmp 6 } + +-- { snmp 7 } is not used + +snmpInTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `tooBig'." + ::= { snmp 8 } + +snmpInNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `noSuchName'." + ::= { snmp 9 } + +snmpInBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 10 } + +snmpInReadOnlys OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number valid SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `readOnly'. It should be noted that it is a + protocol error to generate an SNMP PDU which + contains the value `readOnly' in the error-status + field, as such this object is provided as a means + of detecting incorrect implementations of the + + SNMP." + ::= { snmp 11 } + +snmpInGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + delivered to the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 12 } + +snmpInTotalReqVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + retrieved successfully by the SNMP protocol entity + as the result of receiving valid SNMP Get-Request + and Get-Next PDUs." + ::= { snmp 13 } + +snmpInTotalSetVars OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of MIB objects which have been + altered successfully by the SNMP protocol entity + as the result of receiving valid SNMP Set-Request + PDUs." + ::= { snmp 14 } + +snmpInGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." 
+ ::= { snmp 15 } + +snmpInGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 16 } + +snmpInSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 17 } + +snmpInGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been accepted and processed by the SNMP + protocol entity." + ::= { snmp 18 } + +snmpInTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been accepted and processed by the SNMP protocol + entity." + ::= { snmp 19 } + +snmpOutTooBigs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `tooBig.'" + ::= { snmp 20 } + +snmpOutNoSuchNames OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status is + `noSuchName'." + ::= { snmp 21 } + +snmpOutBadValues OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `badValue'." + ::= { snmp 22 } + +-- { snmp 23 } is not used + +snmpOutGenErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP PDUs which were + generated by the SNMP protocol entity and for + which the value of the error-status field is + `genErr'." + ::= { snmp 24 } + +snmpOutGetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 25 } + +snmpOutGetNexts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Next PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 26 } + +snmpOutSetRequests OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Set-Request PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 27 } + +snmpOutGetResponses OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Get-Response PDUs which + have been generated by the SNMP protocol entity." + ::= { snmp 28 } + +snmpOutTraps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of SNMP Trap PDUs which have + been generated by the SNMP protocol entity." + ::= { snmp 29 } + +snmpEnableAuthenTraps OBJECT-TYPE + SYNTAX INTEGER { enabled(1), disabled(2) } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "Indicates whether the SNMP agent process is + permitted to generate authentication-failure + traps. 
The value of this object overrides any + configuration information; as such, it provides a + means whereby all authentication-failure traps may + be disabled. + + Note that it is strongly recommended that this + object be stored in non-volatile memory so that it + remains constant between re-initializations of the + network management system." + ::= { snmp 30 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMibImports b/plugins/inputs/snmp/testdata/tableMibImports new file mode 100644 index 0000000000000..1516e7cbb840f --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMibImports @@ -0,0 +1,119 @@ +RFC1155-SMI DEFINITIONS ::= BEGIN + +EXPORTS -- EVERYTHING + internet, directory, mgmt, + experimental, private, enterprises, + OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax, + ApplicationSyntax, NetworkAddress, IpAddress, + Counter, Gauge, TimeTicks, Opaque; + + -- the path to the root + + internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 } + + directory OBJECT IDENTIFIER ::= { internet 1 } + + mgmt OBJECT IDENTIFIER ::= { internet 2 } + + experimental OBJECT IDENTIFIER ::= { internet 3 } + + private OBJECT IDENTIFIER ::= { internet 4 } + enterprises OBJECT IDENTIFIER ::= { private 1 } + + -- definition of object types + + OBJECT-TYPE MACRO ::= + BEGIN + TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax) + "ACCESS" Access + "STATUS" Status + VALUE NOTATION ::= value (VALUE ObjectName) + + Access ::= "read-only" + | "read-write" + | "write-only" + | "not-accessible" + Status ::= "mandatory" + | "optional" + | "obsolete" + END + + -- names of objects in the MIB + + ObjectName ::= + OBJECT IDENTIFIER + + -- syntax of objects in the MIB + + ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that simple SEQUENCEs are not directly + -- mentioned here to keep things simple (i.e., + -- prevent mis-use). 
However, application-wide + -- types which are IMPLICITly encoded simple + -- SEQUENCEs may appear in the following CHOICE + + application-wide + ApplicationSyntax + } + + SimpleSyntax ::= + CHOICE { + number + INTEGER, + string + OCTET STRING, + object + OBJECT IDENTIFIER, + empty + NULL + } + + ApplicationSyntax ::= + CHOICE { + address + NetworkAddress, + counter + Counter, + gauge + Gauge, + ticks + TimeTicks, + arbitrary + Opaque + + -- other application-wide types, as they are + -- defined, will be added here + } + + -- application-wide types + + NetworkAddress ::= + CHOICE { + internet + IpAddress + } + + IpAddress ::= + [APPLICATION 0] -- in network-byte order + IMPLICIT OCTET STRING (SIZE (4)) + + Counter ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + + Gauge ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + + TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + + Opaque ::= + [APPLICATION 4] -- arbitrary ASN.1 value, + IMPLICIT OCTET STRING -- "double-wrapped" + + END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tcpMib b/plugins/inputs/snmp/testdata/tcpMib new file mode 100644 index 0000000000000..03c47224da153 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMib @@ -0,0 +1,786 @@ +TCP-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, Unsigned32, + Gauge32, Counter32, Counter64, IpAddress, mib-2, + MODULE-COMPLIANCE, OBJECT-GROUP, InetAddress, + InetAddressType, InetPortNumber + FROM tcpMibImports; + + + +tcpMIB MODULE-IDENTITY + LAST-UPDATED "200502180000Z" -- 18 February 2005 + ORGANIZATION + "IETF IPv6 MIB Revision Team + http://www.ietf.org/html.charters/ipv6-charter.html" + CONTACT-INFO + "Rajiv Raghunarayan (editor) + + Cisco Systems Inc. + 170 West Tasman Drive + San Jose, CA 95134 + + Phone: +1 408 853 9612 + Email: + + Send comments to " + DESCRIPTION + "The MIB module for managing TCP implementations. + + Copyright (C) The Internet Society (2005). This version + of this MIB module is a part of RFC 4022; see the RFC + itself for full legal notices." + REVISION "200502180000Z" -- 18 February 2005 + DESCRIPTION + "IP version neutral revision, published as RFC 4022." + REVISION "9411010000Z" + DESCRIPTION + "Initial SMIv2 version, published as RFC 2012." + REVISION "9103310000Z" + DESCRIPTION + "The initial revision of this MIB module was part of + MIB-II." + ::= { mib-2 49 } + +-- the TCP base variables group + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +-- Scalars + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4), -- Van Jacobson's algorithm + rfc2988(5) -- RFC 2988 + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The algorithm used to determine the timeout value used for + retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The minimum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides a minimum value." 
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides an upper bound (as part of an + adaptive backoff algorithm)." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX Integer32 (-1 | 0..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The limit on the total number of TCP connections the entity + can support. In entities where the maximum number of + connections is dynamic, this object should contain the + value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the SYN-SENT state from the CLOSED state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times TCP connections have made a direct + transition to the SYN-RCVD state from the LISTEN state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the SYN-SENT + state or the SYN-RCVD state, plus the number of times that + TCP connections have made a direct transition to the + LISTEN state from the SYN-RCVD state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the ESTABLISHED + state or the CLOSE-WAIT state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP connections for which the current state + is either ESTABLISHED or CLOSE-WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + on currently established connections. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." 
+ ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments retransmitted; that is, the + number of TCP segments transmitted containing one or more + previously transmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 12 } + +tcpInErrs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received in error (e.g., bad + TCP checksums). + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP segments sent containing the RST flag. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 15 } + +-- { tcp 16 } was used to represent the ipv6TcpConnTable in RFC 2452, +-- which has since been obsoleted. It MUST not be used. + +tcpHCInSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + + on currently established connections. This object is + the 64-bit equivalent of tcpInSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 17 } + +tcpHCOutSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. This object is the 64-bit + equivalent of tcpOutSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 18 } + +-- The TCP Connection table + +tcpConnectionTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about existing TCP + connections. Note that unlike earlier TCP MIBs, there + is a separate table for connections in the LISTEN state." + ::= { tcp 19 } + +tcpConnectionEntry OBJECT-TYPE + SYNTAX TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpConnectionTable containing + information about a particular current TCP connection. + Each row of this table is transient in that it ceases to + exist when (or soon after) the connection makes the + transition to the CLOSED state." + INDEX { tcpConnectionLocalAddressType, + tcpConnectionLocalAddress, + tcpConnectionLocalPort, + tcpConnectionRemAddressType, + tcpConnectionRemAddress, + tcpConnectionRemPort } + ::= { tcpConnectionTable 1 } + +TcpConnectionEntry ::= SEQUENCE { + tcpConnectionLocalAddressType InetAddressType, + tcpConnectionLocalAddress InetAddress, + tcpConnectionLocalPort InetPortNumber, + tcpConnectionRemAddressType InetAddressType, + tcpConnectionRemAddress InetAddress, + tcpConnectionRemPort InetPortNumber, + tcpConnectionState INTEGER, + tcpConnectionProcess Unsigned32 + } + +tcpConnectionLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionLocalAddress." 
+ ::= { tcpConnectionEntry 1 } + +tcpConnectionLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionLocalAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 2 } + +tcpConnectionLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnectionEntry 3 } + +tcpConnectionRemAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionRemAddress." + ::= { tcpConnectionEntry 4 } + +tcpConnectionRemAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionRemAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 5 } + +tcpConnectionRemPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnectionEntry 6 } + +tcpConnectionState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The state of this TCP connection. + + The value listen(2) is included only for parallelism to the + old tcpConnTable and should not be used. A connection in + LISTEN state should be present in the tcpListenerTable. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnectionEntry 7 } + +tcpConnectionProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this connection, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." 
+ ::= { tcpConnectionEntry 8 } + +-- The TCP Listener table + +tcpListenerTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about TCP listeners. A + listening application can be represented in three + possible ways: + + 1. An application that is willing to accept both IPv4 and + IPv6 datagrams is represented by + + a tcpListenerLocalAddressType of unknown (0) and + a tcpListenerLocalAddress of ''h (a zero-length + octet-string). + + 2. An application that is willing to accept only IPv4 or + IPv6 datagrams is represented by a + tcpListenerLocalAddressType of the appropriate address + type and a tcpListenerLocalAddress of '0.0.0.0' or '::' + respectively. + + 3. An application that is listening for data destined + only to a specific IP address, but from any remote + system, is represented by a tcpListenerLocalAddressType + of an appropriate address type, with + tcpListenerLocalAddress as the specific local address. + + NOTE: The address type in this table represents the + address type used for the communication, irrespective + of the higher-layer abstraction. For example, an + application using IPv6 'sockets' to communicate via + IPv4 between ::ffff:10.0.0.1 and ::ffff:10.0.0.2 would + use InetAddressType ipv4(1))." + ::= { tcp 20 } + +tcpListenerEntry OBJECT-TYPE + SYNTAX TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpListenerTable containing + information about a particular TCP listener." + INDEX { tcpListenerLocalAddressType, + tcpListenerLocalAddress, + tcpListenerLocalPort } + ::= { tcpListenerTable 1 } + +TcpListenerEntry ::= SEQUENCE { + tcpListenerLocalAddressType InetAddressType, + tcpListenerLocalAddress InetAddress, + tcpListenerLocalPort InetPortNumber, + tcpListenerProcess Unsigned32 + } + +tcpListenerLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpListenerLocalAddress. The value + should be unknown (0) if connection initiations to all + local IP addresses are accepted." + ::= { tcpListenerEntry 1 } + +tcpListenerLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. + + The value of this object can be represented in three + possible ways, depending on the characteristics of the + listening application: + + 1. For an application willing to accept both IPv4 and + IPv6 datagrams, the value of this object must be + ''h (a zero-length octet-string), with the value + of the corresponding tcpListenerLocalAddressType + object being unknown (0). + + 2. For an application willing to accept only IPv4 or + IPv6 datagrams, the value of this object must be + '0.0.0.0' or '::' respectively, with + tcpListenerLocalAddressType representing the + appropriate address type. + + 3. For an application which is listening for data + destined only to a specific IP address, the value + of this object is the specific local address, with + tcpListenerLocalAddressType representing the + appropriate address type. + + As this object is used in the index for the + tcpListenerTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed, using SNMPv1, SNMPv2c, or SNMPv3." 
+ ::= { tcpListenerEntry 2 } + +tcpListenerLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpListenerEntry 3 } + +tcpListenerProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this listener, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." + ::= { tcpListenerEntry 4 } + +-- The deprecated TCP Connection table + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A table containing information about existing IPv4-specific + TCP connections or listeners. This table has been + deprecated in favor of the version neutral + tcpConnectionTable." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A conceptual row of the tcpConnTable containing information + about a particular current IPv4 TCP connection. Each row + of this table is transient in that it ceases to exist when + (or soon after) the connection makes the transition to the + CLOSED state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= SEQUENCE { + tcpConnState INTEGER, + tcpConnLocalAddress IpAddress, + tcpConnLocalPort Integer32, + tcpConnRemAddress IpAddress, + tcpConnRemPort Integer32 + + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS deprecated + DESCRIPTION + "The state of this TCP connection. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local IP address for this TCP connection. In the case + of a connection in the listen state willing to + accept connections for any IP interface associated with the + node, the value 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote IP address for this TCP connection." 
+ ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- conformance information + +tcpMIBConformance OBJECT IDENTIFIER ::= { tcpMIB 2 } + +tcpMIBCompliances OBJECT IDENTIFIER ::= { tcpMIBConformance 1 } +tcpMIBGroups OBJECT IDENTIFIER ::= { tcpMIBConformance 2 } + +-- compliance statements + +tcpMIBCompliance2 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for systems that implement TCP. + + A number of INDEX objects cannot be + represented in the form of OBJECT clauses in SMIv2 but + have the following compliance requirements, + expressed in OBJECT clause form in this description + clause: + + -- OBJECT tcpConnectionLocalAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + + -- and IPv6 address types. + -- + -- OBJECT tcpConnectionRemAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. + -- + -- OBJECT tcpListenerLocalAddressType + -- SYNTAX InetAddressType { unknown(0), ipv4(1), + -- ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. The type unknown also + -- needs to be supported to identify a special + -- case in the listener table: a listen using + -- both IPv4 and IPv6 addresses on the device. + -- + " + MODULE -- this module + MANDATORY-GROUPS { tcpBaseGroup, tcpConnectionGroup, + tcpListenerGroup } + GROUP tcpHCGroup + DESCRIPTION + "This group is mandatory for systems that are capable + of receiving or transmitting more than 1 million TCP + segments per second. 1 million segments per second will + cause a Counter32 to wrap in just over an hour." + OBJECT tcpConnectionState + SYNTAX INTEGER { closed(1), listen(2), synSent(3), + synReceived(4), established(5), + finWait1(6), finWait2(7), closeWait(8), + lastAck(9), closing(10), timeWait(11) } + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required, nor is support for the value + deleteTCB (12)." + ::= { tcpMIBCompliances 2 } + +tcpMIBCompliance MODULE-COMPLIANCE + STATUS deprecated + DESCRIPTION + "The compliance statement for IPv4-only systems that + implement TCP. In order to be IP version independent, this + compliance statement is deprecated in favor of + tcpMIBCompliance2. However, agents are still encouraged + to implement these objects in order to interoperate with + the deployed base of managers." + + MODULE -- this module + MANDATORY-GROUPS { tcpGroup } + OBJECT tcpConnState + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required." + ::= { tcpMIBCompliances 1 } + +-- units of conformance + +tcpGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, tcpConnState, + tcpConnLocalAddress, tcpConnLocalPort, + tcpConnRemAddress, tcpConnRemPort, + tcpInErrs, tcpOutRsts } + STATUS deprecated + DESCRIPTION + "The tcp group of objects providing for management of TCP + entities." 
+ ::= { tcpMIBGroups 1 } + +tcpBaseGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, + tcpInErrs, tcpOutRsts } + STATUS current + DESCRIPTION + "The group of counters common to TCP entities." + ::= { tcpMIBGroups 2 } + +tcpConnectionGroup OBJECT-GROUP + OBJECTS { tcpConnectionState, tcpConnectionProcess } + STATUS current + DESCRIPTION + "The group provides general information about TCP + connections." + ::= { tcpMIBGroups 3 } + +tcpListenerGroup OBJECT-GROUP + OBJECTS { tcpListenerProcess } + STATUS current + DESCRIPTION + "This group has objects providing general information about + TCP listeners." + ::= { tcpMIBGroups 4 } + +tcpHCGroup OBJECT-GROUP + OBJECTS { tcpHCInSegs, tcpHCOutSegs } + STATUS current + DESCRIPTION + "The group of objects providing for counters of high speed + TCP implementations." + ::= { tcpMIBGroups 5 } + +END diff --git a/plugins/inputs/snmp/testdata/tcpMibImports b/plugins/inputs/snmp/testdata/tcpMibImports new file mode 100644 index 0000000000000..f3b6b9d8d52fd --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMibImports @@ -0,0 +1,639 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its 
refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." 
+ ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InetPortNumber ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "Represents a 16 bit port number of an Internet transport + + layer protocol. Port numbers are assigned by IANA. A + current list of all assignments is available from + . + + The value zero is object-specific and must be defined as + part of the description of any object that uses this + syntax. 
Examples of the usage of zero might include + situations where a port number is unknown, or when the + value zero is used as a wildcard in a filter." + REFERENCE "STD 6 (RFC 768), STD 7 (RFC 793) and RFC 2960" + SYNTAX Unsigned32 (0..65535) + + +InetAddress ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "Denotes a generic Internet address. + + An InetAddress value is always interpreted within the context + of an InetAddressType value. Every usage of the InetAddress + textual convention is required to specify the InetAddressType + object that provides the context. It is suggested that the + InetAddressType object be logically registered before the + object(s) that use the InetAddress textual convention, if + they appear in the same logical row. + + The value of an InetAddress object must always be + consistent with the value of the associated InetAddressType + object. Attempts to set an InetAddress object to a value + inconsistent with the associated InetAddressType + must fail with an inconsistentValue error. + + When this textual convention is used as the syntax of an + index object, there may be issues with the limit of 128 + sub-identifiers specified in SMIv2, STD 58. In this case, + the object definition MUST include a 'SIZE' clause to + limit the number of potential instance sub-identifiers; + otherwise the applicable constraints MUST be stated in + the appropriate conceptual row DESCRIPTION clauses, or + in the surrounding documentation if there is no single + DESCRIPTION clause that is appropriate." + SYNTAX OCTET STRING (SIZE (0..255)) + +InetAddressType ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "A value that represents a type of Internet address. + + unknown(0) An unknown address type. This value MUST + be used if the value of the corresponding + InetAddress object is a zero-length string. + It may also be used to indicate an IP address + that is not in one of the formats defined + below. + + ipv4(1) An IPv4 address as defined by the + InetAddressIPv4 textual convention. + + ipv6(2) An IPv6 address as defined by the + InetAddressIPv6 textual convention. + + ipv4z(3) A non-global IPv4 address including a zone + index as defined by the InetAddressIPv4z + textual convention. + + ipv6z(4) A non-global IPv6 address including a zone + index as defined by the InetAddressIPv6z + textual convention. + + dns(16) A DNS domain name as defined by the + InetAddressDNS textual convention. + + Each definition of a concrete InetAddressType value must be + accompanied by a definition of a textual convention for use + with that InetAddressType. + + To support future extensions, the InetAddressType textual + convention SHOULD NOT be sub-typed in object type definitions. + It MAY be sub-typed in compliance statements in order to + require only a subset of these address types for a compliant + implementation. + + Implementations must ensure that InetAddressType objects + and any dependent objects (e.g., InetAddress objects) are + consistent. An inconsistentValue error must be generated + if an attempt to change an InetAddressType object would, + for example, lead to an undefined InetAddress value. In + + particular, InetAddressType/InetAddress pairs must be + changed together if the address type changes (e.g., from + ipv6(2) to ipv4(1))." 
+ SYNTAX INTEGER { + unknown(0), + ipv4(1), + ipv6(2), + ipv4z(3), + ipv6z(4), + dns(16) + } + + + + + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib deleted file mode 100644 index c6e7a2a8962b6..0000000000000 --- a/plugins/inputs/snmp/testdata/test.mib +++ /dev/null @@ -1,97 +0,0 @@ -TEST DEFINITIONS ::= BEGIN - -testOID ::= { 1 0 0 } - -testTable OBJECT-TYPE - SYNTAX SEQUENCE OF testTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 0 } - -testTableEntry OBJECT-TYPE - SYNTAX TestTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - server - } - ::= { testTable 1 } - -TestTableEntry ::= - SEQUENCE { - server OCTET STRING, - connections INTEGER, - latency OCTET STRING, - description OCTET STRING, - } - -server OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX INTEGER - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 2 } - -latency OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 3 } - -description OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 4 } - -hostname OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testOID 1 1 } - -testSecondaryTable OBJECT-TYPE - SYNTAX SEQUENCE OF testSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 3 } - -testSecondaryTableEntry OBJECT-TYPE - SYNTAX TestSecondaryTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - instance - } - ::= { testSecondaryTable 1 } - -TestSecondaryTableEntry ::= - SEQUENCE { - instance OCTET STRING, - connections INTEGER, - testTableIndex INTEGER, - } - -instance OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 2 } - -testTableIndex OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testSecondaryTableEntry 3 } -END diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 06bebbcad6176..e4a91080704e6 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,15 +1,16 @@ # SNMP Legacy Input Plugin -The SNMP input plugin gathers metrics from SNMP agents +## Deprecated in version 1.0. Use [SNMP input plugin][] -### Configuration: +The SNMP input plugin gathers metrics from SNMP agents +## Configuration -#### Very simple example +### Very simple example In this example, the plugin will gather value of OIDS: - - `.1.3.6.1.2.1.2.2.1.4.1` +- `.1.3.6.1.2.1.2.2.1.4.1` ```toml # Very Simple Example @@ -26,36 +27,34 @@ In this example, the plugin will gather value of OIDS: get_oids = [".1.3.6.1.2.1.2.2.1.4.1"] ``` - -#### Simple example +### Simple example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** +- named **ifnumber** +- named **interface_speed** With **inputs.snmp.get** section the plugin gets the oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* As you can see *ifSpeed* is not a valid OID. 
In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` Also as the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.1.0` - `.1.3.6.1.2.1.2.2.1.5.1` - ```toml # Simple example [[inputs.snmp]] @@ -86,36 +85,35 @@ In this example, the plugin will gather value of OIDS: ``` - -#### Simple bulk example +### Simple bulk example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** - - named **if_out_octets** +- named **ifnumber** +- named **interface_speed** +- named **if_out_octets** With **inputs.snmp.get** section the plugin gets oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* With **inputs.snmp.bulk** section the plugin gets the oid number: - - **if_out_octets** => *ifOutOctets* +- **if_out_octets** => *ifOutOctets* As you can see *ifSpeed* and *ifOutOctets* are not a valid OID. In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` - - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` Also, the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` And **if_out_octets** is a bulk request, the plugin will gathers all OIDS in the table. @@ -138,7 +136,6 @@ In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.2.1.16.5` - `...` - ```toml # Simple bulk example [[inputs.snmp]] @@ -172,8 +169,7 @@ In this example, the plugin will gather value of OIDS: oid = "ifOutOctets" ``` - -#### Table example +### Table example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. @@ -183,11 +179,11 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable1** +- named **iftable1** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable1** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable1** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable1** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -237,8 +233,7 @@ OIDS in the table and in the subtables oid = ".1.3.6.1.2.1.31.1.1.1" ``` - -#### Table with subtable example +### Table with subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. 
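For comparison with the deprecated configuration style above, the same kind of data can be gathered with the current `inputs.snmp` plugin. The block below is only a minimal sketch, assuming an SNMP v2c agent reachable at `127.0.0.1:161` with community `public`; the numeric OIDs are read off the TCP-MIB definitions added as test data in this patch (`tcp` = `.1.3.6.1.2.1.6`, `tcpCurrEstab` = `{ tcp 9 }`, `tcpConnectionState` and `tcpConnectionProcess` = `{ tcpConnectionEntry 7 }` and `{ tcpConnectionEntry 8 }`).

```toml
# Sketch only: gather a TCP-MIB scalar and two columns of tcpConnectionTable
# with the non-legacy snmp input. Assumes a local SNMPv2c agent; adjust the
# agent address and community for your environment.
[[inputs.snmp]]
  agents = ["udp://127.0.0.1:161"]
  version = 2
  community = "public"

  # tcpCurrEstab ::= { tcp 9 }; ".0" is the scalar instance
  [[inputs.snmp.field]]
    name = "tcpCurrEstab"
    oid = ".1.3.6.1.2.1.6.9.0"

  # walk selected columns of tcpConnectionTable ::= { tcp 19 }
  [[inputs.snmp.table]]
    name = "tcpConnectionTable"

    [[inputs.snmp.table.field]]
      name = "tcpConnectionState"
      oid = ".1.3.6.1.2.1.6.19.1.7"

    [[inputs.snmp.table.field]]
      name = "tcpConnectionProcess"
      oid = ".1.3.6.1.2.1.6.19.1.8"
```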
@@ -248,12 +243,12 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable2** +- named **iftable2** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable2** => `.1.3.6.1.2.1.2.2.1.13` +- **iftable2** => `.1.3.6.1.2.1.2.2.1.13` Also **iftable2** is a table, the plugin will gathers all OIDS in subtables: @@ -264,7 +259,6 @@ OIDS in subtables: - `.1.3.6.1.2.1.2.2.1.13.4` - `.1.3.6.1.2.1.2.2.1.13....` - ```toml # Table with subtable example [[inputs.snmp]] @@ -293,19 +287,18 @@ OIDS in subtables: # oid attribute is useless ``` - -#### Table with mapping example +### Table with mapping example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. Telegraf gathers value of OIDS of the table: - - named **iftable3** +- named **iftable3** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable3** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable3** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -332,11 +325,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -397,20 +391,19 @@ Note: the plugin will add instance name as tag *instance* # if empty, get all subtables ``` - -#### Table with both mapping and subtable example +### Table with both mapping and subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. Telegraf gathers value of OIDS of the table: - - named **iftable4** +- named **iftable4** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable4** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable4** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -431,11 +424,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. 
Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -457,8 +451,6 @@ the following OIDS: Note: the plugin will add instance name as tag *instance* - - ```toml # Table with both mapping and subtable example [[inputs.snmp]] @@ -503,7 +495,7 @@ Note: the plugin will add instance name as tag *instance* unit = "octets" ``` -#### Configuration notes +### Configuration notes - In **inputs.snmp.table** section, the `oid` attribute is useless if the `sub_tables` attributes is defined @@ -511,39 +503,41 @@ Note: the plugin will add instance name as tag *instance* - In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file` as `oid` attribute instead of a valid OID -### Measurements & Fields: +## Measurements & Fields With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - ifHCOutOctets + - ifHCOutOctets - ifInDiscards - - ifInDiscards + - ifInDiscards - ifHCInOctets - - ifHCInOctets + - ifHCInOctets -### Tags: +## Tags With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - host - - instance - - unit + - host + - instance + - unit - ifInDiscards - - host - - instance + - host + - instance - ifHCInOctets - - host - - instance - - unit + - host + - instance + - unit -### Example Output: +## Example Output With the last example (Table with both mapping and subtable example): -``` +```shell ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901 ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 ``` + +[SNMP input plugin]: /plugins/inputs/snmp diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 604a2205c0d2c..ce454cbfbad36 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -8,10 +8,10 @@ import ( "strings" "time" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/gosnmp/gosnmp" ) // Snmp is a snmp plugin @@ -46,9 +46,9 @@ type Host struct { // Table Table []HostTable // Oids - getOids []Data - bulkOids []Data - tables []HostTable + internalGetOids []Data + bulkOids []Data + tables []HostTable // array of processed oids // to skip oid duplication processedOids []string @@ -250,7 +250,7 @@ func fillnode(parentNode Node, oidName string, ids []string) { } } -func findnodename(node Node, ids []string) (string, string) { +func findNodeName(node Node, ids []string) (oidName string, instance string) { // ids = ["1", "3", "6", ...] 
if len(ids) == 1 { return node.name, ids[0] @@ -259,7 +259,7 @@ func findnodename(node Node, ids []string) (string, string) { // Get node subnode, ok := node.subnodes[id] if ok { - return findnodename(subnode, ids) + return findNodeName(subnode, ids) } // We got a node // Get node name @@ -345,7 +345,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { oid.rawOid = oidstring } } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } for _, oidName := range host.Collect { @@ -362,7 +362,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.rawOid = oid.Oid } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } } // Get GETBULK oids @@ -463,7 +463,7 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list + // Add the new oid to bulkOids list h.bulkOids = append(h.bulkOids, oid) } } @@ -569,8 +569,8 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list - h.getOids = append(h.getOids, oid) + // Add the new oid to internalGetOids list + h.internalGetOids = append(h.internalGetOids, oid) } } default: @@ -606,7 +606,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { defer snmpClient.Conn.Close() // Prepare OIDs oidsList := make(map[string]Data) - for _, oid := range h.getOids { + for _, oid := range h.internalGetOids { oidsList[oid.rawOid] = oid } oidsNameList := make([]string, 0, len(oidsList)) @@ -701,7 +701,7 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { // Prepare host and port host, portStr, err := net.SplitHostPort(h.Address) if err != nil { - portStr = string("161") + portStr = "161" } // convert port_str to port in uint16 port64, err := strconv.ParseUint(portStr, 10, 16) @@ -763,7 +763,7 @@ func (h *Host) HandleResponse( var oidName string var instance string // Get oidname and instance from translate file - oidName, instance = findnodename(initNode, + oidName, instance = findNodeName(initNode, strings.Split(variable.Name[1:], ".")) // Set instance tag // From mapping table diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md index f117c35cbeb56..a7f75afe3fe3d 100644 --- a/plugins/inputs/snmp_trap/README.md +++ b/plugins/inputs/snmp_trap/README.md @@ -6,21 +6,13 @@ notifications (traps and inform requests). Notifications are received on plain UDP. The port to listen is configurable. -### Prerequisites +## Note about Paths -This plugin uses the `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. +Path is a global variable, separate snmp instances will append the specified +path onto the global path variable -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. +## Configuration -### Configuration ```toml [[inputs.snmp_trap]] ## Transport, local address, and port to listen on. Transport must @@ -55,7 +47,7 @@ information. 
# priv_password = "" ``` -#### Using a Privileged Port +### Using a Privileged Port On many operating systems, listening on a privileged port (a port number less than 1024) requires extra permission. Since the default @@ -73,7 +65,7 @@ the privileged port. To use a privileged port on Linux, you can use setcap to enable the CAP_NET_BIND_SERVICE capability on the telegraf binary: -``` +```shell setcap cap_net_bind_service=+ep /usr/bin/telegraf ``` @@ -84,21 +76,22 @@ On Mac OS, listening on privileged ports is unrestricted on versions - snmp_trap - tags: - - source (string, IP address of trap source) - - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) - - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) - - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) - - version (string, "1" or "2c" or "3") - - context_name (string, value from v3 trap) - - engine_id (string, value from v3 trap) - - community (string, value from 1 or 2c trap) + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - context_name (string, value from v3 trap) + - engine_id (string, value from v3 trap) + - community (string, value from 1 or 2c trap) - fields: - - Fields are mapped from variables in the trap. Field names are + - Fields are mapped from variables in the trap. Field names are the trap variable names after MIB lookup. Field values are trap variable values. ### Example Output -``` + +```shell snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c,community=public snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c,community=public sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 ``` diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 9fffd8968d593..7bd6ba61d933d 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -3,28 +3,20 @@ package snmp_trap import ( "fmt" "net" - "os" - "path/filepath" "strconv" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/sleepinggenius2/gosmi" - "github.com/sleepinggenius2/gosmi/types" "github.com/gosnmp/gosnmp" ) var defaultTimeout = config.Duration(time.Second * 5) -type mibEntry struct { - mibName string - oidText string -} - type SnmpTrap struct { ServiceAddress string `toml:"service_address"` Timeout config.Duration `toml:"timeout"` @@ -45,7 +37,7 @@ type SnmpTrap struct { acc telegraf.Accumulator listener *gosnmp.TrapListener timeFunc func() time.Time - lookupFunc func(string) (mibEntry, error) + lookupFunc func(string) (snmp.MibEntry, error) errCh chan error makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc @@ -102,61 +94,23 @@ func init() { inputs.Add("snmp_trap", func() telegraf.Input { return &SnmpTrap{ timeFunc: time.Now, - lookupFunc: lookup, + lookupFunc: snmp.TrapLookup, ServiceAddress: "udp://:162", Timeout: defaultTimeout, + Path: []string{"/usr/share/snmp/mibs"}, Version: "2c", } }) } func (s *SnmpTrap) Init() error { - // must init, 
append path for each directory, load module for every file - // or gosmi will fail without saying why - gosmi.Init() - err := s.getMibsPath() + err := snmp.LoadMibsFromPath(s.Path, s.Log) if err != nil { s.Log.Errorf("Could not get path %v", err) } return nil } -func (s *SnmpTrap) getMibsPath() error { - var folders []string - for _, mibPath := range s.Path { - gosmi.AppendPath(mibPath) - folders = append(folders, mibPath) - err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { - if info.Mode()&os.ModeSymlink != 0 { - s, _ := os.Readlink(path) - folders = append(folders, s) - } - return nil - }) - if err != nil { - s.Log.Errorf("Filepath could not be walked %v", err) - } - for _, folder := range folders { - err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - gosmi.AppendPath(path) - } else if info.Mode()&os.ModeSymlink == 0 { - _, err := gosmi.LoadModule(info.Name()) - if err != nil { - s.Log.Errorf("Module could not be loaded %v", err) - } - } - return nil - }) - if err != nil { - s.Log.Errorf("Filepath could not be walked %v", err) - } - } - folders = []string{} - } - return nil -} - func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { s.acc = acc s.listener = gosnmp.NewTrapListener() @@ -277,17 +231,16 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { func (s *SnmpTrap) Stop() { s.listener.Close() - defer gosmi.Exit() err := <-s.errCh if nil != err { s.Log.Errorf("Error stopping trap listener %v", err) } } -func setTrapOid(tags map[string]string, oid string, e mibEntry) { +func setTrapOid(tags map[string]string, oid string, e snmp.MibEntry) { tags["oid"] = oid - tags["name"] = e.oidText - tags["mib"] = e.mibName + tags["name"] = e.OidText + tags["mib"] = e.MibName } func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { @@ -347,7 +300,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - var e mibEntry + var e snmp.MibEntry var err error e, err = s.lookupFunc(val) if nil != err { @@ -355,7 +308,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - value = e.oidText + value = e.OidText // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. // If v.Name is this oid, set a tag of the trap name. 
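// A minimal sketch of a lookupFunc double built on the shared snmp.MibEntry
// type (it mirrors the stub used in snmp_trap_test.go below; the function
// name and the map contents are illustrative only, not exhaustive):
//
//	func stubLookup(oid string) (snmp.MibEntry, error) {
//		known := map[string]snmp.MibEntry{
//			".1.3.6.1.6.3.1.1.4.1.0": {MibName: "SNMPv2-MIB", OidText: "snmpTrapOID.0"},
//			".1.3.6.1.6.3.1.1.5.1":   {MibName: "SNMPv2-MIB", OidText: "coldStart"},
//		}
//		if e, ok := known[oid]; ok {
//			return e, nil
//		}
//		return snmp.MibEntry{}, fmt.Errorf("unexpected oid")
//	}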
@@ -373,7 +326,7 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return } - name := e.oidText + name := e.OidText fields[name] = value } @@ -395,23 +348,3 @@ func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { s.acc.AddFields("snmp_trap", fields, tags, tm) } } - -func lookup(oid string) (e mibEntry, err error) { - var node gosmi.SmiNode - node, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) - - // ensure modules are loaded or node will be empty (might not error) - if err != nil { - return e, err - } - - e.oidText = node.RenderQualified() - - i := strings.Index(e.oidText, "::") - if i == -1 { - return e, fmt.Errorf("not found") - } - e.mibName = e.oidText[:i] - e.oidText = e.oidText[i+2:] - return e, nil -} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index f917a7bbff918..6c7c7df33e20f 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -10,11 +10,11 @@ import ( "time" "github.com/gosnmp/gosnmp" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/require" ) func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { @@ -133,7 +133,7 @@ func TestReceiveTrap(t *testing.T) { type entry struct { oid string - e mibEntry + e snmp.MibEntry } // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will @@ -181,23 +181,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -264,16 +264,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.2.3.0.55", - mibEntry{ - "enterpriseMIB", - "enterpriseOID", + snmp.MibEntry{ + MibName: "enterpriseMIB", + OidText: "enterpriseOID", }, }, }, @@ -318,16 +318,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "coldStartMIB", - "coldStartOID", + snmp.MibEntry{ + MibName: "coldStartMIB", + OidText: "coldStartOID", }, }, }, @@ -376,23 +376,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -440,23 +440,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + 
MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -503,23 +503,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -565,23 +565,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -627,23 +627,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -689,23 +689,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -751,23 +751,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -813,23 +813,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", 
+ e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -877,23 +877,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -941,23 +941,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1005,23 +1005,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1069,23 +1069,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1133,23 +1133,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1197,23 +1197,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - 
"UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1261,13 +1261,13 @@ func TestReceiveTrap(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, - lookupFunc: func(input string) (mibEntry, error) { + lookupFunc: func(input string) (snmp.MibEntry, error) { for _, entry := range tt.entries { if input == entry.oid { - return mibEntry{entry.e.mibName, entry.e.oidText}, nil + return snmp.MibEntry{MibName: entry.e.MibName, OidText: entry.e.OidText}, nil } } - return mibEntry{}, fmt.Errorf("Unexpected oid") + return snmp.MibEntry{}, fmt.Errorf("unexpected oid") }, //if cold start be answer otherwise err Log: testutil.Logger{}, @@ -1311,7 +1311,6 @@ func TestReceiveTrap(t *testing.T) { testutil.SortMetrics()) }) } - } func TestGosmiSingleMib(t *testing.T) { @@ -1378,7 +1377,7 @@ func TestGosmiSingleMib(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, - lookupFunc: lookup, + lookupFunc: snmp.TrapLookup, Log: testutil.Logger{}, Version: "2c", Path: []string{testDataPath}, diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index f5189a195af9d..c445e0f5a5c78 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -6,7 +6,7 @@ streaming (tcp, unix) or datagram (udp, unixgram) protocols. The plugin expects messages in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -### Configuration: +## Configuration This is a sample configuration for the plugin. @@ -92,7 +92,7 @@ at least 8MB before trying to run large amounts of UDP traffic to your instance. Check the current UDP/IP receive buffer limit & default by typing the following commands: -``` +```sh sysctl net.core.rmem_max sysctl net.core.rmem_default ``` @@ -100,7 +100,7 @@ sysctl net.core.rmem_default If the values are less than 8388608 bytes you should add the following lines to the /etc/sysctl.conf file: -``` +```text net.core.rmem_max=8388608 net.core.rmem_default=8388608 ``` @@ -108,7 +108,7 @@ net.core.rmem_default=8388608 Changes to /etc/sysctl.conf do not take effect until reboot. To update the values immediately, type the following commands as root: -``` +```sh sysctl -w net.core.rmem_max=8388608 sysctl -w net.core.rmem_default=8388608 ``` @@ -123,20 +123,20 @@ happens Check the current UDP/IP buffer limit by typing the following command: -``` +```sh sysctl kern.ipc.maxsockbuf ``` If the value is less than 9646900 bytes you should add the following lines to the /etc/sysctl.conf file (create it if necessary): -``` +```text kern.ipc.maxsockbuf=9646900 ``` Changes to /etc/sysctl.conf do not take effect until reboot. 
To update the values immediately, type the following command as root: -``` +```sh sysctl -w kern.ipc.maxsockbuf=9646900 ``` diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index a3ccacae1ceb2..1d363d8504669 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -12,19 +12,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/wlog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") -// testEmptyLog is a helper function to ensure no data is written to log. +// prepareLog is a helper function to ensure no data is written to log. // Should be called at the start of the test, and returns a function which should run at the end. -func testEmptyLog(t *testing.T) func() { +func prepareLog(t *testing.T) func() { buf := bytes.NewBuffer(nil) log.SetOutput(wlog.NewWriter(buf)) @@ -37,16 +37,17 @@ func testEmptyLog(t *testing.T) func() { for { line, err := buf.ReadBytes('\n') if err != nil { - assert.Equal(t, io.EOF, err) + require.Equal(t, io.EOF, err) break } - assert.Empty(t, string(line), "log not empty") + require.Empty(t, string(line), "log not empty") } } } func TestSocketListener_tcp_tls(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -84,8 +85,8 @@ func TestSocketListener_unix_tls(t *testing.T) { defer sl.Stop() tlsCfg, err := pki.TLSClientConfig().TLSConfig() - tlsCfg.InsecureSkipVerify = true require.NoError(t, err) + tlsCfg.InsecureSkipVerify = true secureClient, err := tls.Dial("unix", sock, tlsCfg) require.NoError(t, err) @@ -94,7 +95,8 @@ func TestSocketListener_unix_tls(t *testing.T) { } func TestSocketListener_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -113,7 +115,8 @@ func TestSocketListener_tcp(t *testing.T) { } func TestSocketListener_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -137,7 +140,8 @@ func TestSocketListener_unix(t *testing.T) { defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() f, _ := os.Create(sock) require.NoError(t, f.Close()) @@ -167,7 +171,8 @@ func TestSocketListener_unixgram(t *testing.T) { defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() _, err = os.Create(sock) require.NoError(t, err) @@ -188,7 +193,8 @@ func TestSocketListener_unixgram(t *testing.T) { } func TestSocketListenerDecode_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -208,7 +214,8 @@ func TestSocketListenerDecode_tcp(t *testing.T) { } func TestSocketListenerDecode_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = 
testutil.Logger{} @@ -256,18 +263,18 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { m3 := acc.Metrics[2] acc.Unlock() - assert.Equal(t, "test", m1.Measurement) - assert.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) - assert.True(t, time.Unix(0, 123456789).Equal(m1.Time)) + require.Equal(t, "test", m1.Measurement) + require.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) + require.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) + require.True(t, time.Unix(0, 123456789).Equal(m1.Time)) - assert.Equal(t, "test", m2.Measurement) - assert.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) - assert.True(t, time.Unix(0, 123456790).Equal(m2.Time)) + require.Equal(t, "test", m2.Measurement) + require.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) + require.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) + require.True(t, time.Unix(0, 123456790).Equal(m2.Time)) - assert.Equal(t, "test", m3.Measurement) - assert.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) - assert.True(t, time.Unix(0, 123456791).Equal(m3.Time)) + require.Equal(t, "test", m3.Measurement) + require.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) + require.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) + require.True(t, time.Unix(0, 123456791).Equal(m3.Time)) } diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md index c20fa92836c70..c9d1a6f36ba11 100644 --- a/plugins/inputs/solr/README.md +++ b/plugins/inputs/solr/README.md @@ -7,7 +7,7 @@ More about [performance statistics](https://cwiki.apache.org/confluence/display/ Tested from 3.5 to 7.* -### Configuration: +## Configuration ```toml [[inputs.solr]] @@ -22,9 +22,9 @@ Tested from 3.5 to 7.* # password = "pa$$word" ``` -### Example output of gathered metrics: +## Example output of gathered metrics -``` +```shell ➜ ~ telegraf -config telegraf.conf -input-filter solr -test * Plugin: solr, Collection 1 > solr_core,core=main,handler=searcher,host=testhost deleted_docs=17616645i,max_docs=261848363i,num_docs=244231718i 1478214949000000000 diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 08531e7433b34..c74c3bcf6b09e 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -202,7 +202,7 @@ func getCoresFromStatus(adminCoresStatus *AdminCoresStatus) []string { // Add core metrics from admin to accumulator // This is the only point where size_in_bytes is available (as far as I checked) -func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, time time.Time) { +func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, measurementTime time.Time) { for core, metrics := range adminCoreStatus.Status { coreFields := map[string]interface{}{ "deleted_docs": metrics.Index.DeletedDocs, @@ -214,13 +214,13 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo "solr_admin", coreFields, map[string]string{"core": core}, - time, + measurementTime, ) } } // Add core metrics section to accumulator -func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var coreMetrics 
map[string]Core if len(mBeansData.SolrMbeans) < 2 { return fmt.Errorf("no core metric data to unmarshal") @@ -243,14 +243,14 @@ func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBea map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil } // Add query metrics section to accumulator -func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var queryMetrics map[string]QueryHandler if len(mBeansData.SolrMbeans) < 4 { @@ -284,7 +284,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -324,7 +324,7 @@ func convertQueryHandlerMap(value map[string]interface{}) map[string]interface{} } // Add update metrics section to accumulator -func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var updateMetrics map[string]UpdateHandler if len(mBeansData.SolrMbeans) < 6 { @@ -363,7 +363,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -404,7 +404,7 @@ func getInt(unk interface{}) int64 { } // Add cache metrics section to accumulator -func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { if len(mBeansData.SolrMbeans) < 8 { return fmt.Errorf("no cache metric data to unmarshal") } @@ -444,7 +444,7 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md index cc8a464016d28..a932a71c84128 100644 --- a/plugins/inputs/sql/README.md +++ b/plugins/inputs/sql/README.md @@ -5,7 +5,7 @@ types are supported and their settings might differ (especially the connection p Please check the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the `driver` name and options for the data-source-name (`dsn`) options. -### Configuration +## Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage `. @@ -73,13 +73,13 @@ generate it using `telegraf --usage `. ## Column names containing fields (explicit types) ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over - ## the automatic (driver-based) conversion below. - ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. 
# field_columns_float = [] # field_columns_int = [] - # field_columns_uint = [] - # field_columns_bool = [] - # field_columns_string = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] ## Column names containing fields (automatic types) ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty @@ -89,16 +89,20 @@ generate it using `telegraf --usage `. # field_columns_exclude = [] ``` -### Options -#### Driver +## Options + +### Driver + The `driver` and `dsn` options specify how to connect to the database. As especially the `dsn` format and values vary with the `driver` refer to the list of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible values and more details. -#### Connection limits +### Connection limits + With these options you can limit the number of connections kept open by this plugin. Details about the exact workings can be found in the [golang sql documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime). -#### Query sections +### Query sections + Multiple `query` sections can be specified for this plugin. Each specified query will first be prepared on the server and then executed in every interval using the column mappings specified. Please note that `tag` and `field` columns are not exclusive, i.e. a column can be added to both. When using both `include` and `exclude` lists, the `exclude` @@ -107,31 +111,38 @@ the filter. In case any the columns specified in `measurement_col` or `time_col` the plugin falls-back to the documented defaults. Fields or tags specified in the includes of the options but missing in the returned query are silently ignored. -### Types +## Types + This plugin relies on the driver to do the type conversion. For the different properties of the metric the following types are accepted. -#### Measurement +### Measurement + Only columns of type `string` are accepted. -#### Time +### Time + For the metric time columns of type `time` are accepted directly. For numeric columns, `time_format` should be set to any of `unix`, `unix_ms`, `unix_ns` or `unix_us` accordingly. By default the a timestamp in `unix` format is expected. For string columns, please specify the `time_format` accordingly. See the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for details. -#### Tags +### Tags + For tags columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Those values will be converted to string. -#### Fields +### Fields + For fields columns with textual values (`string` and `bytes`), signed and unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and `time` values are accepted. Here `bytes` will be converted to `string`, signed and unsigned integer values will be converted to `int64` or `uint64` respectively. Floating-point values are converted to `float64` and `time` is converted to a nanosecond timestamp of type `int64`. 
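For illustration, a minimal sketch of a `query` section that forces one returned column to an integer field while excluding another is shown below. The typed-column options are those listed in the sample configuration above; the `[[inputs.sql.query]]` section name follows the "multiple `query` sections" wording, and the table and column names are made up for this sketch.

```toml
[[inputs.sql.query]]
  ## hypothetical query; "guest_id" is forced to an integer field,
  ## "note" is excluded, remaining columns use the automatic conversion
  query = "SELECT name, guest_id, note FROM guests"
  field_columns_int = ["guest_id"]
  field_columns_exclude = ["note"]
```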
-### Example Output +## Example Output + Using the [MariaDB sample database](https://www.mariadbtutorial.com/getting-started/mariadb-sample-database) and the configuration + ```toml [[inputs.sql]] driver = "mysql" @@ -145,7 +156,8 @@ configuration ``` Telegraf will output the following metrics -``` + +```shell nation,host=Hugin,name=John guest_id=1i 1611332164000000000 nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000 nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000 diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 10f6064581dfb..1ee48ccbae5da 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,18 +1,21 @@ # SQL Server Input Plugin + The `sqlserver` plugin provides metrics for your SQL Server instance. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. -### The SQL Server plugin supports the following editions/versions of SQL Server +## The SQL Server plugin supports the following editions/versions of SQL Server + - SQL Server - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) - - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will - need to be addressed by the community. + - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance +- Azure SQL Elastic Pool + +## Additional Setup -### Additional Setup: +You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: -You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: ```sql USE master; GO @@ -25,6 +28,7 @@ GO ``` For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database. + ```sql CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword'; GO @@ -32,7 +36,28 @@ GRANT VIEW DATABASE STATE TO [telegraf]; GO ``` -### Configuration: +For Azure SQL Elastic Pool, please follow the following instructions to collect metrics. + +On master logical database, create an SQL login 'telegraf' and assign it to the server-level role ##MS_ServerStateReader##. + +```sql +CREATE LOGIN [telegraf] WITH PASSWORD = N'mystrongpassword'; +GO +ALTER SERVER ROLE ##MS_ServerStateReader## + ADD MEMBER [telegraf]; +GO +``` + +Elastic pool metrics can be collected from any database in the pool if a user for the `telegraf` login is created in that database. For collection to work, this database must remain in the pool, and must not be renamed. If you plan to add/remove databases from this pool, create a separate database for monitoring purposes that will remain in the pool. + +> Note: To avoid duplicate monitoring data, do not collect elastic pool metrics from more than one database in the same pool. + +```sql +GO +CREATE USER [telegraf] FOR LOGIN telegraf; +``` + +## Configuration ```toml [agent] @@ -58,42 +83,34 @@ GO ## "database_type" enables a specific set of queries depending on the database type. 
If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. - ## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" - - ## Queries enabled by default for database_type = "AzureSQLDB" are - - ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, - ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" - # database_type = "AzureSQLDB" + database_type = "SQLServer" - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] + ## A list of queries to include. If not specified, all the below listed queries are used. + include_query = [] ## A list of queries to explicitly ignore. - # exclude_query = [] - - ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - - ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, - ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - - # database_type = "AzureSQLManagedInstance" - - # include_query = [] - - # exclude_query = [] + exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] ## Queries enabled by default for database_type = "SQLServer" are - ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, - ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu + ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates - database_type = "SQLServer" + ## Queries enabled by default for database_type = "AzureSQLDB" are - + ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, + ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers - include_query = [] + ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - + ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, + ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - ## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default - exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + ## Queries enabled by default for database_type = "AzureSQLPool" are - + ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, + ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers - ## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use + ## Following are old config settings + ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use ## the new mechanism of identifying 
the database_type there by use it's corresponding queries ## Optional parameter, setting this to 2 will use a new version @@ -126,30 +143,39 @@ GO ## - AzureSQLDBRequests ## - AzureSQLDBSchedulers - ## database_type = AzureSQLManagedInstance by default collects the following queries - ## - AzureSQLMIResourceStats - ## - AzureSQLMIResourceGovernance - ## - AzureSQLMIDatabaseIO - ## - AzureSQLMIServerProperties - ## - AzureSQLMIOsWaitstats - ## - AzureSQLMIMemoryClerks - ## - AzureSQLMIPerformanceCounters - ## - AzureSQLMIRequests - ## - AzureSQLMISchedulers - - ## database_type = SQLServer by default collects the following queries - ## - SQLServerPerformanceCounters - ## - SQLServerWaitStatsCategorized - ## - SQLServerDatabaseIO - ## - SQLServerProperties - ## - SQLServerMemoryClerks - ## - SQLServerSchedulers - ## - SQLServerRequests - ## - SQLServerVolumeSpace - ## - SQLServerCpu - ## and following as optional (if mentioned in the include_query list) - ## - SQLServerAvailabilityReplicaStates - ## - SQLServerDatabaseReplicaStates + ## database_type = AzureSQLManagedInstance by default collects the following queries + ## - AzureSQLMIResourceStats + ## - AzureSQLMIResourceGovernance + ## - AzureSQLMIDatabaseIO + ## - AzureSQLMIServerProperties + ## - AzureSQLMIOsWaitstats + ## - AzureSQLMIMemoryClerks + ## - AzureSQLMIPerformanceCounters + ## - AzureSQLMIRequests + ## - AzureSQLMISchedulers + + ## database_type = AzureSQLPool by default collects the following queries + ## - AzureSQLPoolResourceStats + ## - AzureSQLPoolResourceGovernance + ## - AzureSQLPoolDatabaseIO + ## - AzureSQLPoolOsWaitStats, + ## - AzureSQLPoolMemoryClerks + ## - AzureSQLPoolPerformanceCounters + ## - AzureSQLPoolSchedulers + + ## database_type = SQLServer by default collects the following queries + ## - SQLServerPerformanceCounters + ## - SQLServerWaitStatsCategorized + ## - SQLServerDatabaseIO + ## - SQLServerProperties + ## - SQLServerMemoryClerks + ## - SQLServerSchedulers + ## - SQLServerRequests + ## - SQLServerVolumeSpace + ## - SQLServerCpu + ## and following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates ## Version 2 by default collects the following queries ## Version 2 is being deprecated, please consider using database_type. @@ -175,10 +201,9 @@ GO ## - MemoryClerk ## - VolumeSpace ## - PerformanceMetrics - ``` -### Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) +## Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) Azure SQL Database supports 2 main methods of authentication: [SQL authentication and AAD authentication](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). The recommended practice is to [use AAD authentication when possible](https://docs.microsoft.com/en-us/azure/azure-sql/database/authentication-aad-overview). 
@@ -186,10 +211,11 @@ AAD is a more modern authentication protocol, allows for easier credential/role To enable support for AAD authentication, we leverage the existing AAD authentication support in the [SQL Server driver for Go](https://github.com/denisenkom/go-mssqldb#azure-active-directory-authentication---preview) -#### How to use AAD Auth with MSI +### How to use AAD Auth with MSI - Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm). - On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the below script. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource. + ```sql EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = '''') BEGIN @@ -198,25 +224,31 @@ EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = ''] FROM EXTERNAL PROVIDER') EXECUTE ('GRANT VIEW DATABASE STATE TO []') ``` + - On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. - On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). The auth method must be set to "AAD" + ```toml servers = [ "Server=.database.windows.net;Port=1433;Database=;app name=telegraf;log=1;", ] auth_method = "AAD" ``` + - Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). -### Metrics: +## Metrics + To provide backwards compatibility, this plugin support two versions of metrics queries. **Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. -#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type. 
+### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type + The original metrics queries provide: + - *Performance counters*: 1000+ metrics from `sys.dm_os_performance_counters` - *Performance metrics*: special performance and ratio metrics - *Wait stats*: wait tasks categorized from `sys.dm_os_wait_stats` @@ -229,12 +261,15 @@ The original metrics queries provide: - *CPU*: cpu usage from `sys.dm_os_ring_buffers` If you are using the original queries all stats have the following tags: + - `servername`: hostname:instance - `type`: type of stats to easily filter measurements -#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type. +### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type + The new (version 2) metrics provide: -- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` + +- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats`. - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. - *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more @@ -263,115 +298,133 @@ The new (version 2) metrics provide: - Stats from `sys.dm_db_wait_stats` - Resource governance stats from `sys.dm_user_db_resource_governance` - Stats from `sys.dm_db_resource_stats` - +### database_type = "AzureSQLDB" -#### database_type = "AzureSQLDB These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test,differences in DMVs: -- AzureSQLDBDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. -- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. -= AzureSQLDBResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance` -- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLDBServerProperties: Relevant Azure SQL relevant properties from such as Tier, #Vcores, Memory etc, storage, etc. -- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only. + +- *AzureSQLDBDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- *AzureSQLDBMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLDBResourceGovernance*: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance` +- *AzureSQLDBPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- *AzureSQLDBServerProperties*: Relevant Azure SQL relevant properties from such as Tier, #Vcores, Memory etc, storage, etc. 
+- *AzureSQLDBWaitstats*: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only. - *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide - *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` - *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots. +### database_type = "AzureSQLManagedInstance" -#### database_type = "AzureSQLManagedInstance These are metrics for Azure SQL Managed instance, are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs: -- AzureSQLMIDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. -- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. -- AzureSQLMIResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance` -- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. -- AzureSQLMIServerProperties: Relevant Azure SQL relevant properties such as Tier, #Vcores, Memory etc, storage, etc. -- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide -- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` -- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots. - -#### database_type = "SQLServer -- SQLServerDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` -- SQLServerMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. -- SQLServerPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: + +- *AzureSQLMIDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. +- *AzureSQLMIMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLMIResourceGovernance*: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance` +- *AzureSQLMIPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale. +- *AzureSQLMIServerProperties*: Relevant Azure SQL relevant properties such as Tier, #Vcores, Memory etc, storage, etc. +- *AzureSQLMIOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. 
These waits are collected as they occur and instance wide +- *AzureSQLMIRequests*: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests` +- *AzureSQLMISchedulers*: This captures `sys.dm_os_schedulers` snapshots. + +### database_type = "AzureSQLPool" + +These are metrics for Azure SQL to monitor resources usage at Elastic Pool level. These metrics require additional permissions to be collected, please ensure to check additional setup section in this documentation. + +- *AzureSQLPoolResourceStats*: Returns resource usage statistics for the current elastic pool in a SQL Database server. Queried from `sys.dm_resource_governor_resource_pools_history_ex`. +- *AzureSQLPoolResourceGovernance*: Returns actual configuration and capacity settings used by resource governance mechanisms in the current elastic pool. Queried from `sys.dm_user_db_resource_governance`. +- *AzureSQLPoolDatabaseIO*: Returns I/O statistics for data and log files for each database in the pool. Queried from `sys.dm_io_virtual_file_stats`. +- *AzureSQLPoolOsWaitStats*: Returns information about all the waits encountered by threads that executed. Queried from `sys.dm_os_wait_stats`. +- *AzureSQLPoolMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`. +- *AzureSQLPoolPerformanceCounters*: A selected list of performance counters from `sys.dm_os_performance_counters`. Note: Performance counters where the cntr_type column value is 537003264 are already returned with a percentage format between 0 and 100. For other counters, please check [sys.dm_os_performance_counters](https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current) documentation. +- *AzureSQLPoolSchedulers*: This captures `sys.dm_os_schedulers` snapshots. + +### database_type = "SQLServer" + +- *SQLServerDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` +- *SQLServerMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. +- *SQLServerPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more - *Availability Groups*: Bytes sent to replica, Bytes received from replica, Log bytes received, Log send queue, transaction delay, + more - *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. -- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. -- SQLServerSchedulers - This captures `sys.dm_os_schedulers`. 
-- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and
+- *SQLServerProperties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL, relevant properties such as Tier, #Vcores, Memory, etc.
+- *SQLServerWaitStatsCategorized*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store.
+- *SQLServerSchedulers*: This captures `sys.dm_os_schedulers`.
+- *SQLServerRequests*: This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and blocking sessions.
-- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem.
+- *SQLServerVolumeSpace*: Uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem.
-- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance).
+- *SQLServerCpu*: Uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data; the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance).
- SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup
- SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup
+### Output Measures
-#### Output Measures
The guiding principle is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type.
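Whichever query set is used, it is selected through the `database_type` setting in the plugin configuration. As a minimal, illustrative sketch only (the option names `servers`, `database_type`, and `health_metric` follow this section's description; the connection string, host name, and credentials are placeholders, not part of the change being reviewed):

```toml
[[inputs.sqlserver]]
  ## Illustrative connection string; replace server, user, and password.
  servers = [
    "Server=sqlserver.example.com;Port=1433;User Id=telegraf;Password=changeme;app name=telegraf;"
  ]

  ## Selects one of the query sets described above:
  ## "SQLServer", "AzureSQLDB", "AzureSQLManagedInstance", or "AzureSQLPool".
  database_type = "SQLServer"

  ## Optional: emit the sqlserver_telegraf_health measurement described later
  ## in this section to track connection and query success.
  health_metric = true
```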
-`sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats`
-`sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats
-`sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties
-`sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk
-`sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters
-`sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers
-
+- `sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats`
+- `sqlserver_waitstats` - Used by WaitStatsCategorized, AzureSQLDBOsWaitstats, AzureSQLMIOsWaitstats
+- `sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties, AzureSQLMIServerProperties, ServerProperties
+- `sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks, MemoryClerk
+- `sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters, PerformanceCounters
+- `sqlserver_schedulers` - Used by SQLServerSchedulers, AzureSQLDBSchedulers, AzureSQLMISchedulers given the data is from `sys.dm_os_schedulers`
The following Performance counter metrics can be used directly, with no delta calculations:
- - SQLServer:Buffer Manager\Buffer cache hit ratio
- - SQLServer:Buffer Manager\Page life expectancy
- - SQLServer:Buffer Node\Page life expectancy
- - SQLServer:Database Replica\Log Apply Pending Queue
- - SQLServer:Database Replica\Log Apply Ready Queue
- - SQLServer:Database Replica\Log Send Queue
- - SQLServer:Database Replica\Recovery Queue
- - SQLServer:Databases\Data File(s) Size (KB)
- - SQLServer:Databases\Log File(s) Size (KB)
- - SQLServer:Databases\Log File(s) Used Size (KB)
- - SQLServer:Databases\XTP Memory Used (KB)
- - SQLServer:General Statistics\Active Temp Tables
- - SQLServer:General Statistics\Processes blocked
- - SQLServer:General Statistics\Temp Tables For Destruction
- - SQLServer:General Statistics\User Connections
- - SQLServer:Memory Broker Clerks\Memory broker clerk size
- - SQLServer:Memory Manager\Memory Grants Pending
- - SQLServer:Memory Manager\Target Server Memory (KB)
- - SQLServer:Memory Manager\Total Server Memory (KB)
- - SQLServer:Resource Pool Stats\Active memory grant amount (KB)
- - SQLServer:Resource Pool Stats\Disk Read Bytes/sec
- - SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec
- - SQLServer:Resource Pool Stats\Disk Read IO/sec
- - SQLServer:Resource Pool Stats\Disk Write Bytes/sec
- - SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec
- - SQLServer:Resource Pool Stats\Disk Write IO/sec
- - SQLServer:Resource Pool Stats\Used memory (KB)
- - SQLServer:Transactions\Free Space in tempdb (KB)
- - SQLServer:Transactions\Version Store Size (KB)
- - SQLServer:User Settable\Query
- - SQLServer:Workload Group Stats\Blocked tasks
- - SQLServer:Workload Group Stats\CPU usage %
- - SQLServer:Workload Group Stats\Queued requests
- - SQLServer:Workload Group Stats\Requests completed/sec
+
+- SQLServer:Buffer Manager\Buffer cache hit ratio
+- SQLServer:Buffer Manager\Page life expectancy
+- SQLServer:Buffer Node\Page life
expectancy +- SQLServer:Database Replica\Log Apply Pending Queue +- SQLServer:Database Replica\Log Apply Ready Queue +- SQLServer:Database Replica\Log Send Queue +- SQLServer:Database Replica\Recovery Queue +- SQLServer:Databases\Data File(s) Size (KB) +- SQLServer:Databases\Log File(s) Size (KB) +- SQLServer:Databases\Log File(s) Used Size (KB) +- SQLServer:Databases\XTP Memory Used (KB) +- SQLServer:General Statistics\Active Temp Tables +- SQLServer:General Statistics\Processes blocked +- SQLServer:General Statistics\Temp Tables For Destruction +- SQLServer:General Statistics\User Connections +- SQLServer:Memory Broker Clerks\Memory broker clerk size +- SQLServer:Memory Manager\Memory Grants Pending +- SQLServer:Memory Manager\Target Server Memory (KB) +- SQLServer:Memory Manager\Total Server Memory (KB) +- SQLServer:Resource Pool Stats\Active memory grant amount (KB) +- SQLServer:Resource Pool Stats\Disk Read Bytes/sec +- SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Read IO/sec +- SQLServer:Resource Pool Stats\Disk Write Bytes/sec +- SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Write IO/sec +- SQLServer:Resource Pool Stats\Used memory (KB) +- SQLServer:Transactions\Free Space in tempdb (KB) +- SQLServer:Transactions\Version Store Size (KB) +- SQLServer:User Settable\Query +- SQLServer:Workload Group Stats\Blocked tasks +- SQLServer:Workload Group Stats\CPU usage % +- SQLServer:Workload Group Stats\Queued requests +- SQLServer:Workload Group Stats\Requests completed/sec Version 2 queries have the following tags: + - `sql_instance`: Physical host and instance name (hostname:instance) - `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. -#### Health Metric +### Health Metric + All collection versions (version 1, version 2, and database_type) support an optional plugin health metric called `sqlserver_telegraf_health`. This metric tracks if connections to SQL Server are succeeding or failing. Users can leverage this metric to detect if their SQL Server monitoring is not working as intended. In the configuration file, toggling `health_metric` to `true` will enable collection of this metric. By default, this value is set to `false` and the metric is not collected. The health metric emits one record for each connection specified by `servers` in the configuration file. The health metric emits the following tags: + - `sql_instance` - Name of the server specified in the connection string. This value is emitted as-is in the connection string. If the server could not be parsed from the connection string, a constant placeholder value is emitted - `database_name` - Name of the database or (initial catalog) specified in the connection string. This value is emitted as-is in the connection string. If the database could not be parsed from the connection string, a constant placeholder value is emitted The health metric emits the following fields: + - `attempted_queries` - Number of queries that were attempted for this connection - `successful_queries` - Number of queries that completed successfully for this connection - `database_type` - Type of database as specified by `database_type`. 
If `database_type` is empty, the `QueryVersion` and `AzureDB` fields are concatenated instead diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqldbqueries.go similarity index 55% rename from plugins/inputs/sqlserver/azuresqlqueries.go rename to plugins/inputs/sqlserver/azuresqldbqueries.go index 17361c20d41f8..fad68e0ea9b03 100644 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ b/plugins/inputs/sqlserver/azuresqldbqueries.go @@ -5,7 +5,7 @@ import ( ) //------------------------------------------------------------------------------------------------ -//------------------ Azure SQL Database ------------------------------------------------------ +//------------------ Azure SQL Database ---------------------------------------------------------- //------------------------------------------------------------------------------------------------ // Only executed if AzureDB flag is set const sqlAzureDBResourceStats string = ` @@ -38,7 +38,7 @@ ORDER BY [end_time] DESC; ` -// Resource Governamce is only relevant to Azure SQL DB into separate collector +// Resource Governance is only relevant to Azure SQL DB into separate collector // This will only be collected for Azure SQL Database. const sqlAzureDBResourceGovernance string = ` IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ @@ -678,544 +678,3 @@ SELECT ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability FROM sys.dm_os_schedulers AS s ` - -//------------------------------------------------------------------------------------------------ -//------------------ Azure Managed Instance ------------------------------------------------------ -//------------------------------------------------------------------------------------------------ -const sqlAzureMIProperties = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP 1 - 'sqlserver_server_properties' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[virtual_core_count] AS [cpu_count] - ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] - ,[sku] - ,SERVERPROPERTY('EngineEdition') AS [engine_edition] - ,[hardware_generation] AS [hardware_type] - ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] - ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] - ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] - ,SERVERPROPERTY('ProductVersion') AS [sql_version] - ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] - ,[db_online] - ,[db_restoring] - ,[db_recovering] - ,[db_recoveryPending] - ,[db_suspect] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.server_resource_stats -CROSS APPLY ( - SELECT - SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] - ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] - ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] - ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] - ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] - ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] - FROM sys.databases -) AS dbs -ORDER BY - [start_time] DESC; -` - -const sqlAzureMIResourceStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP(1) - 'sqlserver_azure_db_resource_stats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM - sys.server_resource_stats -ORDER BY - [end_time] DESC; -` - -const sqlAzureMIResourceGovernance string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_instance_resource_governance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[instance_cap_cpu] - ,[instance_max_log_rate] - ,[instance_max_worker_threads] - ,[tempdb_log_file_number] - ,[volume_local_iops] - ,[volume_external_xstore_iops] - ,[volume_managed_xstore_iops] - ,[volume_type_local_iops] as [voltype_local_iops] - ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] - ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] - ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_instance_resource_governance; -` - -const sqlAzureMIDatabaseIO = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_database_io' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension - ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension - ,mf.[type_desc] AS [file_type] - ,vfs.[io_stall_read_ms] AS [read_latency_ms] - ,vfs.[num_of_reads] AS [reads] - ,vfs.[num_of_bytes_read] AS [read_bytes] - ,vfs.[io_stall_write_ms] AS [write_latency_ms] - ,vfs.[num_of_writes] AS [writes] - ,vfs.[num_of_bytes_written] AS [write_bytes] - ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] - ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs -LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) - ON vfs.[database_id] = mf.[database_id] - AND vfs.[file_id] = mf.[file_id] -WHERE - vfs.[database_id] < 32760 -` - -const sqlAzureMIMemoryClerks = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_memory_clerks' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,mc.[type] AS [clerk_type] - ,SUM(mc.[pages_kb]) AS [size_kb] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) -GROUP BY - mc.[type] -HAVING - SUM(mc.[pages_kb]) >= 1024 -OPTION(RECOMPILE); -` - -const sqlAzureMIOsWaitStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_waitstats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,ws.[wait_type] - ,[wait_time_ms] - ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] - ,[signal_wait_time_ms] - ,[max_wait_time_ms] - ,[waiting_tasks_count] - ,CASE - WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' - WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' - WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' - WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' - WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' - WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' - WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' - WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' - WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' - WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' - or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' - WHEN ws.[wait_type] LIKE 'SLEEP[_]%' - or ws.[wait_type] IN ( - 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', - 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', - 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' - WHEN ws.[wait_type] IN( - 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', - 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' - WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' - WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' - WHEN ws.[wait_type] IN ( - 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', - 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' - WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' - WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' - or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' - WHEN ws.[wait_type] IN( - 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', - 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' - WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') - or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' - or ws.[wait_type] like 'BP%' THEN 'Parallelism' - WHEN ws.[wait_type] IN( - 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', - 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', - 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' - WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' - WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' - or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' - or ws.[wait_type] LIKE 'SE_REPL[_]%' - or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' - WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' - or ws.[wait_type] IN ( - 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', - 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', - 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' - WHEN ws.[wait_type] IN ( - 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', - 'FT_IFTSHC_MUTEX', 
'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', - 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', - 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' - ELSE 'Other' - END as [wait_category] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) -WHERE - ws.[wait_type] NOT IN ( - N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', - N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', - N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', - N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', - N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', - N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', - N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', - N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', - N'PARALLEL_REDO_WORKER_WAIT_WORK', - N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', - N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', - N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', - N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', - N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', - N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', - N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', - N'QDS_ASYNC_QUEUE', - N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', - N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', - N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', - N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', - N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - N'SQLTRACE_WAIT_ENTRIES', - N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', - N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', - N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', - N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', - N'RBIO_COMM_RETRY') -AND [waiting_tasks_count] > 10 -AND [wait_time_ms] > 100; -` - -const sqlAzureMIPerformanceCounters = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -DECLARE @PCounters TABLE -( - [object_name] nvarchar(128), - [counter_name] nvarchar(128), - [instance_name] nvarchar(128), - [cntr_value] bigint, - [cntr_type] INT , - Primary Key([object_name],[counter_name],[instance_name]) -); - -WITH PerfCounters AS ( - SELECT DISTINCT - RTrim(spi.[object_name]) [object_name] - ,RTrim(spi.[counter_name]) [counter_name] - ,CASE WHEN ( - RTRIM(spi.[object_name]) LIKE '%:Databases' - OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' - OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' - OR RTRIM(spi.[object_name]) LIKE '%:Query Store' - OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' - OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value - WHEN - RTRIM([object_name]) LIKE '%:Availability Replica' - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) - ELSE RTRIM(spi.instance_name) - END AS [instance_name] - ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] - ,spi.[cntr_type] - FROM sys.dm_os_performance_counters AS spi - LEFT JOIN sys.databases AS d - ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID - = CASE - /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ - WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL - THEN d.[name] - ELSE d.[physical_database_name] - END - WHERE - counter_name IN ( - 'SQL Compilations/sec' - ,'SQL Re-Compilations/sec' - ,'User Connections' - ,'Batch Requests/sec' - ,'Logouts/sec' - ,'Logins/sec' - ,'Processes blocked' - ,'Latch Waits/sec' - ,'Full Scans/sec' - ,'Index Searches/sec' - ,'Page Splits/sec' - ,'Page lookups/sec' - ,'Page reads/sec' - ,'Page writes/sec' - ,'Readahead pages/sec' - ,'Lazy writes/sec' - ,'Checkpoint pages/sec' - ,'Table Lock Escalations/sec' - ,'Page life expectancy' - ,'Log File(s) Size (KB)' - ,'Log File(s) Used Size (KB)' - ,'Data File(s) Size (KB)' - ,'Transactions/sec' - ,'Write Transactions/sec' - ,'Active Transactions' - ,'Log Growths' - ,'Active Temp Tables' - ,'Logical Connections' - ,'Temp Tables Creation Rate' - ,'Temp Tables For Destruction' - ,'Free Space in tempdb (KB)' - ,'Version Store Size (KB)' - ,'Memory Grants Pending' - ,'Memory Grants Outstanding' - ,'Free list stalls/sec' - ,'Buffer cache hit ratio' - ,'Buffer cache hit ratio base' - ,'Backup/Restore Throughput/sec' - ,'Total Server Memory (KB)' - ,'Target Server Memory (KB)' - ,'Log Flushes/sec' - ,'Log Flush Wait Time' - ,'Memory broker clerk size' - ,'Log Bytes Flushed/sec' - ,'Bytes Sent to Replica/sec' - ,'Log Send Queue' - ,'Bytes Sent to Transport/sec' - ,'Sends to Replica/sec' - ,'Bytes Sent to Transport/sec' - ,'Sends to Transport/sec' - ,'Bytes Received from Replica/sec' - ,'Receives from Replica/sec' - ,'Flow Control Time (ms/sec)' - ,'Flow Control/sec' - ,'Resent Messages/sec' - ,'Redone Bytes/sec' - ,'XTP Memory Used (KB)' - ,'Transaction Delay' - ,'Log Bytes Received/sec' - ,'Log Apply Pending Queue' - ,'Redone Bytes/sec' - ,'Recovery Queue' - ,'Log 
Apply Ready Queue' - ,'CPU usage %' - ,'CPU usage % base' - ,'Queued requests' - ,'Requests completed/sec' - ,'Blocked tasks' - ,'Active memory grant amount (KB)' - ,'Disk Read Bytes/sec' - ,'Disk Read IO Throttled/sec' - ,'Disk Read IO/sec' - ,'Disk Write Bytes/sec' - ,'Disk Write IO Throttled/sec' - ,'Disk Write IO/sec' - ,'Used memory (KB)' - ,'Forwarded Records/sec' - ,'Background Writer pages/sec' - ,'Percent Log Used' - ,'Log Send Queue KB' - ,'Redo Queue KB' - ,'Mirrored Write Transactions/sec' - ,'Group Commit Time' - ,'Group Commits/Sec' - ,'Workfiles Created/sec' - ,'Worktables Created/sec' - ,'Distributed Query' - ,'DTC calls' - ,'Query Store CPU usage' - ) OR ( - spi.[object_name] LIKE '%User Settable%' - OR spi.[object_name] LIKE '%SQL Errors%' - OR spi.[object_name] LIKE '%Batch Resp Statistics%' - ) OR ( - spi.[instance_name] IN ('_Total') - AND spi.[counter_name] IN ( - 'Lock Timeouts/sec' - ,'Lock Timeouts (timeout > 0)/sec' - ,'Number of Deadlocks/sec' - ,'Lock Waits/sec' - ,'Latch Waits/sec' - ) - ) -) - -INSERT INTO @PCounters select * from PerfCounters - -SELECT - 'sqlserver_performance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,pc.[object_name] AS [object] - ,pc.[counter_name] AS [counter] - ,CASE pc.[instance_name] - WHEN '_Total' THEN 'Total' - ELSE ISNULL(pc.[instance_name],'') - END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] - ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -from @PCounters pc -LEFT OUTER JOIN @PCounters AS pc1 - ON ( - pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') - OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') - ) - AND pc.[object_name] = pc1.[object_name] - AND pc.[instance_name] = pc1.[instance_name] - AND pc1.[counter_name] LIKE '%base' -WHERE - pc.[counter_name] NOT LIKE '% base' -OPTION (RECOMPILE); -` - -const sqlAzureMIRequests string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 -CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) - -SELECT - 'sqlserver_requests' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,COALESCE(r.[status], s.[status]) AS [status] - ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] - ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] - ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] - ,COALESCE(r.[writes], s.[writes]) AS [writes] - ,r.[command] - ,r.[wait_time] as [wait_time_ms] - ,r.[wait_type] - ,r.[wait_resource] - ,r.[blocking_session_id] - ,s.[program_name] - ,s.[host_name] - ,s.[nt_user_name] - ,s.[login_name] - ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] - ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) - WHEN 0 THEN '0-Read Committed' - WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' - WHEN 2 THEN '2-Read Committed' - WHEN 3 THEN '3-Repeatable Read' - WHEN 4 THEN '4-Serializable' - WHEN 5 THEN '5-Snapshot' - ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' - END, 30) AS [transaction_isolation_level] - ,r.[granted_query_memory] as [granted_query_memory_pages] - ,r.[percent_complete] - ,SUBSTRING( - qt.[text], - r.[statement_start_offset] / 2 + 1, - (CASE WHEN r.[statement_end_offset] = -1 - THEN DATALENGTH(qt.text) - ELSE r.[statement_end_offset] - END - r.[statement_start_offset]) / 2 + 1 - ) AS [statement_text] - ,qt.[objectid] - ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] - ,DB_NAME(qt.[dbid]) [stmt_db_name] - ,CONVERT(varchar(20),[query_hash],1) as [query_hash] - ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] - ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_exec_sessions AS s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON s.[session_id] = r.[session_id] -OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt -WHERE - (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) - OR ( - r.session_id IS NOT NULL - AND ( - s.is_user_process = 1 - OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') - ) - ) -OPTION(MAXDOP 1); -` - -const sqlAzureMISchedulers string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_schedulers' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] - ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] - ,s.[is_online] - ,s.[is_idle] - ,s.[preemptive_switches_count] - ,s.[context_switches_count] - ,s.[current_tasks_count] - ,s.[runnable_tasks_count] - ,s.[current_workers_count] - ,s.[active_workers_count] - ,s.[work_queue_count] - ,s.[pending_disk_io_count] - ,s.[load_factor] - ,s.[yield_count] - ,s.[total_cpu_usage_ms] - ,s.[total_scheduler_delay_ms] - ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability -FROM sys.dm_os_schedulers AS s -` diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go new file mode 100644 index 0000000000000..802afa0ee3fcc --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go @@ -0,0 +1,546 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Managed Instance ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzureMIProperties = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP 1 + 'sqlserver_server_properties' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[virtual_core_count] AS [cpu_count] + ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] + ,[sku] + ,SERVERPROPERTY('EngineEdition') AS [engine_edition] + ,[hardware_generation] AS [hardware_type] + ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] + ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] + ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,SERVERPROPERTY('ProductVersion') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] + ,[db_online] + ,[db_restoring] + ,[db_recovering] + ,[db_recoveryPending] + ,[db_suspect] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.server_resource_stats +CROSS APPLY ( + SELECT + SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] + ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] + ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] + ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] + ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] + ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] + FROM sys.databases +) AS dbs +ORDER BY + [start_time] DESC; +` + +const sqlAzureMIResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_azure_db_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM + sys.server_resource_stats +ORDER BY + [end_time] DESC; +` + +const sqlAzureMIResourceGovernance string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_instance_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[tempdb_log_file_number] + ,[volume_local_iops] + ,[volume_external_xstore_iops] + ,[volume_managed_xstore_iops] + ,[volume_type_local_iops] as [voltype_local_iops] + ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] + ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] + ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_instance_resource_governance; +` + +const sqlAzureMIDatabaseIO = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,mf.[type_desc] AS [file_type] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs +LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) + ON vfs.[database_id] = mf.[database_id] + AND vfs.[file_id] = mf.[file_id] +WHERE + vfs.[database_id] < 32760 +` + +const sqlAzureMIMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +const sqlAzureMIOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,ws.[wait_type] + ,[wait_time_ms] + ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,[signal_wait_time_ms] + ,[max_wait_time_ms] + ,[waiting_tasks_count] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' + or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' + or ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' + or ws.[wait_type] like 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 
'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + or ws.[wait_type] LIKE 'SE_REPL[_]%' + or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + or ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzureMIPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF 
SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time 
(ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +from @PCounters pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' +OPTION (RECOMPILE); +` + +const sqlAzureMIRequests string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 +CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) + +SELECT + 'sqlserver_requests' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,s.[session_id] + ,ISNULL(r.[request_id], 0) as [request_id] + ,COALESCE(r.[status], s.[status]) AS [status] + ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] + ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] + ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] + ,COALESCE(r.[writes], s.[writes]) AS [writes] + ,r.[command] + ,r.[wait_time] as [wait_time_ms] + ,r.[wait_type] + ,r.[wait_resource] + ,r.[blocking_session_id] + ,s.[program_name] + ,s.[host_name] + ,s.[nt_user_name] + ,s.[login_name] + ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] + ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' + END, 30) AS [transaction_isolation_level] + ,r.[granted_query_memory] as [granted_query_memory_pages] + ,r.[percent_complete] + ,SUBSTRING( + qt.[text], + r.[statement_start_offset] / 2 + 1, + (CASE WHEN r.[statement_end_offset] = -1 + THEN DATALENGTH(qt.text) + ELSE r.[statement_end_offset] + END - r.[statement_start_offset]) / 2 + 1 + ) AS [statement_text] + ,qt.[objectid] + ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] + ,DB_NAME(qt.[dbid]) [stmt_db_name] + ,CONVERT(varchar(20),[query_hash],1) as [query_hash] + ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] + ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_exec_sessions AS s +LEFT OUTER JOIN sys.dm_exec_requests AS r + ON s.[session_id] = r.[session_id] +OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt +WHERE + (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) + OR ( + r.session_id IS NOT NULL + AND ( + s.is_user_process = 1 + OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') + ) + ) +OPTION(MAXDOP 1); +` + +const sqlAzureMISchedulers string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] + ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] + ,s.[is_online] + ,s.[is_idle] + ,s.[preemptive_switches_count] + ,s.[context_switches_count] + ,s.[current_tasks_count] + ,s.[runnable_tasks_count] + ,s.[current_workers_count] + ,s.[active_workers_count] + ,s.[work_queue_count] + ,s.[pending_disk_io_count] + ,s.[load_factor] + ,s.[yield_count] + ,s.[total_cpu_usage_ms] + ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_schedulers AS s +` diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries.go b/plugins/inputs/sqlserver/azuresqlpoolqueries.go new file mode 100644 index 0000000000000..36fe087fc57e6 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries.go @@ -0,0 +1,477 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Sql Elastic Pool ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzurePoolResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_pool_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[snapshot_time] + ,cast([cap_vcores_used_percent] as float) AS [avg_cpu_percent] + ,cast([avg_data_io_percent] as float) AS [avg_data_io_percent] + ,cast([avg_log_write_percent] as float) AS [avg_log_write_percent] + ,cast([avg_storage_percent] as float) AS [avg_storage_percent] + ,cast([max_worker_percent] as float) AS [max_worker_percent] + ,cast([max_session_percent] as float) AS [max_session_percent] + ,cast([max_data_space_kb]/1024. as int) AS [storage_limit_mb] + ,cast([avg_instance_cpu_percent] as float) AS [avg_instance_cpu_percent] + ,cast([avg_allocated_storage_percent] as float) AS [avg_allocated_storage_percent] +FROM + sys.dm_resource_governor_resource_pools_history_ex +WHERE + [name] = 'SloSharedPool1' +ORDER BY + [snapshot_time] DESC; +` + +const sqlAzurePoolResourceGovernance = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_pool_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[slo_name] + ,[dtu_limit] + ,[cpu_limit] + ,[max_cpu] + ,[cap_cpu] + ,[max_db_memory] + ,[max_db_max_size_in_mb] + ,[db_file_growth_in_mb] + ,[log_size_in_mb] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[checkpoint_rate_mbps] + ,[checkpoint_rate_io] + ,[primary_group_max_workers] + ,[primary_min_log_rate] + ,[primary_max_log_rate] + ,[primary_group_min_io] + ,[primary_group_max_io] + ,[primary_group_min_cpu] + ,[primary_group_max_cpu] + ,[primary_pool_max_workers] + ,[pool_max_io] + ,[volume_local_iops] + ,[volume_managed_xstore_iops] + ,[volume_external_xstore_iops] + ,[volume_type_local_iops] + ,[volume_type_managed_xstore_iops] + ,[volume_type_external_xstore_iops] + ,[volume_pfs_iops] + ,[volume_type_pfs_iops] +FROM + sys.dm_user_db_resource_governance +WHERE database_id = DB_ID(); +` + +const sqlAzurePoolDatabaseIO = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,CASE + WHEN vfs.[database_id] = 1 THEN 'master' + WHEN vfs.[database_id] = 2 THEN 'tempdb' + WHEN vfs.[database_id] = 3 THEN 'model' + WHEN vfs.[database_id] = 4 THEN 'msdb' + ELSE gov.[database_name] + END AS [database_name] + ,vfs.[database_id] + ,vfs.[file_id] + ,CASE + WHEN vfs.[file_id] = 2 THEN 'LOG' + ELSE 'ROWS' + END AS [file_type] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.[io_stall_queued_read_ms] AS [rg_read_stall_ms] + ,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms] + ,[size_on_disk_bytes] + ,ISNULL([size_on_disk_bytes],0)/(1024*1024) AS [size_on_disk_mb] +FROM + sys.dm_io_virtual_file_stats(NULL,NULL) AS vfs +LEFT OUTER JOIN + sys.dm_user_db_resource_governance AS gov +ON vfs.[database_id] = gov.[database_id]; +` + +const sqlAzurePoolOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[wait_type] + ,[waiting_tasks_count] + ,[wait_time_ms] + ,[max_wait_time_ms] + ,[signal_wait_time_ms] + ,[wait_time_ms]-[signal_wait_time_ms] AS [resource_wait_ms] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' THEN 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' OR ws.[wait_type] LIKE 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' OR ws.[wait_type] LIKE 'DTCNEW%' OR ws.[wait_type] LIKE 'TRAN_%' + OR ws.[wait_type] LIKE 'XACT%' OR ws.[wait_type] LIKE 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' OR ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN ( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + OR ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' OR ws.[wait_type] LIKE 'WAIT_RBIO_RG%' THEN 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' OR ws.[wait_type] LIKE 'WAIT_RBIO[_]%' THEN 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + OR ws.[wait_type] LIKE 'HT%' or ws.[wait_type] LIKE 'BMP%' + OR ws.[wait_type] LIKE 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + OR ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + OR ws.[wait_type] LIKE 'SE_REPL[_]%' + OR ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + OR ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN 
ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END AS [wait_category] +FROM sys.dm_os_wait_stats AS ws +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', + N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzurePoolMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] +FROM + sys.dm_os_memory_clerks AS mc +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +// Specific case on this query when cntr_type = 537003264 to return a percentage value between 0 and 100 +// cf. https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current +// Performance counters where the cntr_type column value is 537003264 display the ratio of a subset to its set as a percentage. +// For example, the Buffer Manager:Buffer cache hit ratio counter compares the total number of cache hits and the total number of cache lookups. +// As such, to get a snapshot-like reading of the last second only, you must compare the delta between the current value and the base value (denominator) +// between two collection points that are one second apart. +// The corresponding base value is the performance counter Buffer Manager:Buffer cache hit ratio base where the cntr_type column value is 1073939712. +const sqlAzurePoolPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] int + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTRIM(pc.[object_name]) AS [object_name] + ,RTRIM(pc.[counter_name]) AS [counter_name] + ,ISNULL(gov.[database_name], RTRIM(pc.instance_name)) AS [instance_name] + ,pc.[cntr_value] AS [cntr_value] + ,pc.[cntr_type] AS [cntr_type] + FROM sys.dm_os_performance_counters AS pc + LEFT JOIN sys.dm_user_db_resource_governance AS gov + ON + TRY_CONVERT([uniqueidentifier], pc.[instance_name]) = gov.[physical_database_guid] + WHERE + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (pc.[object_name] LIKE 'MSSQL%:Databases%' AND pc.[instance_name] IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + pc.[counter_name] IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Query Store CPU usage' + ) OR ( + pc.[object_name] LIKE '%User Settable%' + OR pc.[object_name] LIKE '%SQL Errors%' + OR pc.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + pc.[instance_name] IN ('_Total') + AND 
pc.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST( + CASE WHEN pc.[cntr_type] = 537003264 AND base.[cntr_value] > 0 + THEN (pc.[cntr_value] * 1.0) / (base.[cntr_value] * 1.0) * 100 + ELSE pc.[cntr_value] + END + AS float) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS base +ON + pc.[counter_name] = REPLACE(base.[counter_name],' base','') + AND pc.[object_name] = base.[object_name] + AND pc.[instance_name] = base.[instance_name] + AND base.[cntr_type] = 1073939712 +WHERE + pc.[cntr_type] <> 1073939712 +OPTION(RECOMPILE) +` + +const sqlAzurePoolSchedulers = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[scheduler_id] + ,[cpu_id] + ,[status] + ,[is_online] + ,[is_idle] + ,[preemptive_switches_count] + ,[context_switches_count] + ,[idle_switches_count] + ,[current_tasks_count] + ,[runnable_tasks_count] + ,[current_workers_count] + ,[active_workers_count] + ,[work_queue_count] + ,[pending_disk_io_count] + ,[load_factor] + ,[failed_to_create_worker] + ,[quantum_length_us] + ,[yield_count] + ,[total_cpu_usage_ms] + ,[total_cpu_idle_capped_ms] + ,[total_scheduler_delay_ms] + ,[ideal_workers_limit] +FROM + sys.dm_os_schedulers; +` diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go new file mode 100644 index 0000000000000..2149e0d23fd4c --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go @@ -0,0 +1,312 @@ +package sqlserver + +import ( + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +func TestAzureSQL_ElasticPool_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_stats")) + require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "sql_instance")) + 
require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "elastic_pool_name")) + require.True(t, acc.HasField("sqlserver_pool_resource_stats", "snapshot_time")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_data_io_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_session_percent")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_stats", "storage_limit_mb")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_allocated_storage_percent")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_ElasticPool_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_governance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cpu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_min_log_rate")) + 
require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_pfs_iops")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQL_ElasticPool_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "size_on_disk_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", 
"size_on_disk_mb")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolOsWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, 
acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + + server.Stop() +} + +func TestAzureSQL_ElasticPool_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "elastic_pool_name")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "status")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "idle_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasField("sqlserver_schedulers", "failed_to_create_worker")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "quantum_length_us")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_idle_capped_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "ideal_workers_limit")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 4a965bec15afd..def051836c024 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -4,13 +4,13 @@ import ( "database/sql" "errors" "fmt" - "log" "strings" "sync" "time" "github.com/Azure/go-autorest/autorest/adal" mssql "github.com/denisenkom/go-mssqldb" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" @@ -18,18 +18,20 @@ import ( // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - AuthMethod string `toml:"auth_method"` - QueryVersion int `toml:"query_version"` - AzureDB bool 
`toml:"azuredb"` - DatabaseType string `toml:"database_type"` - IncludeQuery []string `toml:"include_query"` - ExcludeQuery []string `toml:"exclude_query"` - HealthMetric bool `toml:"health_metric"` - pools []*sql.DB - queries MapQuery - adalToken *adal.Token - muCacheLock sync.RWMutex + Servers []string `toml:"servers"` + AuthMethod string `toml:"auth_method"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + DatabaseType string `toml:"database_type"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` + Log telegraf.Logger `toml:"-"` + + pools []*sql.DB + queries MapQuery + adalToken *adal.Token + muCacheLock sync.RWMutex } // Query struct @@ -54,6 +56,7 @@ const defaultServer = "Server=.;app name=telegraf;log=1;" const ( typeAzureSQLDB = "AzureSQLDB" typeAzureSQLManagedInstance = "AzureSQLManagedInstance" + typeAzureSQLPool = "AzureSQLPool" typeSQLServer = "SQLServer" ) @@ -87,42 +90,34 @@ servers = [ ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -## Possible values for database_type are - "AzureSQLDB" or "AzureSQLManagedInstance" or "SQLServer" - -## Queries enabled by default for database_type = "AzureSQLDB" are - -## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, -## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers +## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" -# database_type = "AzureSQLDB" +database_type = "SQLServer" -## A list of queries to include. If not specified, all the above listed queries are used. -# include_query = [] +## A list of queries to include. If not specified, all the below listed queries are used. +include_query = [] ## A list of queries to explicitly ignore. 
-# exclude_query = [] - -## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - -## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, -## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers - -# database_type = "AzureSQLManagedInstance" - -# include_query = [] - -# exclude_query = [] +exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] ## Queries enabled by default for database_type = "SQLServer" are - ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, -## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu +## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates -database_type = "SQLServer" +## Queries enabled by default for database_type = "AzureSQLDB" are - +## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, +## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers -include_query = [] +## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - +## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, +## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers -## SQLServerAvailabilityReplicaStates and SQLServerDatabaseReplicaStates are optional queries and hence excluded here as default -exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] +## Queries enabled by default for database_type = "AzureSQLPool" are - +## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, +## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers -## Following are old config settings, you may use them only if you are using the earlier flavor of queries, however it is recommended to use +## Following are old config settings +## You may use them only if you are using the earlier flavor of queries, however it is recommended to use ## the new mechanism of identifying the database_type there by use it's corresponding queries ## Optional parameter, setting this to 2 will use a new version @@ -149,16 +144,17 @@ type scanner interface { Scan(dest ...interface{}) error } -func initQueries(s *SQLServer) error { +func (s *SQLServer) initQueries() error { s.queries = make(MapQuery) queries := s.queries - log.Printf("I! 
[inputs.sqlserver] Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) + s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) // New config option database_type // To prevent query definition conflicts - // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB - // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI - // Constant defintiions for type "SQLServer" start with sqlServer + // Constant definitions for type "AzureSQLDB" start with sqlAzureDB + // Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI + // Constant definitions for type "AzureSQLPool" start with sqlAzurePool + // Constant definitions for type "SQLServer" start with sqlServer if s.DatabaseType == typeAzureSQLDB { queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false} queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false} @@ -180,6 +176,14 @@ func initQueries(s *SQLServer) error { queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false} queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false} queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false} + } else if s.DatabaseType == typeAzureSQLPool { + queries["AzureSQLPoolResourceStats"] = Query{ScriptName: "AzureSQLPoolResourceStats", Script: sqlAzurePoolResourceStats, ResultByRow: false} + queries["AzureSQLPoolResourceGovernance"] = Query{ScriptName: "AzureSQLPoolResourceGovernance", Script: sqlAzurePoolResourceGovernance, ResultByRow: false} + queries["AzureSQLPoolDatabaseIO"] = Query{ScriptName: "AzureSQLPoolDatabaseIO", Script: sqlAzurePoolDatabaseIO, ResultByRow: false} + queries["AzureSQLPoolOsWaitStats"] = Query{ScriptName: "AzureSQLPoolOsWaitStats", Script: sqlAzurePoolOsWaitStats, ResultByRow: false} + queries["AzureSQLPoolMemoryClerks"] = Query{ScriptName: "AzureSQLPoolMemoryClerks", Script: sqlAzurePoolMemoryClerks, ResultByRow: false} + queries["AzureSQLPoolPerformanceCounters"] = Query{ScriptName: "AzureSQLPoolPerformanceCounters", Script: sqlAzurePoolPerformanceCounters, ResultByRow: false} + queries["AzureSQLPoolSchedulers"] = Query{ScriptName: "AzureSQLPoolSchedulers", Script: sqlAzurePoolSchedulers, ResultByRow: false} } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} @@ -200,7 +204,7 @@ func initQueries(s *SQLServer) error { } // Decide if we want to run version 1 or version 2 queries if s.QueryVersion == 2 { - log.Println("W! 
DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.") + s.Log.Warn("DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false} queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false} @@ -211,7 +215,7 @@ func initQueries(s *SQLServer) error { queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} } else { - log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.") + s.Log.Warn("DEPRECATED: query_version=1 has been deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false} queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false} @@ -240,7 +244,7 @@ func initQueries(s *SQLServer) error { for query := range queries { querylist = append(querylist, query) } - log.Printf("I! [inputs.sqlserver] Config: Effective Queries: %#v\n", querylist) + s.Log.Infof("Config: Effective Queries: %#v\n", querylist) return nil } @@ -281,7 +285,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { // Start initialize a list of connection pools func (s *SQLServer) Start(acc telegraf.Accumulator) error { - if err := initQueries(s); err != nil { + if err := s.initQueries(); err != nil { acc.AddError(err) return err } @@ -353,11 +357,11 @@ func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumul // Error msg based on the format in SSMS. SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx if sqlerr, ok := err.(mssql.Error); ok { - return fmt.Errorf("Query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, + return fmt.Errorf("query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d:, Line %d, Error: %w", query.ScriptName, serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err) } - return fmt.Errorf("Query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) + return fmt.Errorf("query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) } defer rows.Close() @@ -423,7 +427,7 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e // values for header, val := range columnMap { if _, ok := (*val).(string); !ok { - fields[header] = (*val) + fields[header] = *val } } // add fields to Accumulator @@ -474,7 +478,7 @@ func (s *SQLServer) getDatabaseTypeToLog() string { func (s *SQLServer) Init() error { if len(s.Servers) == 0 { - log.Println("W! 
Warning: Server list is empty.") + s.Log.Warn("Warning: Server list is empty.") } return nil diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index a9a022bd23fa7..9d1ee29187e22 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -32,8 +32,9 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { QueryVersion: 2, IncludeQuery: test["IncludeQuery"].([]string), ExcludeQuery: test["ExcludeQuery"].([]string), + Log: testutil.Logger{}, } - require.NoError(t, initQueries(&s)) + require.NoError(t, s.initQueries()) require.Equal(t, len(s.queries), test["queriesTotal"].(int)) for _, query := range test["queries"].([]string) { require.Contains(t, s.queries, query) @@ -116,10 +117,12 @@ func TestSqlServer_MultipleInstanceIntegration(t *testing.T) { s := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -151,11 +154,13 @@ func TestSqlServer_MultipleInstanceWithHealthMetricIntegration(t *testing.T) { s := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"DatabaseSize"}, HealthMetric: true, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -192,12 +197,14 @@ func TestSqlServer_HealthMetric(t *testing.T) { IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, HealthMetric: true, AuthMethod: "connection_string", + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{fakeServer1}, IncludeQuery: []string{"DatabaseSize"}, AuthMethod: "connection_string", + Log: testutil.Logger{}, } // acc1 should have the health metric because it is specified in the config @@ -225,16 +232,17 @@ func TestSqlServer_HealthMetric(t *testing.T) { } func TestSqlServer_MultipleInit(t *testing.T) { - s := &SQLServer{} + s := &SQLServer{Log: testutil.Logger{}} s2 := &SQLServer{ ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } - require.NoError(t, initQueries(s)) + require.NoError(t, s.initQueries()) _, ok := s.queries["DatabaseSize"] require.True(t, ok) - require.NoError(t, initQueries(s2)) + require.NoError(t, s.initQueries()) _, ok = s2.queries["DatabaseSize"] require.False(t, ok) s.Stop() @@ -335,11 +343,13 @@ func TestSqlServer_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { Servers: []string{testServer}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, DatabaseType: "AzureSQLDB", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator @@ -376,11 +386,13 @@ func TestSqlServer_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { Servers: []string{testServer2019}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } s2012 := &SQLServer{ Servers: []string{testServer2012}, DatabaseType: "SQLServer", IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, } var acc2019, acc2012 testutil.Accumulator diff --git 
a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md index 6469b259b78ec..7f706f2d13096 100644 --- a/plugins/inputs/stackdriver/README.md +++ b/plugins/inputs/stackdriver/README.md @@ -6,7 +6,7 @@ Query data from Google Cloud Monitoring (formerly Stackdriver) using the This plugin accesses APIs which are [chargeable][pricing]; you might incur costs. -### Configuration +## Configuration ```toml [[inputs.stackdriver]] @@ -58,9 +58,9 @@ costs. ## For a list of aligner strings see: ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # distribution_aggregation_aligners = [ - # "ALIGN_PERCENTILE_99", - # "ALIGN_PERCENTILE_95", - # "ALIGN_PERCENTILE_50", + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", # ] ## Filters can be added to reduce the number of time series matched. All @@ -84,23 +84,24 @@ costs. ## Metric labels refine the time series selection with the following expression: ## metric.labels. = # [[inputs.stackdriver.filter.metric_labels]] - # key = "device_name" - # value = 'one_of("sda", "sdb")' + # key = "device_name" + # value = 'one_of("sda", "sdb")' ``` -#### Authentication +### Authentication It is recommended to use a service account to authenticate with the Stackdriver Monitoring API. [Getting Started with Authentication][auth]. -### Metrics +## Metrics Metrics are created using one of there patterns depending on if the value type is a scalar value, raw distribution buckets, or aligned bucket values. In all cases, the Stackdriver metric type is split on the last component into the measurement and field: -``` + +```sh compute.googleapis.com/instance/disk/read_bytes_count └────────── measurement ─────────┘ └── field ───┘ ``` @@ -114,7 +115,6 @@ compute.googleapis.com/instance/disk/read_bytes_count - fields: - field - **Distributions:** Distributions are represented by a set of fields along with the bucket values @@ -132,7 +132,7 @@ represents the total number of items less than the `lt` tag. - field_range_min - field_range_max -+ measurement +- measurement - tags: - resource_labels - metric_labels @@ -149,14 +149,16 @@ represents the total number of items less than the `lt` tag. - fields: - field_alignment_function -### Troubleshooting +## Troubleshooting When Telegraf is ran with `--debug`, detailed information about the performed queries will be logged. 
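For reference, the measurement/field split pictured above can be expressed as a small sketch; the `splitMetricType` helper below is illustrative only and is not part of the plugin's API:

```go
package main

import (
	"fmt"
	"strings"
)

// splitMetricType is a hypothetical helper that mirrors the split described
// above: the Stackdriver metric type is divided on its last path component,
// the prefix becoming the measurement and the final component the field.
func splitMetricType(metricType string) (measurement, field string) {
	i := strings.LastIndex(metricType, "/")
	if i < 0 {
		// No separator: treat the whole type as both measurement and field
		// (an assumption for this sketch; real metric types are full paths).
		return metricType, metricType
	}
	return metricType[:i], metricType[i+1:]
}

func main() {
	m, f := splitMetricType("compute.googleapis.com/instance/disk/read_bytes_count")
	fmt.Println(m) // compute.googleapis.com/instance/disk
	fmt.Println(f) // read_bytes_count
}
```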
-### Example Output -``` +## Example Output + +```shell ``` + [stackdriver]: https://cloud.google.com/monitoring/api/v3/ [auth]: https://cloud.google.com/docs/authentication/getting-started [pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index cc8b1a40a10a5..648e82624a1ea 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -10,18 +10,19 @@ import ( "time" monitoring "cloud.google.com/go/monitoring/apiv3/v2" - googlepbduration "github.com/golang/protobuf/ptypes/duration" - googlepbts "github.com/golang/protobuf/ptypes/timestamp" + "google.golang.org/api/iterator" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. "github.com/influxdata/telegraf/selfstat" - "google.golang.org/api/iterator" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) const ( @@ -312,8 +313,8 @@ func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { } wg.Wait() - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } return nil @@ -393,8 +394,8 @@ func (s *Stackdriver) newTimeSeriesConf( ) *timeSeriesConf { filter := s.newListTimeSeriesFilter(metricType) interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ Name: fmt.Sprintf("projects/%s", s.Project), @@ -432,7 +433,7 @@ func (t *timeSeriesConf) initForAggregate(alignerStr string) { } aligner := monitoringpb.Aggregation_Aligner(alignerInt) agg := &monitoringpb.Aggregation{ - AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + AlignmentPeriod: &durationpb.Duration{Seconds: 60}, PerSeriesAligner: aligner, } t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) @@ -522,8 +523,8 @@ func (s *Stackdriver) generatetimeSeriesConfs( if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { // Update interval for timeseries requests in timeseries cache interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: ×tamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: ×tamppb.Timestamp{Seconds: startTime.Unix()}, } for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { timeSeriesConf.listTimeSeriesRequest.Interval = interval @@ -643,35 +644,34 @@ func (s *Stackdriver) gatherTimeSeries( } // AddDistribution adds metrics from a distribution value type. 
-func (s *Stackdriver) addDistribution( - metric *distributionpb.Distribution, - tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +func (s *Stackdriver) addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time, + grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, ) error { field := tsConf.fieldKey name := tsConf.measurement - if err := grouper.Add(name, tags, ts, field+"_count", metric.Count); err != nil { + if err := grouper.Add(name, tags, ts, field+"_count", dist.Count); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_mean", metric.Mean); err != nil { + if err := grouper.Add(name, tags, ts, field+"_mean", dist.Mean); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation); err != nil { + if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", dist.SumOfSquaredDeviation); err != nil { return err } - if metric.Range != nil { - if err := grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min); err != nil { + if dist.Range != nil { + if err := grouper.Add(name, tags, ts, field+"_range_min", dist.Range.Min); err != nil { return err } - if err := grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max); err != nil { + if err := grouper.Add(name, tags, ts, field+"_range_max", dist.Range.Max); err != nil { return err } } - linearBuckets := metric.BucketOptions.GetLinearBuckets() - exponentialBuckets := metric.BucketOptions.GetExponentialBuckets() - explicitBuckets := metric.BucketOptions.GetExplicitBuckets() + linearBuckets := dist.BucketOptions.GetLinearBuckets() + exponentialBuckets := dist.BucketOptions.GetExponentialBuckets() + explicitBuckets := dist.BucketOptions.GetExplicitBuckets() var numBuckets int32 if linearBuckets != nil { @@ -704,8 +704,8 @@ func (s *Stackdriver) addDistribution( // Add to the cumulative count; trailing buckets with value 0 are // omitted from the response. 
- if i < int32(len(metric.BucketCounts)) { - count += metric.BucketCounts[i] + if i < int32(len(dist.BucketCounts)) { + count += dist.BucketCounts[i] } if err := grouper.Add(name, tags, ts, field+"_bucket", count); err != nil { return err diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 0502c7bed9765..ad6b15145031a 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" @@ -15,6 +14,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) type Call struct { @@ -105,7 +105,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -138,7 +138,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -204,7 +204,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -249,7 +249,7 @@ func TestGather(t *testing.T) { Points: []*monitoringpb.Point{ { Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -283,7 +283,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -378,7 +378,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -473,7 +473,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -556,7 +556,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -702,7 +702,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -717,7 +717,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -732,7 +732,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( 
&monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -1081,7 +1081,7 @@ func TestListMetricDescriptorFilter(t *testing.T) { ch <- createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: ×tamp.Timestamp{ + EndTime: ×tamppb.Timestamp{ Seconds: now.Unix(), }, }, diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index ca60dbe3a2a79..e82da5a03c878 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -1,6 +1,6 @@ # StatsD Input Plugin -### Configuration +## Configuration ```toml # Statsd Server @@ -77,7 +77,7 @@ # max_ttl = "10h" ``` -### Description +## Description The statsd plugin is a special type of plugin which runs a backgrounded statsd listener service while telegraf is running. @@ -87,49 +87,48 @@ original [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_ty implementation. In short, the telegraf statsd listener will accept: - Gauges - - `users.current.den001.myapp:32|g` <- standard - - `users.current.den001.myapp:+10|g` <- additive - - `users.current.den001.myapp:-10|g` + - `users.current.den001.myapp:32|g` <- standard + - `users.current.den001.myapp:+10|g` <- additive + - `users.current.den001.myapp:-10|g` - Counters - - `deploys.test.myservice:1|c` <- increments by 1 - - `deploys.test.myservice:101|c` <- increments by 101 - - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 + - `deploys.test.myservice:1|c` <- increments by 1 + - `deploys.test.myservice:101|c` <- increments by 101 + - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 - Sets - - `users.unique:101|s` - - `users.unique:101|s` - - `users.unique:102|s` <- would result in a count of 2 for `users.unique` + - `users.unique:101|s` + - `users.unique:101|s` + - `users.unique:102|s` <- would result in a count of 2 for `users.unique` - Timings & Histograms - - `load.time:320|ms` - - `load.time.nanoseconds:1|h` - - `load.time:200|ms|@0.1` <- sampled 1/10 of the time + - `load.time:320|ms` + - `load.time.nanoseconds:1|h` + - `load.time:200|ms|@0.1` <- sampled 1/10 of the time - Distributions - - `load.time:320|d` - - `load.time.nanoseconds:1|d` - - `load.time:200|d|@0.1` <- sampled 1/10 of the time + - `load.time:320|d` + - `load.time.nanoseconds:1|d` + - `load.time:200|d|@0.1` <- sampled 1/10 of the time It is possible to omit repetitive names and merge individual stats into a single line by separating them with additional colons: - - `users.current.den001.myapp:32|g:+10|g:-10|g` - - `deploys.test.myservice:1|c:101|c:1|c|@0.1` - - `users.unique:101|s:101|s:102|s` - - `load.time:320|ms:200|ms|@0.1` +- `users.current.den001.myapp:32|g:+10|g:-10|g` +- `deploys.test.myservice:1|c:101|c:1|c|@0.1` +- `users.unique:101|s:101|s:102|s` +- `load.time:320|ms:200|ms|@0.1` This also allows for mixed types in a single line: - - `foo:1|c:200|ms` +- `foo:1|c:200|ms` The string `foo:1|c:200|ms` is internally split into two individual metrics `foo:1|c` and `foo:200|ms` which are added to the aggregator separately. - -### Influx Statsd +## Influx Statsd In order to take advantage of InfluxDB's tagging system, we have made a couple additions to the standard statsd protocol. 
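To illustrate, a client can emit such a tagged metric with nothing more than a UDP write. The sketch below is illustrative only and assumes the plugin is listening on its default `:8125` UDP service address:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Dial the statsd listener; ":8125" is assumed to be the plugin's
	// configured service address.
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// A gauge with influx-style tags appended to the bucket name.
	if _, err := fmt.Fprint(conn, "users.current,service=payroll,region=us-west:32|g\n"); err != nil {
		panic(err)
	}
}
```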
First, you can specify tags in a manner similar to the line-protocol, like this: -``` +```shell users.current,service=payroll,region=us-west:32|g ``` @@ -139,9 +138,10 @@ users.current,service=payroll,region=us-west:32|g current.users,service=payroll,server=host01:west=10,east=10,central=2,south=10|g ``` --> -### Measurements: +## Measurements Meta: + - tags: `metric_type=` Outputted measurements will depend entirely on the measurements that the user @@ -149,42 +149,42 @@ sends, but here is a brief rundown of what you can expect to find from each metric type: - Gauges - - Gauges are a constant data type. They are not subject to averaging, and they + - Gauges are a constant data type. They are not subject to averaging, and they don’t change unless you change them. That is, once you set a gauge value, it will be a flat line on the graph until you change it again. - Counters - - Counters are the most basic type. They are treated as a count of a type of + - Counters are the most basic type. They are treated as a count of a type of event. They will continually increase unless you set `delete_counters=true`. - Sets - - Sets count the number of unique values passed to a key. For example, you + - Sets count the number of unique values passed to a key. For example, you could count the number of users accessing your system using `users:|s`. No matter how many times the same user_id is sent, the count will only increase by 1. - Timings & Histograms - - Timers are meant to track how long something took. They are an invaluable + - Timers are meant to track how long something took. They are an invaluable tool for tracking application performance. - - The following aggregate measurements are made for timers: - - `statsd__lower`: The lower bound is the lowest value statsd saw + - The following aggregate measurements are made for timers: + - `statsd__lower`: The lower bound is the lowest value statsd saw for that stat during that interval. - - `statsd__upper`: The upper bound is the highest value statsd saw + - `statsd__upper`: The upper bound is the highest value statsd saw for that stat during that interval. - - `statsd__mean`: The mean is the average of all values statsd saw + - `statsd__mean`: The mean is the average of all values statsd saw for that stat during that interval. - - `statsd__stddev`: The stddev is the sample standard deviation + - `statsd__stddev`: The stddev is the sample standard deviation of all values statsd saw for that stat during that interval. - - `statsd__sum`: The sum is the sample sum of all values statsd saw + - `statsd__sum`: The sum is the sample sum of all values statsd saw for that stat during that interval. - - `statsd__count`: The count is the number of timings statsd saw + - `statsd__count`: The count is the number of timings statsd saw for that stat during that interval. It is not averaged. - - `statsd__percentile_
<P>` The `Pth` percentile is a value x such
+    - `statsd_<name>_percentile_<P>
` The `Pth` percentile is a value x such that `P%` of all the values statsd saw for that stat during that time period are below x. The most common value that people use for `P` is the `90`, this is a great number to try to optimize. - Distributions - - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. - - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. + - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. + - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. -### Plugin arguments +## Plugin arguments - **protocol** string: Protocol used in listener - tcp or udp options - **max_tcp_connections** []int: Maximum number of concurrent TCP connections @@ -204,12 +204,12 @@ per-measurement in the calculation of percentiles. Raising this limit increases the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. -- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) -- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) -- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format (https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition) +- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format () +- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format () +- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format () - **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. -### Statsd bucket -> InfluxDB line-protocol Templates +## Statsd bucket -> InfluxDB line-protocol Templates The plugin supports specifying templates for transforming statsd buckets into InfluxDB measurement names and tags. The templates have a _measurement_ keyword, @@ -217,7 +217,7 @@ which can be used to specify parts of the bucket that are to be used in the measurement name. Other words in the template are used as tag names. 
For example, the following template: -``` +```toml templates = [ "measurement.measurement.region" ] @@ -225,7 +225,7 @@ templates = [ would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 ``` @@ -233,7 +233,7 @@ cpu.load.us-west:100|g Users can also filter the template to use based on the name of the bucket, using glob matching, like so: -``` +```toml templates = [ "cpu.* measurement.measurement.region", "mem.* measurement.measurement.host" @@ -242,7 +242,7 @@ templates = [ which would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index 77a01f5586a7b..df35198b129d3 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -120,11 +120,10 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam case "s:": fields["source_type_name"] = rawMetadataFields[i][2:] default: - if rawMetadataFields[i][0] == '#' { - parseDataDogTags(tags, rawMetadataFields[i][1:]) - } else { + if rawMetadataFields[i][0] != '#' { return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) } + parseDataDogTags(tags, rawMetadataFields[i][1:]) } } // Use source tag because host is reserved tag key in Telegraf. diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index fbbfef251adf9..861d2561a85a8 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -11,13 +11,14 @@ import ( "sync" "time" + "github.com/pkg/errors" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" - "github.com/pkg/errors" ) const ( @@ -745,10 +746,10 @@ func (s *Statsd) parseStatsdLine(line string) error { // config file. If there is a match, it will parse the name of the metric and // map of tags. 
// Return values are (, , ) -func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { +func (s *Statsd) parseName(bucket string) (name string, field string, tags map[string]string) { s.Lock() defer s.Unlock() - tags := make(map[string]string) + tags = make(map[string]string) bucketparts := strings.Split(bucket, ",") // Parse out any tags in the bucket @@ -761,8 +762,7 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } } - var field string - name := bucketparts[0] + name = bucketparts[0] p := s.graphiteParser var err error @@ -789,16 +789,20 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } // Parse the key,value out of a string that looks like "key=value" -func parseKeyValue(keyvalue string) (string, string) { - var key, val string - - split := strings.Split(keyvalue, "=") +func parseKeyValue(keyValue string) (key string, val string) { + split := strings.Split(keyValue, "=") // Must be exactly 2 to get anything meaningful out of them if len(split) == 2 { key = split[0] val = split[1] } else if len(split) == 1 { val = split[0] + } else if len(split) > 2 { + // fix: https://github.com/influxdata/telegraf/issues/10113 + // fix: value has "=" parse error + // uri=/service/endpoint?sampleParam={paramValue} parse value key="uri", val="/service/endpoint?sampleParam\={paramValue}" + key = split[0] + val = strings.Join(split[1:], "=") } return key, val diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index a236d638ba330..5121f06b6b8f7 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -7,11 +7,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -53,19 +52,19 @@ func TestConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected when max==1 @@ -84,17 +83,17 @@ func TestConcurrentConns1(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected @@ -112,9 +111,9 @@ func TestCloseConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", 
"127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) listener.Stop() } @@ -156,7 +155,7 @@ func sendRequests(conn net.Conn, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 25000; i++ { //nolint:errcheck,revive - fmt.Fprintf(conn, testMsg) + fmt.Fprint(conn, testMsg) } } @@ -476,7 +475,7 @@ func TestParse_Distributions(t *testing.T) { parseMetrics() for key, value := range validMeasurementMap { field := map[string]interface{}{ - "value": float64(value), + "value": value, } acc.AssertContainsFields(t, key, field) } @@ -1570,7 +1569,7 @@ func testValidateGauge( } if valueExpected != valueActual { - return fmt.Errorf("Measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) + return fmt.Errorf("measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) } return nil } @@ -1590,6 +1589,8 @@ func TestTCP(t *testing.T) { addr := statsd.TCPlistener.Addr().String() conn, err := net.Dial("tcp", addr) + require.NoError(t, err) + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) require.NoError(t, conn.Close()) @@ -1673,3 +1674,30 @@ func TestParse_Ints(t *testing.T) { require.NoError(t, s.Gather(acc)) require.Equal(t, s.Percentiles, []Number{90.0}) } + +func TestParse_KeyValue(t *testing.T) { + type output struct { + key string + val string + } + + validLines := []struct { + input string + output output + }{ + {"", output{"", ""}}, + {"only value", output{"", "only value"}}, + {"key=value", output{"key", "value"}}, + {"url=/api/querystring?key1=val1&key2=value", output{"url", "/api/querystring?key1=val1&key2=value"}}, + } + + for _, line := range validLines { + key, val := parseKeyValue(line.input) + if key != line.output.key { + t.Errorf("line: %s, key expected %s, actual %s", line, line.output.key, key) + } + if val != line.output.val { + t.Errorf("line: %s, val expected %s, actual %s", line, line.output.val, val) + } + } +} diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 61f940a8df01d..01c60e3e70171 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -6,7 +6,7 @@ and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. It can also report for triggered Suricata IDS/IPS alerts. -### Configuration +## Configuration ```toml [[inputs.suricata]] @@ -23,14 +23,15 @@ It can also report for triggered Suricata IDS/IPS alerts. alerts = false ``` -### Metrics +## Metrics Fields in the 'suricata' measurement follow the JSON format used by Suricata's stats output. -See http://suricata.readthedocs.io/en/latest/performance/statistics.html for +See for more information. All fields for Suricata stats are numeric. + - suricata - tags: - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics @@ -98,7 +99,7 @@ All fields for Suricata stats are numeric. - tcp_synack - ... -Some fields of the Suricata alerts are strings, for example the signatures. See https://suricata.readthedocs.io/en/suricata-6.0.0/output/eve/eve-json-format.html?highlight=priority#event-type-alert for more information. +Some fields of the Suricata alerts are strings, for example the signatures. See for more information. - suricata_alert - fields: @@ -112,7 +113,7 @@ Some fields of the Suricata alerts are strings, for example the signatures. 
See - target_port - ... -#### Suricata configuration +### Suricata configuration Suricata needs to deliver the 'stats' event type to a given unix socket for this plugin to pick up. This can be done, for example, by creating an additional @@ -128,11 +129,10 @@ output in the Suricata configuration file: threads: yes ``` -#### FreeBSD tuning - +### FreeBSD tuning -Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192 -otherwise messages from Suricata are truncated as they exceed the default available buffer space, +Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192 +otherwise messages from Suricata are truncated as they exceed the default available buffer space, consequently no statistics are processed by the plugin. ```text @@ -140,8 +140,7 @@ sysctl -w net.local.stream.recvspace=16384 sysctl -w net.local.stream.sendspace=16384 ``` - -### Example Output +## Example Output ```text suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index f3fc5f14eb394..cd13676cf6fae 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -11,9 +11,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` @@ -388,11 +389,13 @@ func TestSuricataParse(t *testing.T) { for _, tc := range tests { data, err := os.ReadFile("testdata/" + tc.filename) require.NoError(t, err) + s := Suricata{ Delimiter: "_", } acc := testutil.Accumulator{} - s.parse(&acc, data) + err = s.parse(&acc, data) + require.NoError(t, err) testutil.RequireMetricsEqual(t, tc.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } diff --git a/plugins/inputs/swap/README.md b/plugins/inputs/swap/README.md index 98389287180fa..c538559ca9aa4 100644 --- a/plugins/inputs/swap/README.md +++ b/plugins/inputs/swap/README.md @@ -4,7 +4,7 @@ The swap plugin collects system swap metrics. For more information on what swap memory is, read [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space). 
-### Configuration: +## Configuration ```toml # Read metrics about swap memory usage @@ -12,7 +12,7 @@ For more information on what swap memory is, read [All about Linux swap space](h # no configuration ``` -### Metrics: +## Metrics - swap - fields: @@ -23,8 +23,8 @@ For more information on what swap memory is, read [All about Linux swap space](h - in (int, bytes): data swapped in since last boot calculated from page number - out (int, bytes): data swapped out since last boot calculated from page number -### Example Output: +## Example Output -``` +```shell swap total=20855394304i,used_percent=45.43883523785713,used=9476448256i,free=1715331072i 1511894782000000000 ``` diff --git a/plugins/inputs/swap/swap_test.go b/plugins/inputs/swap/swap_test.go index 3f97b354e86b4..85a8adb5c184c 100644 --- a/plugins/inputs/swap/swap_test.go +++ b/plugins/inputs/swap/swap_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md index efb8203515c69..117ee02e8fa97 100644 --- a/plugins/inputs/synproxy/README.md +++ b/plugins/inputs/synproxy/README.md @@ -1,10 +1,9 @@ # Synproxy Input Plugin -The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. +The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section. - -### Configuration +## Configuration The synproxy plugin does not need any configuration @@ -13,7 +12,7 @@ The synproxy plugin does not need any configuration # no configuration ``` -### Metrics +## Metrics The following synproxy counters are gathered @@ -26,24 +25,26 @@ The following synproxy counters are gathered - syn_received (uint32, packets, counter) - SYN received - conn_reopened (uint32, packets, counter) - Connections reopened -### Sample Queries +## Sample Queries Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB: + ```sql SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null); ``` -### Troubleshooting +## Troubleshooting Execute the following CLI command in Linux to test the synproxy counters: + ```sh cat /proc/net/stat/synproxy ``` -### Example Output +## Example Output This section shows example output in Line Protocol format. 
-``` +```shell synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000 ``` diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index e8fbe62989055..0f50322666fd7 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -7,9 +7,9 @@ import ( "os" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestSynproxyFileNormal(t *testing.T) { @@ -38,8 +38,8 @@ func TestSynproxyFileHeaderMismatch(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid number of columns in data") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid number of columns in data") } func TestSynproxyFileInvalidHex(t *testing.T) { @@ -52,8 +52,8 @@ func TestSynproxyFileInvalidHex(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid value") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid value") } func TestNoSynproxyFile(t *testing.T) { @@ -69,7 +69,7 @@ func TestNoSynproxyFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } // Valid Synproxy file @@ -149,7 +149,7 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) acc.AssertContainsFields(t, "synproxy", telegrafData) } diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index a821a642b0ec8..d2c763e4ec6a0 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -9,7 +9,7 @@ a Unix Domain socket, Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). -### Configuration +## Configuration ```toml [[inputs.syslog]] @@ -68,20 +68,20 @@ Syslog messages should be formatted according to # sdparam_separator = "_" ``` -#### Message transport +### Message transport The `framing` option only applies to streams. It governs the way we expect to receive messages within the stream. Namely, with the [`"octet counting"`](https://tools.ietf.org/html/rfc5425#section-4.3) technique (default) or with the [`"non-transparent"`](https://tools.ietf.org/html/rfc6587#section-3.4.2) framing. The `trailer` option only applies when `framing` option is `"non-transparent"`. It must have one of the following values: `"LF"` (default), or `"NUL"`. -#### Best effort +### Best effort The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode) option instructs the parser to extract partial but valid info from syslog messages. If unset only full messages will be collected. -#### Rsyslog Integration +### Rsyslog Integration Rsyslog can be configured to forward logging messages to Telegraf by configuring [remote logging](https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine). @@ -93,7 +93,8 @@ config file. 
Add the following lines to `/etc/rsyslog.d/50-telegraf.conf` making adjustments to the target address as needed: -``` + +```shell $ActionQueueType LinkedList # use asynchronous processing $ActionQueueFileName srvrfwd # set file name, also enables disk mode $ActionResumeRetryCount -1 # infinite retries on insert failure @@ -107,7 +108,8 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down ``` You can alternately use `advanced` format (aka RainerScript): -``` + +```bash # forward over tcp with octet framing according to RFC 5425 action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") @@ -117,7 +119,7 @@ action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1 To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html). -### Metrics +## Metrics - syslog - tags @@ -136,17 +138,19 @@ To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc - *Structured Data* (string) - timestamp: the time the messages was received -#### Structured Data +### Structured Data Structured data produces field keys by combining the `SD_ID` with the `PARAM_NAME` combined using the `sdparam_separator` as in the following example: -``` + +```shell 170 <165>1 2018-10-01:14:15.000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] An application event log entry... ``` -``` + +```shell syslog,appname=evntslog,facility=local4,hostname=mymachine.example.com,severity=notice exampleSDID@32473_eventID="1011",exampleSDID@32473_eventSource="Application",exampleSDID@32473_iut="3",facility_code=20i,message="An application event log entry...",msgid="ID47",severity_code=5i,timestamp=1065910455003000000i,version=1i 1538421339749472344 ``` -### Troubleshooting +## Troubleshooting You can send debugging messages directly to the input plugin using netcat: @@ -158,14 +162,16 @@ echo "57 <13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc 127.0.0. echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0.1 6514 ``` -#### RFC3164 +### RFC3164 RFC3164 encoded messages are supported for UDP only, but not all vendors output valid RFC3164 messages by default - E.g. Cisco IOS If you see the following error, it is due to a message encoded in this format: - ``` + + ```shell E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] ``` - You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. \ No newline at end of file + + You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. 
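For TCP with the default octet-counting framing, the length prefix shown in the netcat example above can also be computed programmatically. The following Go sketch is illustrative only; it reuses the test message and the `127.0.0.1:6514` address from the troubleshooting examples and derives the octet count from the message itself:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// RFC 5425 octet-counting framing: "<length> <message>", where length is
	// the number of bytes in the syslog message that follows the space.
	msg := "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test"

	conn, err := net.Dial("tcp", "127.0.0.1:6514")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if _, err := fmt.Fprintf(conn, "%d %s", len(msg), msg); err != nil {
		panic(err)
	}
}
```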
diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index fc7eab1fa0828..bfc6f9283990c 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -13,7 +13,7 @@ import ( "time" "unicode" - syslog "github.com/influxdata/go-syslog/v3" + "github.com/influxdata/go-syslog/v3" "github.com/influxdata/go-syslog/v3/nontransparent" "github.com/influxdata/go-syslog/v3/octetcounting" "github.com/influxdata/go-syslog/v3/rfc3164" @@ -205,7 +205,7 @@ func (s *Syslog) Stop() { // getAddressParts returns the address scheme and host // it also sets defaults for them when missing // when the input address does not specify the protocol it returns an error -func getAddressParts(a string) (string, string, error) { +func getAddressParts(a string) (scheme string, host string, err error) { parts := strings.SplitN(a, "://", 2) if len(parts) != 2 { return "", "", fmt.Errorf("missing protocol within address '%s'", a) @@ -220,7 +220,6 @@ func getAddressParts(a string) (string, string, error) { return parts[0], parts[1], nil } - var host string if u.Hostname() != "" { host = u.Hostname() } @@ -259,7 +258,7 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { message, err := p.Parse(b[:n]) if message != nil { - acc.AddFields("syslog", fields(message, s), tags(message), s.time()) + acc.AddFields("syslog", fields(message, s), tags(message), s.currentTime()) } if err != nil { acc.AddError(err) @@ -383,7 +382,7 @@ func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { acc.AddError(res.Error) } if res.Message != nil { - acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.time()) + acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.currentTime()) } } @@ -473,7 +472,7 @@ func (uc unixCloser) Close() error { return err } -func (s *Syslog) time() time.Time { +func (s *Syslog) currentTime() time.Time { t := s.now() if t == s.lastTime { t = t.Add(time.Nanosecond) diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md index 9775c1a305c95..5c055a3c51af4 100644 --- a/plugins/inputs/sysstat/README.md +++ b/plugins/inputs/sysstat/README.md @@ -6,7 +6,7 @@ package installed. This plugin collects system metrics with the sysstat collector utility `sadc` and parses the created binary data file with the `sadf` utility. -### Configuration: +## Configuration ```toml # Sysstat metrics collector @@ -38,22 +38,22 @@ the created binary data file with the `sadf` utility. ## ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version. [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - -r = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" - # -H = "hugepages" # only available for newer linux distributions - # "-I ALL" = "interrupts" # requires INT activity + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + -r = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" + # -H = "hugepages" # only available for newer linux distributions + # "-I ALL" = "interrupts" # requires INT activity ## Device tags can be used to add additional tags for devices. For example the configuration below ## adds a tag vg with value rootvg for all metrics with sda devices. 
@@ -61,94 +61,100 @@ the created binary data file with the `sadf` utility. # vg = "rootvg" ``` -### Measurements & Fields: -#### If group=true +## Measurements & Fields + +### If group=true + - cpu - - pct_idle (float) - - pct_iowait (float) - - pct_nice (float) - - pct_steal (float) - - pct_system (float) - - pct_user (float) + - pct_idle (float) + - pct_iowait (float) + - pct_nice (float) + - pct_steal (float) + - pct_system (float) + - pct_user (float) - disk - - avgqu-sz (float) - - avgrq-sz (float) - - await (float) - - pct_util (float) - - rd_sec_pers (float) - - svctm (float) - - tps (float) + - avgqu-sz (float) + - avgrq-sz (float) + - await (float) + - pct_util (float) + - rd_sec_pers (float) + - svctm (float) + - tps (float) And much more, depending on the options you configure. -#### If group=false +### If group=false + - cpu_pct_idle - - value (float) + - value (float) - cpu_pct_iowait - - value (float) + - value (float) - cpu_pct_nice - - value (float) + - value (float) - cpu_pct_steal - - value (float) + - value (float) - cpu_pct_system - - value (float) + - value (float) - cpu_pct_user - - value (float) + - value (float) - disk_avgqu-sz - - value (float) + - value (float) - disk_avgrq-sz - - value (float) + - value (float) - disk_await - - value (float) + - value (float) - disk_pct_util - - value (float) + - value (float) - disk_rd_sec_per_s - - value (float) + - value (float) - disk_svctm - - value (float) + - value (float) - disk_tps - - value (float) + - value (float) And much more, depending on the options you configure. -### Tags: +## Tags - All measurements have the following tags: - - device + - device And more if you define some `device_tags`. -### Example Output: + +## Example Output With the configuration below: + ```toml [[inputs.sysstat]] sadc_path = "/usr/lib/sa/sadc" # required activities = ["DISK", "SNMP", "INT"] group = true [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - -H = "hugepages" - "-I ALL" = "interrupts" # requires INT activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - "-r ALL" = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" + -C = "cpu" + -B = "paging" + -b = "io" + -d = "disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" [[inputs.sysstat.device_tags.sda]] vg = "rootvg" ``` you get the following output: -``` + +```shell $ telegraf --config telegraf.conf --input-filter sysstat --test * Plugin: sysstat, Collection 1 > cpu_util,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626657883725 @@ -189,34 +195,36 @@ $ telegraf --config telegraf.conf --input-filter sysstat --test ``` If you change the group value to false like below: + ```toml [[inputs.sysstat]] sadc_path = "/usr/lib/sa/sadc" # required activities = ["DISK", "SNMP", "INT"] group = false [inputs.sysstat.options] - -C = "cpu" - -B = "paging" - -b = "io" - -d = "disk" # requires DISK activity - -H = "hugepages" - "-I ALL" = "interrupts" # requires INT activity - "-n ALL" = "network" - "-P ALL" = "per_cpu" - -q = "queue" - -R = "mem" - "-r ALL" = "mem_util" - -S = "swap_util" - -u = "cpu_util" - -v = "inode" - -W = "swap" - -w = "task" + -C = "cpu" + -B = "paging" + -b = "io" + -d = 
"disk" # requires DISK activity + -H = "hugepages" + "-I ALL" = "interrupts" # requires INT activity + "-n ALL" = "network" + "-P ALL" = "per_cpu" + -q = "queue" + -R = "mem" + "-r ALL" = "mem_util" + -S = "swap_util" + -u = "cpu_util" + -v = "inode" + -W = "swap" + -w = "task" [[inputs.sysstat.device_tags.sda]] vg = "rootvg" ``` you get the following output: -``` + +```shell $ telegraf -config telegraf.conf -input-filter sysstat -test * Plugin: sysstat, Collection 1 > io_tps value=0.5 1459255780126025822 diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 7e69ff41ccdf2..3796aeb19ac58 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -10,7 +10,6 @@ import ( "io" "os" "os/exec" - "path" "strconv" "strings" "sync" @@ -66,7 +65,6 @@ type Sysstat struct { // DeviceTags adds the possibility to add additional tags for devices. DeviceTags map[string][]map[string]string `toml:"device_tags"` - tmpFile string interval int Log telegraf.Logger @@ -149,8 +147,15 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { s.interval = int(time.Since(firstTimestamp).Seconds() + 0.5) } } + + tmpfile, err := os.CreateTemp("", "sysstat-*") + if err != nil { + return fmt.Errorf("failed to create tmp file: %s", err) + } + defer os.Remove(tmpfile.Name()) + ts := time.Now().Add(time.Duration(s.interval) * time.Second) - if err := s.collect(); err != nil { + if err := s.collect(tmpfile.Name()); err != nil { return err } var wg sync.WaitGroup @@ -158,15 +163,11 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(acc telegraf.Accumulator, option string) { defer wg.Done() - acc.AddError(s.parse(acc, option, ts)) + acc.AddError(s.parse(acc, option, tmpfile.Name(), ts)) }(acc, option) } wg.Wait() - if _, err := os.Stat(s.tmpFile); err == nil { - acc.AddError(os.Remove(s.tmpFile)) - } - return nil } @@ -175,12 +176,12 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error { // Sadc -S -S ... 2 tmpFile // The above command collects system metrics during and // saves it in binary form to tmpFile. -func (s *Sysstat) collect() error { +func (s *Sysstat) collect(tempfile string) error { options := []string{} for _, act := range s.Activities { options = append(options, "-S", act) } - s.tmpFile = path.Join("/tmp", fmt.Sprintf("sysstat-%d", time.Now().Unix())) + // collectInterval has to be smaller than the telegraf data collection interval collectInterval := s.interval - parseInterval @@ -189,13 +190,10 @@ func (s *Sysstat) collect() error { collectInterval = 1 // In that case we only collect for 1 second. } - options = append(options, strconv.Itoa(collectInterval), "2", s.tmpFile) + options = append(options, strconv.Itoa(collectInterval), "2", tempfile) cmd := execCommand(s.Sadc, options...) out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval)) if err != nil { - if err := os.Remove(s.tmpFile); err != nil { - s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error()) - } return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } return nil @@ -229,8 +227,8 @@ func withCLocale(cmd *exec.Cmd) *exec.Cmd { // parse runs Sadf on the previously saved tmpFile: // Sadf -p -- -p