From 9c33d36818368addbd7b181228d1666d51697488 Mon Sep 17 00:00:00 2001 From: Tim Chan Date: Wed, 8 Jan 2025 14:16:10 -0800 Subject: [PATCH] Wrapped up mongodb telegraf metrics --- .chloggen/chan-tim_mongodbMetrics.yaml | 27 ++ receiver/mongodbreceiver/documentation.md | 32 +++ .../internal/metadata/generated_config.go | 16 ++ .../metadata/generated_config_test.go | 8 + .../internal/metadata/generated_metrics.go | 246 +++++++++++++++++- .../metadata/generated_metrics_test.go | 68 ++++- .../internal/metadata/testdata/config.yaml | 16 ++ receiver/mongodbreceiver/metadata.yaml | 32 +++ receiver/mongodbreceiver/metrics.go | 54 ++++ receiver/mongodbreceiver/scraper.go | 3 + receiver/mongodbreceiver/scraper_test.go | 3 + receiver/mongodbreceiver/testdata/admin.json | 3 +- .../testdata/scraper/expected.yaml | 128 ++------- .../testdata/scraper/partial_scrape.yaml | 22 +- 14 files changed, 535 insertions(+), 123 deletions(-) create mode 100644 .chloggen/chan-tim_mongodbMetrics.yaml diff --git a/.chloggen/chan-tim_mongodbMetrics.yaml b/.chloggen/chan-tim_mongodbMetrics.yaml new file mode 100644 index 000000000000..e9452318800b --- /dev/null +++ b/.chloggen/chan-tim_mongodbMetrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: mongodbreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Added new mongodb metrics to achieve parity with Telegraf + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [37227] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] \ No newline at end of file diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 440ad2b6ad98..c8b698cb7cfb 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -28,6 +28,14 @@ The number of write operations currently being processed. | ---- | ----------- | ---------- | ----------------------- | --------- | | {writes} | Sum | Int | Cumulative | false | +### mongodb.cache.dirty.percent + +The percentage of WiredTiger cache that is dirty. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### mongodb.cache.operations The number of cache operations of the instance. @@ -42,6 +50,14 @@ The number of cache operations of the instance. | ---- | ----------- | ------ | | type | The result of a cache request. | Str: ``hit``, ``miss`` | +### mongodb.cache.used.percent + +The percentage of WiredTiger cache in use. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### mongodb.collection.count The number of collections. @@ -284,6 +300,14 @@ The total time spent performing operations. | ---- | ----------- | ------ | | operation | The MongoDB operation being counted. | Str: ``insert``, ``query``, ``update``, ``delete``, ``getmore``, ``command`` | +### mongodb.page_faults + +The number of page faults. 
+ +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| {faults} | Sum | Int | Cumulative | true | + ### mongodb.queries_per_sec The number of queries executed per second. @@ -366,6 +390,14 @@ The number of updates executed per second. | ---- | ----------- | ---------- | | {update}/s | Gauge | Double | +### mongodb.wtcache.bytes.read + +The number of bytes read into the WiredTiger cache. + +| Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | +| ---- | ----------- | ---------- | ----------------------- | --------- | +| By | Sum | Int | Cumulative | true | + ## Optional Metrics The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration: diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config.go b/receiver/mongodbreceiver/internal/metadata/generated_config.go index 60754566d73f..b43a3dcfefa7 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_config.go @@ -30,7 +30,9 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { type MetricsConfig struct { MongodbActiveReads MetricConfig `mapstructure:"mongodb.active.reads"` MongodbActiveWrites MetricConfig `mapstructure:"mongodb.active.writes"` + MongodbCacheDirtyPercent MetricConfig `mapstructure:"mongodb.cache.dirty.percent"` MongodbCacheOperations MetricConfig `mapstructure:"mongodb.cache.operations"` + MongodbCacheUsedPercent MetricConfig `mapstructure:"mongodb.cache.used.percent"` MongodbCollectionCount MetricConfig `mapstructure:"mongodb.collection.count"` MongodbCommandsPerSec MetricConfig `mapstructure:"mongodb.commands_per_sec"` MongodbConnectionCount MetricConfig `mapstructure:"mongodb.connection.count"` @@ -62,6 +64,7 @@ type MetricsConfig struct { MongodbOperationLatencyTime MetricConfig 
`mapstructure:"mongodb.operation.latency.time"` MongodbOperationReplCount MetricConfig `mapstructure:"mongodb.operation.repl.count"` MongodbOperationTime MetricConfig `mapstructure:"mongodb.operation.time"` + MongodbPageFaults MetricConfig `mapstructure:"mongodb.page_faults"` MongodbQueriesPerSec MetricConfig `mapstructure:"mongodb.queries_per_sec"` MongodbReplCommandsPerSec MetricConfig `mapstructure:"mongodb.repl_commands_per_sec"` MongodbReplDeletesPerSec MetricConfig `mapstructure:"mongodb.repl_deletes_per_sec"` @@ -73,6 +76,7 @@ type MetricsConfig struct { MongodbStorageSize MetricConfig `mapstructure:"mongodb.storage.size"` MongodbUpdatesPerSec MetricConfig `mapstructure:"mongodb.updates_per_sec"` MongodbUptime MetricConfig `mapstructure:"mongodb.uptime"` + MongodbWtcacheBytesRead MetricConfig `mapstructure:"mongodb.wtcache.bytes.read"` } func DefaultMetricsConfig() MetricsConfig { @@ -83,9 +87,15 @@ func DefaultMetricsConfig() MetricsConfig { MongodbActiveWrites: MetricConfig{ Enabled: true, }, + MongodbCacheDirtyPercent: MetricConfig{ + Enabled: true, + }, MongodbCacheOperations: MetricConfig{ Enabled: true, }, + MongodbCacheUsedPercent: MetricConfig{ + Enabled: true, + }, MongodbCollectionCount: MetricConfig{ Enabled: true, }, @@ -179,6 +189,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbOperationTime: MetricConfig{ Enabled: true, }, + MongodbPageFaults: MetricConfig{ + Enabled: true, + }, MongodbQueriesPerSec: MetricConfig{ Enabled: true, }, @@ -212,6 +225,9 @@ func DefaultMetricsConfig() MetricsConfig { MongodbUptime: MetricConfig{ Enabled: false, }, + MongodbWtcacheBytesRead: MetricConfig{ + Enabled: true, + }, } } diff --git a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go index d527783e5903..31ec4f99a34a 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_config_test.go +++ 
b/receiver/mongodbreceiver/internal/metadata/generated_config_test.go @@ -27,7 +27,9 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: true}, MongodbActiveWrites: MetricConfig{Enabled: true}, + MongodbCacheDirtyPercent: MetricConfig{Enabled: true}, MongodbCacheOperations: MetricConfig{Enabled: true}, + MongodbCacheUsedPercent: MetricConfig{Enabled: true}, MongodbCollectionCount: MetricConfig{Enabled: true}, MongodbCommandsPerSec: MetricConfig{Enabled: true}, MongodbConnectionCount: MetricConfig{Enabled: true}, @@ -59,6 +61,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: MetricConfig{Enabled: true}, MongodbOperationReplCount: MetricConfig{Enabled: true}, MongodbOperationTime: MetricConfig{Enabled: true}, + MongodbPageFaults: MetricConfig{Enabled: true}, MongodbQueriesPerSec: MetricConfig{Enabled: true}, MongodbReplCommandsPerSec: MetricConfig{Enabled: true}, MongodbReplDeletesPerSec: MetricConfig{Enabled: true}, @@ -70,6 +73,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbStorageSize: MetricConfig{Enabled: true}, MongodbUpdatesPerSec: MetricConfig{Enabled: true}, MongodbUptime: MetricConfig{Enabled: true}, + MongodbWtcacheBytesRead: MetricConfig{Enabled: true}, }, ResourceAttributes: ResourceAttributesConfig{ Database: ResourceAttributeConfig{Enabled: true}, @@ -84,7 +88,9 @@ func TestMetricsBuilderConfig(t *testing.T) { Metrics: MetricsConfig{ MongodbActiveReads: MetricConfig{Enabled: false}, MongodbActiveWrites: MetricConfig{Enabled: false}, + MongodbCacheDirtyPercent: MetricConfig{Enabled: false}, MongodbCacheOperations: MetricConfig{Enabled: false}, + MongodbCacheUsedPercent: MetricConfig{Enabled: false}, MongodbCollectionCount: MetricConfig{Enabled: false}, MongodbCommandsPerSec: MetricConfig{Enabled: false}, MongodbConnectionCount: MetricConfig{Enabled: false}, @@ -116,6 +122,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbOperationLatencyTime: 
MetricConfig{Enabled: false}, MongodbOperationReplCount: MetricConfig{Enabled: false}, MongodbOperationTime: MetricConfig{Enabled: false}, + MongodbPageFaults: MetricConfig{Enabled: false}, MongodbQueriesPerSec: MetricConfig{Enabled: false}, MongodbReplCommandsPerSec: MetricConfig{Enabled: false}, MongodbReplDeletesPerSec: MetricConfig{Enabled: false}, @@ -127,6 +134,7 @@ func TestMetricsBuilderConfig(t *testing.T) { MongodbStorageSize: MetricConfig{Enabled: false}, MongodbUpdatesPerSec: MetricConfig{Enabled: false}, MongodbUptime: MetricConfig{Enabled: false}, + MongodbWtcacheBytesRead: MetricConfig{Enabled: false}, }, ResourceAttributes: ResourceAttributesConfig{ Database: ResourceAttributeConfig{Enabled: false}, diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go index 07fb05cccaa5..9028f5fb8edd 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/receiver" ) -// AttributeConnectionType specifies the a value connection_type attribute. +// AttributeConnectionType specifies the value connection_type attribute. type AttributeConnectionType int const ( @@ -42,7 +42,7 @@ var MapAttributeConnectionType = map[string]AttributeConnectionType{ "current": AttributeConnectionTypeCurrent, } -// AttributeLockMode specifies the a value lock_mode attribute. +// AttributeLockMode specifies the value lock_mode attribute. type AttributeLockMode int const ( @@ -76,7 +76,7 @@ var MapAttributeLockMode = map[string]AttributeLockMode{ "intent_exclusive": AttributeLockModeIntentExclusive, } -// AttributeLockType specifies the a value lock_type attribute. +// AttributeLockType specifies the value lock_type attribute. 
type AttributeLockType int const ( @@ -126,7 +126,7 @@ var MapAttributeLockType = map[string]AttributeLockType{ "oplog": AttributeLockTypeOplog, } -// AttributeMemoryType specifies the a value memory_type attribute. +// AttributeMemoryType specifies the value memory_type attribute. type AttributeMemoryType int const ( @@ -152,7 +152,7 @@ var MapAttributeMemoryType = map[string]AttributeMemoryType{ "virtual": AttributeMemoryTypeVirtual, } -// AttributeOperation specifies the a value operation attribute. +// AttributeOperation specifies the value operation attribute. type AttributeOperation int const ( @@ -194,7 +194,7 @@ var MapAttributeOperation = map[string]AttributeOperation{ "command": AttributeOperationCommand, } -// AttributeOperationLatency specifies the a value operation_latency attribute. +// AttributeOperationLatency specifies the value operation_latency attribute. type AttributeOperationLatency int const ( @@ -224,7 +224,7 @@ var MapAttributeOperationLatency = map[string]AttributeOperationLatency{ "command": AttributeOperationLatencyCommand, } -// AttributeType specifies the a value type attribute. +// AttributeType specifies the value type attribute. type AttributeType int const ( @@ -352,6 +352,55 @@ func newMetricMongodbActiveWrites(cfg MetricConfig) metricMongodbActiveWrites { return m } +type metricMongodbCacheDirtyPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.cache.dirty.percent metric with initial data. 
+func (m *metricMongodbCacheDirtyPercent) init() { + m.data.SetName("mongodb.cache.dirty.percent") + m.data.SetDescription("The percentage of WiredTiger cache that is dirty.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbCacheDirtyPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbCacheDirtyPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbCacheDirtyPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbCacheDirtyPercent(cfg MetricConfig) metricMongodbCacheDirtyPercent { + m := metricMongodbCacheDirtyPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbCacheOperations struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -405,6 +454,55 @@ func newMetricMongodbCacheOperations(cfg MetricConfig) metricMongodbCacheOperati return m } +type metricMongodbCacheUsedPercent struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.cache.used.percent metric with initial data. 
+func (m *metricMongodbCacheUsedPercent) init() { + m.data.SetName("mongodb.cache.used.percent") + m.data.SetDescription("The percentage of WiredTiger cache in use.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricMongodbCacheUsedPercent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbCacheUsedPercent) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbCacheUsedPercent) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbCacheUsedPercent(cfg MetricConfig) metricMongodbCacheUsedPercent { + m := metricMongodbCacheUsedPercent{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbCollectionCount struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2000,6 +2098,57 @@ func newMetricMongodbOperationTime(cfg MetricConfig) metricMongodbOperationTime return m } +type metricMongodbPageFaults struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.page_faults metric with initial data. 
+func (m *metricMongodbPageFaults) init() { + m.data.SetName("mongodb.page_faults") + m.data.SetDescription("The number of page faults.") + m.data.SetUnit("{faults}") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbPageFaults) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbPageFaults) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbPageFaults(cfg MetricConfig) metricMongodbPageFaults { + m := metricMongodbPageFaults{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricMongodbQueriesPerSec struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2545,6 +2694,57 @@ func newMetricMongodbUptime(cfg MetricConfig) metricMongodbUptime { return m } +type metricMongodbWtcacheBytesRead struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills mongodb.wtcache.bytes.read metric with initial data. 
+func (m *metricMongodbWtcacheBytesRead) init() { + m.data.SetName("mongodb.wtcache.bytes.read") + m.data.SetDescription("The number of bytes read into the WiredTiger cache.") + m.data.SetUnit("By") + m.data.SetEmptySum() + m.data.Sum().SetIsMonotonic(true) + m.data.Sum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) +} + +func (m *metricMongodbWtcacheBytesRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.config.Enabled { + return + } + dp := m.data.Sum().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricMongodbWtcacheBytesRead) updateCapacity() { + if m.data.Sum().DataPoints().Len() > m.capacity { + m.capacity = m.data.Sum().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricMongodbWtcacheBytesRead) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Sum().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricMongodbWtcacheBytesRead(cfg MetricConfig) metricMongodbWtcacheBytesRead { + m := metricMongodbWtcacheBytesRead{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. 
type MetricsBuilder struct { @@ -2557,7 +2757,9 @@ type MetricsBuilder struct { resourceAttributeExcludeFilter map[string]filter.Filter metricMongodbActiveReads metricMongodbActiveReads metricMongodbActiveWrites metricMongodbActiveWrites + metricMongodbCacheDirtyPercent metricMongodbCacheDirtyPercent metricMongodbCacheOperations metricMongodbCacheOperations + metricMongodbCacheUsedPercent metricMongodbCacheUsedPercent metricMongodbCollectionCount metricMongodbCollectionCount metricMongodbCommandsPerSec metricMongodbCommandsPerSec metricMongodbConnectionCount metricMongodbConnectionCount @@ -2589,6 +2791,7 @@ type MetricsBuilder struct { metricMongodbOperationLatencyTime metricMongodbOperationLatencyTime metricMongodbOperationReplCount metricMongodbOperationReplCount metricMongodbOperationTime metricMongodbOperationTime + metricMongodbPageFaults metricMongodbPageFaults metricMongodbQueriesPerSec metricMongodbQueriesPerSec metricMongodbReplCommandsPerSec metricMongodbReplCommandsPerSec metricMongodbReplDeletesPerSec metricMongodbReplDeletesPerSec @@ -2600,6 +2803,7 @@ type MetricsBuilder struct { metricMongodbStorageSize metricMongodbStorageSize metricMongodbUpdatesPerSec metricMongodbUpdatesPerSec metricMongodbUptime metricMongodbUptime + metricMongodbWtcacheBytesRead metricMongodbWtcacheBytesRead } // MetricBuilderOption applies changes to default metrics builder. 
@@ -2628,7 +2832,9 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt buildInfo: settings.BuildInfo, metricMongodbActiveReads: newMetricMongodbActiveReads(mbc.Metrics.MongodbActiveReads), metricMongodbActiveWrites: newMetricMongodbActiveWrites(mbc.Metrics.MongodbActiveWrites), + metricMongodbCacheDirtyPercent: newMetricMongodbCacheDirtyPercent(mbc.Metrics.MongodbCacheDirtyPercent), metricMongodbCacheOperations: newMetricMongodbCacheOperations(mbc.Metrics.MongodbCacheOperations), + metricMongodbCacheUsedPercent: newMetricMongodbCacheUsedPercent(mbc.Metrics.MongodbCacheUsedPercent), metricMongodbCollectionCount: newMetricMongodbCollectionCount(mbc.Metrics.MongodbCollectionCount), metricMongodbCommandsPerSec: newMetricMongodbCommandsPerSec(mbc.Metrics.MongodbCommandsPerSec), metricMongodbConnectionCount: newMetricMongodbConnectionCount(mbc.Metrics.MongodbConnectionCount), @@ -2660,6 +2866,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbOperationLatencyTime: newMetricMongodbOperationLatencyTime(mbc.Metrics.MongodbOperationLatencyTime), metricMongodbOperationReplCount: newMetricMongodbOperationReplCount(mbc.Metrics.MongodbOperationReplCount), metricMongodbOperationTime: newMetricMongodbOperationTime(mbc.Metrics.MongodbOperationTime), + metricMongodbPageFaults: newMetricMongodbPageFaults(mbc.Metrics.MongodbPageFaults), metricMongodbQueriesPerSec: newMetricMongodbQueriesPerSec(mbc.Metrics.MongodbQueriesPerSec), metricMongodbReplCommandsPerSec: newMetricMongodbReplCommandsPerSec(mbc.Metrics.MongodbReplCommandsPerSec), metricMongodbReplDeletesPerSec: newMetricMongodbReplDeletesPerSec(mbc.Metrics.MongodbReplDeletesPerSec), @@ -2671,6 +2878,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricMongodbStorageSize: newMetricMongodbStorageSize(mbc.Metrics.MongodbStorageSize), metricMongodbUpdatesPerSec: 
newMetricMongodbUpdatesPerSec(mbc.Metrics.MongodbUpdatesPerSec), metricMongodbUptime: newMetricMongodbUptime(mbc.Metrics.MongodbUptime), + metricMongodbWtcacheBytesRead: newMetricMongodbWtcacheBytesRead(mbc.Metrics.MongodbWtcacheBytesRead), resourceAttributeIncludeFilter: make(map[string]filter.Filter), resourceAttributeExcludeFilter: make(map[string]filter.Filter), } @@ -2763,7 +2971,9 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricMongodbActiveReads.emit(ils.Metrics()) mb.metricMongodbActiveWrites.emit(ils.Metrics()) + mb.metricMongodbCacheDirtyPercent.emit(ils.Metrics()) mb.metricMongodbCacheOperations.emit(ils.Metrics()) + mb.metricMongodbCacheUsedPercent.emit(ils.Metrics()) mb.metricMongodbCollectionCount.emit(ils.Metrics()) mb.metricMongodbCommandsPerSec.emit(ils.Metrics()) mb.metricMongodbConnectionCount.emit(ils.Metrics()) @@ -2795,6 +3005,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbOperationLatencyTime.emit(ils.Metrics()) mb.metricMongodbOperationReplCount.emit(ils.Metrics()) mb.metricMongodbOperationTime.emit(ils.Metrics()) + mb.metricMongodbPageFaults.emit(ils.Metrics()) mb.metricMongodbQueriesPerSec.emit(ils.Metrics()) mb.metricMongodbReplCommandsPerSec.emit(ils.Metrics()) mb.metricMongodbReplDeletesPerSec.emit(ils.Metrics()) @@ -2806,6 +3017,7 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricMongodbStorageSize.emit(ils.Metrics()) mb.metricMongodbUpdatesPerSec.emit(ils.Metrics()) mb.metricMongodbUptime.emit(ils.Metrics()) + mb.metricMongodbWtcacheBytesRead.emit(ils.Metrics()) for _, op := range options { op.apply(rm) @@ -2847,11 +3059,21 @@ func (mb *MetricsBuilder) RecordMongodbActiveWritesDataPoint(ts pcommon.Timestam mb.metricMongodbActiveWrites.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbCacheDirtyPercentDataPoint adds a data point to 
mongodb.cache.dirty.percent metric. +func (mb *MetricsBuilder) RecordMongodbCacheDirtyPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCacheDirtyPercent.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue AttributeType) { mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue.String()) } +// RecordMongodbCacheUsedPercentDataPoint adds a data point to mongodb.cache.used.percent metric. +func (mb *MetricsBuilder) RecordMongodbCacheUsedPercentDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricMongodbCacheUsedPercent.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric. func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val) @@ -3007,6 +3229,11 @@ func (mb *MetricsBuilder) RecordMongodbOperationTimeDataPoint(ts pcommon.Timesta mb.metricMongodbOperationTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue.String()) } +// RecordMongodbPageFaultsDataPoint adds a data point to mongodb.page_faults metric. +func (mb *MetricsBuilder) RecordMongodbPageFaultsDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbPageFaults.recordDataPoint(mb.startTime, ts, val) +} + // RecordMongodbQueriesPerSecDataPoint adds a data point to mongodb.queries_per_sec metric. 
func (mb *MetricsBuilder) RecordMongodbQueriesPerSecDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbQueriesPerSec.recordDataPoint(mb.startTime, ts, val) @@ -3062,6 +3289,11 @@ func (mb *MetricsBuilder) RecordMongodbUptimeDataPoint(ts pcommon.Timestamp, val mb.metricMongodbUptime.recordDataPoint(mb.startTime, ts, val) } +// RecordMongodbWtcacheBytesReadDataPoint adds a data point to mongodb.wtcache.bytes.read metric. +func (mb *MetricsBuilder) RecordMongodbWtcacheBytesReadDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricMongodbWtcacheBytesRead.recordDataPoint(mb.startTime, ts, val) +} + // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go index d753e63ba597..d95f21181e90 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_test.go @@ -76,10 +76,18 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbActiveWritesDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbCacheDirtyPercentDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCacheOperationsDataPoint(ts, 1, AttributeTypeHit) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbCacheUsedPercentDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbCollectionCountDataPoint(ts, 1) @@ -198,6 +206,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbOperationTimeDataPoint(ts, 1, AttributeOperationInsert) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbPageFaultsDataPoint(ts, 1) + 
defaultMetricsCount++ allMetricsCount++ mb.RecordMongodbQueriesPerSecDataPoint(ts, 1) @@ -241,6 +253,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordMongodbUptimeDataPoint(ts, 1) + defaultMetricsCount++ + allMetricsCount++ + mb.RecordMongodbWtcacheBytesReadDataPoint(ts, 1) + rb := mb.NewResourceBuilder() rb.SetDatabase("database-val") rb.SetServerAddress("server.address-val") @@ -295,6 +311,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.cache.dirty.percent": + assert.False(t, validatedMetrics["mongodb.cache.dirty.percent"], "Found a duplicate in the metrics slice: mongodb.cache.dirty.percent") + validatedMetrics["mongodb.cache.dirty.percent"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The percentage of WiredTiger cache that is dirty.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.cache.operations": assert.False(t, validatedMetrics["mongodb.cache.operations"], "Found a duplicate in the metrics slice: mongodb.cache.operations") validatedMetrics["mongodb.cache.operations"] = true @@ -312,6 +340,18 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("type") assert.True(t, ok) assert.EqualValues(t, "hit", attrVal.Str()) + case "mongodb.cache.used.percent": + assert.False(t, validatedMetrics["mongodb.cache.used.percent"], "Found a duplicate in the metrics slice: mongodb.cache.used.percent") + validatedMetrics["mongodb.cache.used.percent"] = true + assert.Equal(t, 
pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "The percentage of WiredTiger cache in use.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) case "mongodb.collection.count": assert.False(t, validatedMetrics["mongodb.collection.count"], "Found a duplicate in the metrics slice: mongodb.collection.count") validatedMetrics["mongodb.collection.count"] = true @@ -780,6 +820,20 @@ func TestMetricsBuilder(t *testing.T) { attrVal, ok := dp.Attributes().Get("operation") assert.True(t, ok) assert.EqualValues(t, "insert", attrVal.Str()) + case "mongodb.page_faults": + assert.False(t, validatedMetrics["mongodb.page_faults"], "Found a duplicate in the metrics slice: mongodb.page_faults") + validatedMetrics["mongodb.page_faults"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of page faults.", ms.At(i).Description()) + assert.Equal(t, "{faults}", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) + dp := ms.At(i).Sum().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) case "mongodb.queries_per_sec": assert.False(t, validatedMetrics["mongodb.queries_per_sec"], "Found a duplicate in the metrics slice: mongodb.queries_per_sec") validatedMetrics["mongodb.queries_per_sec"] = true @@ -912,12 +966,14 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, "The amount 
of time that the server has been running.", ms.At(i).Description()) assert.Equal(t, "ms", ms.At(i).Unit()) assert.True(t, ms.At(i).Sum().IsMonotonic()) - assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) - dp := ms.At(i).Sum().DataPoints().At(0) - assert.Equal(t, start, dp.StartTimestamp()) - assert.Equal(t, ts, dp.Timestamp()) - assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) - assert.Equal(t, int64(1), dp.IntValue()) + case "mongodb.wtcache.bytes.read": + assert.False(t, validatedMetrics["mongodb.wtcache.bytes.read"], "Found a duplicate in the metrics slice: mongodb.wtcache.bytes.read") + validatedMetrics["mongodb.wtcache.bytes.read"] = true + assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) + assert.Equal(t, "The number of bytes read into the WiredTiger cache.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) } } }) diff --git a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml index e9f47c38039b..75316c9a30e2 100644 --- a/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/mongodbreceiver/internal/metadata/testdata/config.yaml @@ -5,8 +5,12 @@ all_set: enabled: true mongodb.active.writes: enabled: true + mongodb.cache.dirty.percent: + enabled: true mongodb.cache.operations: enabled: true + mongodb.cache.used.percent: + enabled: true mongodb.collection.count: enabled: true mongodb.commands_per_sec: @@ -69,6 +73,8 @@ all_set: enabled: true mongodb.operation.time: enabled: true + mongodb.page_faults: + enabled: true mongodb.queries_per_sec: enabled: true mongodb.repl_commands_per_sec: @@ -91,6 +97,8 @@ all_set: enabled: true mongodb.uptime: enabled: true + mongodb.wtcache.bytes.read: + enabled: true resource_attributes: database: enabled: true @@ -104,8 +112,12 @@ 
none_set: enabled: false mongodb.active.writes: enabled: false + mongodb.cache.dirty.percent: + enabled: false mongodb.cache.operations: enabled: false + mongodb.cache.used.percent: + enabled: false mongodb.collection.count: enabled: false mongodb.commands_per_sec: @@ -168,6 +180,8 @@ none_set: enabled: false mongodb.operation.time: enabled: false + mongodb.page_faults: + enabled: false mongodb.queries_per_sec: enabled: false mongodb.repl_commands_per_sec: @@ -190,6 +204,8 @@ none_set: enabled: false mongodb.uptime: enabled: false + mongodb.wtcache.bytes.read: + enabled: false resource_attributes: database: enabled: false diff --git a/receiver/mongodbreceiver/metadata.yaml b/receiver/mongodbreceiver/metadata.yaml index c4fefdd5cbdb..0e6b8626bf24 100644 --- a/receiver/mongodbreceiver/metadata.yaml +++ b/receiver/mongodbreceiver/metadata.yaml @@ -480,6 +480,38 @@ metrics: aggregation_temporality: cumulative monotonic: false attributes: [] + mongodb.wtcache.bytes.read: + description: The number of bytes read into the WiredTiger cache. + unit: "By" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + attributes: [] + mongodb.cache.dirty.percent: + description: The percentage of WiredTiger cache that is dirty. + unit: "1" + enabled: true + gauge: + value_type: double + attributes: [] + mongodb.cache.used.percent: + description: The percentage of WiredTiger cache in use. + unit: "1" + enabled: true + gauge: + value_type: double + attributes: [] + mongodb.page_faults: + description: The number of page faults. 
+ unit: "{faults}" + enabled: true + sum: + value_type: int + aggregation_temporality: cumulative + monotonic: true + attributes: [] tests: config: diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index bcf9be33a7d9..5a0b0a16afba 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -392,6 +392,57 @@ func (s *mongodbScraper) recordActiveReads(now pcommon.Timestamp, doc bson.M, er s.mb.RecordMongodbActiveReadsDataPoint(now, val) } +func (s *mongodbScraper) recordWTCacheBytes(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"wiredTiger", "cache", "bytes read into cache"} + metricName := "mongodb.wtcache.bytes.read" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbWtcacheBytesReadDataPoint(now, val) +} + +func (s *mongodbScraper) recordCachePercentages(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + wt, ok := doc["wiredTiger"].(bson.M) + if !ok { + errs.AddPartial(2, errors.New("failed to find wiredTiger metrics")) + return + } + + cache, ok := wt["cache"].(bson.M) + if !ok { + errs.AddPartial(2, errors.New("failed to find cache metrics")) + return + } + + // Calculate dirty percentage + trackedDirtyBytes, err1 := collectMetric(cache, []string{"tracked dirty bytes in the cache"}) + maxBytes, err2 := collectMetric(cache, []string{"maximum bytes configured"}) + if err1 == nil && err2 == nil && maxBytes > 0 { + dirtyPercent := float64(trackedDirtyBytes) / float64(maxBytes) * 100 + s.mb.RecordMongodbCacheDirtyPercentDataPoint(now, dirtyPercent) + } + + // Calculate used percentage + bytesInUse, err3 := collectMetric(cache, []string{"bytes currently in the cache"}) + if err3 == nil && maxBytes > 0 { + usedPercent := float64(bytesInUse) / float64(maxBytes) * 100 + s.mb.RecordMongodbCacheUsedPercentDataPoint(now, 
usedPercent) + } +} + +func (s *mongodbScraper) recordPageFaults(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { + metricPath := []string{"extra_info", "page_faults"} + metricName := "mongodb.page_faults" + val, err := collectMetric(doc, metricPath) + if err != nil { + errs.AddPartial(1, fmt.Errorf(collectMetricError, metricName, err)) + return + } + s.mb.RecordMongodbPageFaultsDataPoint(now, val) +} + func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M, errs *scrapererror.ScrapeErrors) { storageEngine, err := dig(doc, []string{"storageEngine", "name"}) if err != nil { @@ -710,6 +761,9 @@ func collectMetric(document bson.M, path []string) (int64, error) { } func dig(document bson.M, path []string) (any, error) { + if len(path) == 0 { + return nil, errKeyNotFound + } curItem, remainingPath := path[0], path[1:] value := document[curItem] if value == nil { diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 270da1a67954..465b39f7d535 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -261,6 +261,9 @@ func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M s.recordActiveWrites(now, document, errs) s.recordActiveReads(now, document, errs) s.recordFlushesPerSecond(now, document, errs) + s.recordWTCacheBytes(now, document, errs) + s.recordCachePercentages(now, document, errs) + s.recordPageFaults(now, document, errs) } func (s *mongodbScraper) recordIndexStats(now pcommon.Timestamp, indexStats []bson.M, databaseName string, collectionName string, errs *scrapererror.ScrapeErrors) { diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index 32199c49e337..d8441c70489f 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -115,6 +115,9 @@ var ( "failed to collect metric mongodb.active.reads: could not find key for metric", 
"failed to collect metric mongodb.active.writes: could not find key for metric", "failed to collect metric mongodb.flushes_per_sec: could not find key for metric", + "failed to collect metric mongodb.page_faults: could not find key for metric", + "failed to collect metric mongodb.wtcache.bytes.read: could not find key for metric", + "failed to find wiredTiger metrics", }, "; ")) errAllClientFailedFetch = errors.New( strings.Join( diff --git a/receiver/mongodbreceiver/testdata/admin.json b/receiver/mongodbreceiver/testdata/admin.json index a51a152fe6c9..872b3d0cced7 100644 --- a/receiver/mongodbreceiver/testdata/admin.json +++ b/receiver/mongodbreceiver/testdata/admin.json @@ -392,7 +392,8 @@ "wiredTiger": { "cache": { "pages read into cache": 14, - "pages requested from the cache": 215 + "pages requested from the cache": 215, + "bytes read into cache": 10 }, "session": { "open session count": { diff --git a/receiver/mongodbreceiver/testdata/scraper/expected.yaml b/receiver/mongodbreceiver/testdata/scraper/expected.yaml index f975db0998e8..d44a95bd5479 100644 --- a/receiver/mongodbreceiver/testdata/scraper/expected.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/expected.yaml @@ -148,114 +148,6 @@ resourceMetrics: timeUnixNano: "2000000" isMonotonic: true unit: '{operations}' - # - description: The total number of queries per second. - # name: mongodb.queries_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{query}/s}' - # - description: The total number of insertions per second. - # name: mongodb.inserts_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{insert}/s}' - # - description: The total number of commands per second. 
- # name: mongodb.commands_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{command}/s}' - # - description: The total number of getmores per second. - # name: mongodb.getmores_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{getmore}/s}' - # - description: The total number of deletes per second. - # name: mongodb.deletes_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{delete}/s}' - # - description: The total number of updates per second. - # name: mongodb.updates_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{update}/s}' - # - description: The total number replicated of queries per second. - # name: mongodb.repl_queries_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{query}/s}' - # - description: The total number replicated of insertions per second. - # name: mongodb.repl_inserts_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{insert}/s}' - # - description: The total number of replicated commands per second. - # name: mongodb.repl_commands_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{command}/s}' - # - description: The total number of replicated getmores per second. 
- # name: mongodb.repl_getmores_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{getmore}/s}' - # - description: The total number of replicated deletes per second. - # name: mongodb.repl_deletes_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{delete}/s}' - # - description: The total number of replicated updates per second. - # name: mongodb.repl_updates_per_sec - # gauge: - # aggregationTemporality: 2 - # dataPoints: - # - asInt: "100" - # startTimeUnixNano: "1000000" - # timeUnixNano: "2000000" - # unit: '{update}/s}' - name: mongodb.active.reads description: The number of read operations currently being processed. sum: @@ -274,6 +166,26 @@ resourceMetrics: timeUnixNano: "2000000" aggregationTemporality: 2 unit: '{writes}' + - description: The number of bytes read into the WiredTiger cache. + name: mongodb.wtcache.bytes.read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" # Value from test fixture + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: 'By' + - description: The number of page faults. + name: mongodb.page_faults + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" # Value from test fixture (extra_info.page_faults) + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' - description: The latency of operations. 
gauge: dataPoints: diff --git a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml index fd325aa3a1a0..879caace423a 100644 --- a/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml +++ b/receiver/mongodbreceiver/testdata/scraper/partial_scrape.yaml @@ -166,7 +166,27 @@ resourceMetrics: startTimeUnixNano: "1000000" timeUnixNano: "2000000" aggregationTemporality: 2 - unit: '{writes}' + unit: '{writes}' + - description: The number of bytes read into the WiredTiger cache. + name: mongodb.wtcache.bytes.read + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "10" # Value from test fixture + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: 'By' + - description: The number of page faults. + name: mongodb.page_faults + sum: + aggregationTemporality: 2 + dataPoints: + - asInt: "0" # Value from test fixture (extra_info.page_faults) + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + isMonotonic: true + unit: '{faults}' - description: The latency of operations. gauge: dataPoints: