
prometheus.remote_write.metrics_service.receiver Does not exist or is out of scope #2319

chreniuc opened this issue Dec 30, 2024 · 1 comment
chreniuc commented Dec 30, 2024

What's wrong?

I tried integrating Alloy on my server (Ubuntu 24.04) with Grafana Cloud, but ran into problems when adding the remote configuration for the node exporter, MySQL, and Loki logs integrations.

Steps to reproduce

I added the node exporter configuration (Connections -> Add new connection -> Linux Server) via remote configuration (the same thing happens when I manually append it to the existing config file on the server), and got the following error:

	ts=2024-12-30T11:44:35.814363889Z level=error msg="failed to evaluate config" controller_path=/ controller_id=remotecfg trace_id=1d02509b4cfb1c88ba49cf76a5db8be2 node=NodeExporter.default err="updating custom component: 40:17: component \"prometheus.remote_write.metrics_service.receiver\" does not exist or is out of scope"

The same thing happens when I try to add the MySQL or Loki logs configuration:

# Loki
ts=2024-12-30T11:44:35.814602786Z level=error msg="failed to fetch remote configuration from the API" service=remotecfg err="87:17: component \"loki.write.grafana_cloud_loki.receiver\" does not exist or is out of scope (and 2 more diagnostics)"

# Mysql
ts=2024-12-30T11:44:35.813944533Z level=error msg="failed to evaluate config" controller_path=/ controller_id=remotecfg trace_id=1d02509b4cfb1c88ba49cf76a5db8be2 node=mariadb.default err="updating custom component: 87:17: component \"loki.write.grafana_cloud_loki.receiver\" does not exist or is out of scope (and 1 more diagnostics)"
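
For reference, the generated node exporter snippet forwards metrics to a prometheus.remote_write component named metrics_service. I would expect that component to look roughly like the block below (the endpoint URL and username here are placeholders taken from the self-monitoring blocks further down, not the actual generated values), but no component with that name exists anywhere in my configuration:

prometheus.remote_write "metrics_service" {
	endpoint {
		// Placeholder endpoint; the real URL would come from the Grafana Cloud stack.
		url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push"

		basic_auth {
			username = "XXXXXX"
			password = sys.env("GCLOUD_RW_API_KEY")
		}
	}
}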

System information

Ubuntu 24.04

Software version

Alloy v1.5.1 (branch: HEAD, revision: dc8a365)

Configuration

declare "NodeExporter" {
	discovery.relabel "integrations_node_exporter" {
		targets = prometheus.exporter.unix.integrations_node_exporter.targets

		rule {
			target_label = "instance"
			replacement  = constants.hostname
		}

		rule {
			target_label = "job"
			replacement  = "integrations/node_exporter"
		}
	}

	prometheus.exporter.unix "integrations_node_exporter" {
		disable_collectors = ["ipvs", "btrfs", "infiniband", "xfs", "zfs"]

		filesystem {
			fs_types_exclude     = "^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|tmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
			mount_points_exclude = "^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/)"
			mount_timeout        = "5s"
		}

		netclass {
			ignored_devices = "^(veth.*|cali.*|[a-f0-9]{15})$"
		}

		netdev {
			device_exclude = "^(veth.*|cali.*|[a-f0-9]{15})$"
		}
	}

	prometheus.scrape "integrations_node_exporter" {
		targets    = discovery.relabel.integrations_node_exporter.output
		forward_to = [prometheus.relabel.integrations_node_exporter.receiver]
	}

	prometheus.relabel "integrations_node_exporter" {
		forward_to = [prometheus.remote_write.metrics_service.receiver]

		rule {
			source_labels = ["__name__"]
			regex         = "node_scrape_collector_.+"
			action        = "drop"
		}
	}
}

NodeExporter "default" { }

declare "mariadb" {
	prometheus.exporter.mysql "integrations_mysqld_exporter" {
		data_source_name = "XXXXXX:XXXXXXX@(localhost:3306)/"
	}

	discovery.relabel "integrations_mysqld_exporter" {
		targets = prometheus.exporter.mysql.integrations_mysqld_exporter.targets

		rule {
			target_label = "job"
			replacement  = "integrations/mysql"
		}

		rule {
			target_label = "instance"
			replacement  = constants.hostname
		}
	}

	prometheus.scrape "integrations_mysqld_exporter" {
		targets    = discovery.relabel.integrations_mysqld_exporter.output
		forward_to = [prometheus.remote_write.metrics_service.receiver]
		job_name   = "integrations/mysqld_exporter"
	}

	local.file_match "logs_integrations_mysql" {
		path_targets = [{
			__address__ = "localhost",
			__path__    = "/var/log/mysql/*.log",
			instance    = constants.hostname,
			job         = "integrations/mysql",
		}]
	}

	loki.process "logs_integrations_mysql" {
		forward_to = [loki.write.grafana_cloud_loki.receiver]

		stage.regex {
			expression = "(?P<timestamp>.+) (?P<thread>[\\d]+) \\[(?P<label>.+?)\\]( \\[(?P<err_code>.+?)\\] \\[(?P<subsystem>.+?)\\])? (?P<msg>.+)"
		}

		stage.labels {
			values = {
				err_code  = null,
				level     = "label",
				subsystem = null,
			}
		}

		stage.drop {
			drop_counter_reason = "drop empty lines"
			expression          = "^ *$"
		}
	}

	loki.source.file "logs_integrations_mysql" {
		targets    = local.file_match.logs_integrations_mysql.targets
		forward_to = [loki.process.logs_integrations_mysql.receiver]
	}
}

mariadb "default" { }

declare "self_monitoring_logs_linux" {

	// THIS IS A GENERATED REMOTE CONFIGURATION.
	//
	//   * You can edit the contents and matchers for this configuration without them being overwritten.
	//   * If you delete ALL generated configurations, the latest default versions will be recreated.
	//   * This configuration requires the following environment variables to be set wherever alloy is running:
	//     * GCLOUD_RW_API_KEY: The Grafana Cloud API key with write access to Loki.
	//     * GCLOUD_FM_COLLECTOR_ID: A unique collector ID matching the remotecfg id argument value.

	// Write logs to your Grafana Cloud Loki instance.
	loki.write "grafana_cloud_loki" {
		endpoint {
			url = "https://logs-prod-012.grafana.net/loki/api/v1/push"

			basic_auth {
				username = "XXXXXX"
				password = sys.env("GCLOUD_RW_API_KEY")
			}
		}
	}

	// Read Alloy logs when running as a systemd service with the following additional labels:
	//   * job: "integrations/alloy" is compatible with Grafana Cloud's Alloy Health Integrations.
	//   * collector_id: The unique collector ID matching the remotecfg id argument value.
	//                   Used to match collector-specific metrics to power the 'Collector
	//                   Health' section of the Fleet Management UI.
	loki.source.journal "alloy_logs_unit" {
		matches    = "_SYSTEMD_UNIT=alloy.service"
		forward_to = [loki.write.grafana_cloud_loki.receiver]
		labels     = {"job" = "integrations/alloy", "collector_id" = sys.env("GCLOUD_FM_COLLECTOR_ID")}
	}

	// Read Alloy logs from syslog with the following additional labels:
	//   * job: "integrations/alloy" is compatible with Grafana Cloud's Alloy Health Integrations.
	//   * collector_id: The unique collector ID matching the remotecfg id argument value.
	//                   Used to match collector-specific metrics to power the 'Collector
	//                   Health' section of the Fleet Management UI.
	loki.source.journal "alloy_logs_tag" {
		matches    = "SYSLOG_IDENTIFIER=alloy"
		forward_to = [loki.write.grafana_cloud_loki.receiver]
		labels     = {"job" = "integrations/alloy", "collector_id" = sys.env("GCLOUD_FM_COLLECTOR_ID")}
	}
}

self_monitoring_logs_linux "default" { }

declare "self_monitoring_metrics" {

	// THIS IS A GENERATED REMOTE CONFIGURATION.
	//
	//   * You can edit the contents and matchers for this configuration without them being overwritten.
	//   * If you delete ALL generated configurations, the latest default versions will be recreated.
	//   * This configuration requires the following environment variables to be set wherever alloy is running:
	//     * GCLOUD_RW_API_KEY: The Grafana Cloud API key with write access to Loki.
	//     * GCLOUD_FM_COLLECTOR_ID: A unique collector ID matching the remotecfg id argument value.

	// Export Alloy metrics in memory.
	prometheus.exporter.self "integrations_alloy_health" { }

	// Target Alloy metrics with the following additional labels:
	//   * job: "integrations/alloy" is compatible with Grafana Cloud's Alloy Health Integrations.
	//   * collector_id: The unique collector ID matching the remotecfg id argument value.
	//                   Used to match collector-specific metrics to power the 'Collector
	//                   Health' section of the Fleet Management UI.
	//   * instance: The hostname of the machine running Alloy.
	discovery.relabel "integrations_alloy_health" {
		targets = prometheus.exporter.self.integrations_alloy_health.targets

		rule {
			action       = "replace"
			target_label = "collector_id"
			replacement  = sys.env("GCLOUD_FM_COLLECTOR_ID")
		}

		rule {
			target_label = "instance"
			replacement  = constants.hostname
		}

		rule {
			target_label = "job"
			replacement  = "integrations/alloy"
		}
	}

	// Scrape Alloy metrics and forward them to the remote write component.
	prometheus.scrape "integrations_alloy_health" {
		targets = array.concat(
			discovery.relabel.integrations_alloy_health.output,
		)
		forward_to = [prometheus.remote_write.default.receiver]
		job_name   = "integrations/alloy"
	}

	// Write metrics to your Grafana Cloud Prometheus instance.
	prometheus.remote_write "default" {
		endpoint {
			url = "https://prometheus-prod-24-prod-eu-west-2.grafana.net/api/prom/push"

			basic_auth {
				username = "XXXXXX"
				password = sys.env("GCLOUD_RW_API_KEY")
			}
		}
	}
}

self_monitoring_metrics "default" { }
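
As far as I can tell, loki.write "grafana_cloud_loki" is only declared inside the self_monitoring_logs_linux block, and prometheus.remote_write "metrics_service" is not declared anywhere, which seems to match the "does not exist or is out of scope" errors. As a rough illustration only (URL and credentials are placeholders copied from the self-monitoring blocks above), declaring the write component inside the block that references it would presumably make the reference resolvable, e.g. for the mariadb block:

declare "mariadb" {
	// ... existing exporter, scrape, and log components ...

	// Hypothetical local declaration so that
	// loki.write.grafana_cloud_loki.receiver resolves inside this block.
	loki.write "grafana_cloud_loki" {
		endpoint {
			url = "https://logs-prod-012.grafana.net/loki/api/v1/push"

			basic_auth {
				username = "XXXXXX"
				password = sys.env("GCLOUD_RW_API_KEY")
			}
		}
	}
}

I assume the integration pipelines are actually meant to share a single write component instead, so this is only meant to show where the references currently fail.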

Logs


dehaansa (Contributor) commented:
This is the same general problem that was raised in #2281 - Fleet Management does not work out of the box with Integrations right now. It will soon! There should be some remediation instructions in that issue.
