diff --git a/api/dash0monitoring/v1alpha1/operator_configuration_types.go b/api/dash0monitoring/v1alpha1/operator_configuration_types.go
index 19d2f6f6..2888898d 100644
--- a/api/dash0monitoring/v1alpha1/operator_configuration_types.go
+++ b/api/dash0monitoring/v1alpha1/operator_configuration_types.go
@@ -37,6 +37,12 @@ type Dash0OperatorConfigurationSpec struct {
 	//
 	// +kubebuilder:default=true
 	KubernetesInfrastructureMetricsCollectionEnabled *bool `json:"kubernetesInfrastructureMetricsCollectionEnabled,omitempty"`
+
+	// If set, the value will be added as the resource attribute k8s.cluster.name to all telemetry. This setting is
+	// optional. By default, k8s.cluster.name will not be added to telemetry.
+	//
+	// +kubebuilder:validation:Optional
+	ClusterName string `json:"clusterName,omitempty"`
 }
 
 // SelfMonitoring describes how the operator will report telemetry about its working to the backend.
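Note: combined with the export settings documented in the README changes below, a complete Dash0OperatorConfiguration resource using the new field could look like this sketch (endpoint and token are placeholders, and metadata.name is arbitrary):

```yaml
apiVersion: operator.dash0.com/v1alpha1
kind: Dash0OperatorConfiguration
metadata:
  name: dash0-operator-configuration
spec:
  export:
    dash0:
      endpoint: ingress.....dash0.com # placeholder, use your actual Dash0 ingress endpoint
      authorization:
        token: auth_... # placeholder, use your actual Dash0 auth token
  # the new optional field; if set, it is added as the resource attribute
  # k8s.cluster.name to all telemetry
  clusterName: my-kubernetes-cluster
```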
diff --git a/cmd/main.go b/cmd/main.go
index 1dfffdf3..823e9850 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -128,6 +128,7 @@ func main() {
 	var operatorConfigurationApiEndpoint string
 	var operatorConfigurationSelfMonitoringEnabled bool
 	var operatorConfigurationKubernetesInfrastructureMetricsCollectionEnabled bool
+	var operatorConfigurationClusterName string
 	var isUninstrumentAll bool
 	var metricsAddr string
 	var enableLeaderElection bool
@@ -194,6 +195,13 @@ func main() {
 		true,
 		"Whether to set kubernetesInfrastructureMetricsCollectionEnabled on the operator configuration resource; "+
 			"will be ignored if operator-configuration-endpoint is not set.")
+	flag.StringVar(
+		&operatorConfigurationClusterName,
+		"operator-configuration-cluster-name",
+		"",
+		"The clusterName to set on the operator configuration resource; will be ignored if "+
+			"operator-configuration-endpoint is not set. If set, the value will be added as the resource attribute "+
+			"k8s.cluster.name to all telemetry.")
 	flag.StringVar(
 		&metricsAddr,
 		"metrics-bind-address",
@@ -309,6 +317,7 @@ func main() {
 			SelfMonitoringEnabled: operatorConfigurationSelfMonitoringEnabled,
 			//nolint:lll
 			KubernetesInfrastructureMetricsCollectionEnabled: operatorConfigurationKubernetesInfrastructureMetricsCollectionEnabled,
+			ClusterName: operatorConfigurationClusterName,
 		}
 		if len(operatorConfigurationApiEndpoint) > 0 {
 			operatorConfiguration.ApiEndpoint = operatorConfigurationApiEndpoint
diff --git a/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml b/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml
index ef44ca7b..171166cc 100644
--- a/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml
+++ b/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml
@@ -41,6 +41,11 @@ spec:
             description: Dash0OperatorConfigurationSpec describes cluster-wide configuration
               settings for the Dash0 operator.
             properties:
+              clusterName:
+                description: |-
+                  If set, the value will be added as the resource attribute k8s.cluster.name to all telemetry. This setting is
+                  optional. By default, k8s.cluster.name will not be added to telemetry.
+                type: string
               export:
                 description: |-
                   The configuration of the default observability backend to which telemetry data will be sent by the operator, as
diff --git a/helm-chart/dash0-operator/README.md b/helm-chart/dash0-operator/README.md
index fe38b30d..a9977868 100644
--- a/helm-chart/dash0-operator/README.md
+++ b/helm-chart/dash0-operator/README.md
@@ -71,6 +71,7 @@ helm install \
   --set operator.dash0Export.endpoint=REPLACE THIS WITH YOUR DASH0 INGRESS ENDPOINT \
   --set operator.dash0Export.apiEndpoint=REPLACE THIS WITH YOUR DASH0 API ENDPOINT \
   --set operator.dash0Export.token=REPLACE THIS WITH YOUR DASH0 AUTH TOKEN \
+  --set operator.clusterName=REPLACE THIS WITH THE NAME OF YOUR CLUSTER (OPTIONAL) \
   dash0-operator \
   dash0-operator/dash0-operator
 ```
@@ -86,6 +87,7 @@ helm install \
   --set operator.dash0Export.apiEndpoint=REPLACE THIS WITH YOUR DASH0 API ENDPOINT \
   --set operator.dash0Export.secretRef.name=REPLACE THIS WITH THE NAME OF AN EXISTING KUBERNETES SECRET \
   --set operator.dash0Export.secretRef.key=REPLACE THIS WITH THE PROPERTY KEY IN THAT SECRET \
+  --set operator.clusterName=REPLACE THIS WITH THE NAME OF YOUR CLUSTER (OPTIONAL) \
   dash0-operator \
   dash0-operator/dash0-operator
 ```
@@ -116,7 +118,7 @@ That is, providing `--set operator.dash0Export.enabled=true` and the other backe
 On its own, the operator will not do much.
 To actually have the operator monitor your cluster, two more things need to be set up:
 1. a [Dash0 backend connection](#configuring-the-dash0-backend-connection) has to be configured and
-2. monitoring workloads has to be [enabled per namespace](#enable-dash0-monitoring-for-a-namespace).
+2. monitoring workloads and collecting metrics have to be [enabled per namespace](#enable-dash0-monitoring-for-a-namespace).
 
 Both steps are described in the following sections.
 
@@ -147,6 +149,8 @@ spec:
         token: auth_... # TODO needs to be replaced with the actual value, see below
 
       apiEndpoint: https://api.....dash0.com # TODO needs to be replaced with the actual value, see below
+
+  clusterName: my-kubernetes-cluster # optional, see below
 ```
 
 Here is a list of configuration options for this resource:
@@ -190,6 +194,8 @@ Here is a list of configuration options for this resource:
 * `spec.kubernetesInfrastructureMetricsCollectionEnabled`: If enabled, the operator will collect Kubernetes
   infrastructure metrics.
   This setting is optional, it defaults to true.
+* `spec.clusterName`: If set, the value will be added as the resource attribute `k8s.cluster.name` to all telemetry.
+  This setting is optional. By default, `k8s.cluster.name` will not be added to telemetry.
 
 After providing the required values (at least `endpoint` and `authorization`), save the file and apply the resource to
 the Kubernetes cluster you want to monitor:
@@ -228,6 +234,9 @@ If you want to monitor the `default` namespace with Dash0, use the following com
 kubectl apply -f dash0-monitoring.yaml
 ```
 
+Note: Collecting Kubernetes infrastructure metrics (which are not necessarily related to specific workloads or
+namespaces) also requires that at least one namespace has a Dash0Monitoring resource.
+
 ### Additional Configuration Per Namespace
 
 The Dash0 monitoring resource supports additional configuration settings:
@@ -452,6 +461,8 @@ spec:
 
 ### Configure Metrics Collection
 
+Note: Collecting metrics requires that at least one namespace has a Dash0Monitoring resource.
+
 By default, the operator collects metrics as follows:
 * The operator collects node, pod, container, and volume metrics from the API server on
   [kubelets](https://kubernetes.io/docs/concepts/architecture/#kubelet)
@@ -461,9 +472,8 @@ By default, the operator collects metrics as follows:
   via the
   [Kubernetes Cluster Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/k8sclusterreceiver/README.md)
   This can be disabled per cluster by setting `kubernetesInfrastructureMetricsCollectionEnabled: false` in the Dash0
-  operator configuration resource (or by using
-  `--operator-configuration-kubernetes-infrastructure-metrics-collection-enabled=false` when deploying the operator
-  configuration resource via the Helm chart).
+  operator configuration resource (or by setting the Helm value `operator.kubernetesInfrastructureMetricsCollectionEnabled`
+  to `false` when deploying the operator configuration resource via the Helm chart).
 * The Dash0 operator scrapes Prometheus endpoints on pods annotated with the `prometheus.io/*` annotations, as
   described in the section [Scraping Prometheus endpoints](#scraping-prometheus-endpoints). This can be disabled per
   namespace by explicitly setting `prometheusScrapingEnabled: false` in the Dash0 monitoring resource.
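Note: a minimal Dash0Monitoring resource that satisfies the requirement stated in the new notes above could look like the following sketch (the kind and apiVersion are assumed to use the same group and version as the operator configuration CRD; the name is arbitrary):

```yaml
apiVersion: operator.dash0.com/v1alpha1
kind: Dash0Monitoring
metadata:
  name: dash0-monitoring-resource
  namespace: default # the namespace you want to monitor
```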
diff --git a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml
index 9f878c8d..f6bc669c 100644
--- a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml
+++ b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml
@@ -41,6 +41,11 @@ spec:
             description: Dash0OperatorConfigurationSpec describes cluster-wide configuration
               settings for the Dash0 operator.
             properties:
+              clusterName:
+                description: |-
+                  If set, the value will be added as the resource attribute k8s.cluster.name to all telemetry. This setting is
+                  optional. By default, k8s.cluster.name will not be added to telemetry.
+                type: string
               export:
                 description: |-
                   The configuration of the default observability backend to which telemetry data will be sent by the operator, as
diff --git a/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml b/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml
index 9b5efdcb..952a9cc5 100644
--- a/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml
+++ b/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml
@@ -107,7 +107,10 @@ spec:
 {{- end }}
         - --operator-configuration-self-monitoring-enabled={{ .Values.operator.selfMonitoringEnabled }}
         - --operator-configuration-kubernetes-infrastructure-metrics-collection-enabled={{ .Values.operator.kubernetesInfrastructureMetricsCollectionEnabled }}
+{{- if .Values.operator.clusterName }}
+        - --operator-configuration-cluster-name={{ .Values.operator.clusterName }}
 {{- end }}
+{{- end }}{{/* closes if .Values.operator.dash0Export.enabled */}}
 {{- if .Values.operator.dash0Export.dataset }}
         - --operator-configuration-dataset={{ .Values.operator.dash0Export.dataset }}
 {{- end }}
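Note: with `operator.dash0Export.enabled=true` and `operator.clusterName` set, the template change above renders the controller container args roughly as follows (a sketch derived from the Helm unit test expectations further down in this diff; unrelated args omitted):

```yaml
args:
  - --operator-configuration-endpoint=https://ingress.dash0.com
  - --operator-configuration-token=very-secret-dash0-auth-token
  - --operator-configuration-api-endpoint=https://api.dash0.com
  - --operator-configuration-self-monitoring-enabled=true
  - --operator-configuration-kubernetes-infrastructure-metrics-collection-enabled=true
  - --operator-configuration-cluster-name=cluster-name
```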
diff --git a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap
index 6258db7f..31d6df28 100644
--- a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap
+++ b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap
@@ -40,6 +40,11 @@ custom resource definition should match snapshot:
                 spec:
                   description: Dash0OperatorConfigurationSpec describes cluster-wide configuration settings for the Dash0 operator.
                   properties:
+                    clusterName:
+                      description: |-
+                        If set, the value will be added as the resource attribute k8s.cluster.name to all telemetry. This setting is
+                        optional. By default, k8s.cluster.name will not be added to telemetry.
+                      type: string
                     export:
                       description: |-
                         The configuration of the default observability backend to which telemetry data will be sent by the operator, as
diff --git a/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml b/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml
index 1f76559d..a2a20184 100644
--- a/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml
+++ b/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml
@@ -301,6 +301,38 @@ tests:
           path: spec.template.spec.containers[0].args[8]
           value: --operator-configuration-dataset=test-dataset
 
+  - it: should add args for creating an operator configuration resource with a cluster name to the deployment
+    documentSelector:
+      path: metadata.name
+      value: dash0-operator-controller
+    set:
+      operator:
+        dash0Export:
+          enabled: true
+          endpoint: https://ingress.dash0.com
+          token: "very-secret-dash0-auth-token"
+          apiEndpoint: https://api.dash0.com
+        clusterName: "cluster-name"
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].args[3]
+          value: --operator-configuration-endpoint=https://ingress.dash0.com
+      - equal:
+          path: spec.template.spec.containers[0].args[4]
+          value: --operator-configuration-token=very-secret-dash0-auth-token
+      - equal:
+          path: spec.template.spec.containers[0].args[5]
+          value: --operator-configuration-api-endpoint=https://api.dash0.com
+      - equal:
+          path: spec.template.spec.containers[0].args[6]
+          value: --operator-configuration-self-monitoring-enabled=true
+      - equal:
+          path: spec.template.spec.containers[0].args[7]
+          value: --operator-configuration-kubernetes-infrastructure-metrics-collection-enabled=true
+      - equal:
+          path: spec.template.spec.containers[0].args[8]
+          value: --operator-configuration-cluster-name=cluster-name
+
   - it: should add args for creating an operator configuration resource with a secretRef to the deployment
     documentSelector:
       path: metadata.name
diff --git a/helm-chart/dash0-operator/values.yaml b/helm-chart/dash0-operator/values.yaml
index 07a22d56..965b42ec 100644
--- a/helm-chart/dash0-operator/values.yaml
+++ b/helm-chart/dash0-operator/values.yaml
@@ -80,6 +80,13 @@ operator:
   # resource will be created by the Helm chart then.
   kubernetesInfrastructureMetricsCollectionEnabled: true
 
+  # If set, the value will be added as the resource attribute k8s.cluster.name to all telemetry. This setting is
+  # optional. By default, the resource attribute k8s.cluster.name will not be added.
+  #
+  # This setting has no effect if operator.dash0Export.enabled is false, as no Dash0OperatorConfiguration
+  # resource will be created by the Helm chart then.
+  clusterName: ""
+
   # number of replica for the controller manager deployment
   replicaCount: 1
 
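Note: instead of individual `--set` flags, the new value can also be provided via a values file (a sketch; endpoint and token are placeholders):

```yaml
# my-values.yaml
operator:
  dash0Export:
    enabled: true
    endpoint: ingress.....dash0.com # placeholder
    token: auth_... # placeholder
  clusterName: my-kubernetes-cluster
```

For example: `helm install --values my-values.yaml dash0-operator dash0-operator/dash0-operator`.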
diff --git a/images/instrumentation/test/build_time_profiling b/images/instrumentation/test/build_time_profiling
index f8002da7..196d8fd2 100644
--- a/images/instrumentation/test/build_time_profiling
+++ b/images/instrumentation/test/build_time_profiling
@@ -9,7 +9,7 @@ store_build_step_duration() {
   local step_label=$1
   local start=$2
   local end=$(date +%s)
-  local duration=$(($end-$start))
+  local duration=$((end - start))
   all_build_step_times["$step_label"]="$duration"
 }
 
@@ -17,20 +17,20 @@ print_build_step_duration() {
   local step_label=$1
   local start=$2
   local end=$(date +%s)
-  local duration=$(($end-$start))
+  local duration=$((end - start))
   printf "[build time] $step_label:"'\t'"$(print_time "$duration")"'\n'
 }
 
 print_total_build_time_info() {
   local total_build_end=$(date +%s)
-  local total_build_duration=$(($total_build_end-$start_time_build))
+  local total_build_duration=$((total_build_end - start_time_build))
   local accounted_for_total=0
   echo
   echo "**build step durations**"
   for label in "${!all_build_step_times[@]}"; do
     local d="${all_build_step_times[$label]}"
     printf "[build time] $label:"'\t'"$(print_time "$d")"'\n'
-    accounted_for_total=$(($accounted_for_total+$d))
+    accounted_for_total=$((accounted_for_total + d))
   done
 
   echo ----------------------------------------
@@ -38,8 +38,8 @@ print_total_build_time_info() {
   print_build_step_duration "**total build time**" "$start_time_build"
 
   # check that we are actually measuring all relevant build steps:
-  local unaccounted=$(($total_build_duration-$accounted_for_total))
-  printf "[build time] build time account for by individual build steps:"'\t'"$(print_time "$accounted_for_total")"'\n'
+  local unaccounted=$((total_build_duration - accounted_for_total))
+  printf "[build time] build time accounted for by individual build steps:"'\t'"$(print_time "$accounted_for_total")"'\n'
   printf "[build time] build time unaccounted for by individual build steps:"'\t'"$(print_time "$unaccounted")"'\n'
 }
 
diff --git a/images/instrumentation/test/test-all.sh b/images/instrumentation/test/test-all.sh
index defec2db..736dc50f 100755
--- a/images/instrumentation/test/test-all.sh
+++ b/images/instrumentation/test/test-all.sh
@@ -28,6 +28,7 @@ all_docker_platforms=linux/arm64,linux/amd64
 script_dir="test"
 exit_code=0
 summary=""
+slow_test_threshold_seconds=15
 
 build_or_pull_instrumentation_image() {
   # shellcheck disable=SC2155
@@ -68,10 +69,15 @@ build_or_pull_instrumentation_image() {
 }
 
 run_tests_for_runtime() {
-  local runtime="${1:-}"
-  local image_name_test="${2:-}"
-  local base_image="${3:?}"
+  local docker_platform="${1:-}"
+  local runtime="${2:-}"
+  local image_name_test="${3:-}"
+  local base_image="${4:-}"
 
+  if [[ -z $docker_platform ]]; then
+    echo "missing parameter: docker_platform"
+    exit 1
+  fi
   if [[ -z $runtime ]]; then
     echo "missing parameter: runtime"
     exit 1
@@ -80,6 +86,10 @@ run_tests_for_runtime() {
     echo "missing parameter: image_name_test"
     exit 1
   fi
+  if [[ -z $base_image ]]; then
+    echo "missing parameter: base_image"
+    exit 1
+  fi
 
   for t in "${script_dir}"/"${runtime}"/test-cases/*/ ; do
     # shellcheck disable=SC2155
@@ -103,6 +113,7 @@ run_tests_for_runtime() {
     esac
 
     if docker_run_output=$(docker run \
+      --platform "$docker_platform" \
       --env-file="${script_dir}/${runtime}/test-cases/${test}/.env" \
       "$image_name_test" \
       "${test_cmd[@]}" \
@@ -116,6 +127,16 @@ run_tests_for_runtime() {
       exit_code=1
       summary="$summary\n${runtime}/${base_image}\t- ${test}:\tfailed"
     fi
+
+    # shellcheck disable=SC2155
+    local end_time_test_case=$(date +%s)
+    # shellcheck disable=SC2155
+    local duration_test_case=$((end_time_test_case - start_time_test_case))
+    if [[ "$duration_test_case" -gt "$slow_test_threshold_seconds" ]]; then
+      echo "! slow test case: $image_name_test/$base_image/$test: took $duration_test_case seconds, logging output:"
+      echo "$docker_run_output"
+    fi
+
     store_build_step_duration "test case $image_name_test/$base_image/$test" "$start_time_test_case"
   done
 }
@@ -162,7 +183,7 @@ run_tests_for_architecture() {
         exit 1
       fi
       store_build_step_duration "docker build $arch/$runtime/$base_image" "$start_time_docker_build"
-      run_tests_for_runtime "${runtime}" "$image_name_test" "$base_image"
+      run_tests_for_runtime "$docker_platform" "${runtime}" "$image_name_test" "$base_image"
       echo
     done
   done
diff --git a/internal/backendconnection/otelcolresources/collector_config_maps.go b/internal/backendconnection/otelcolresources/collector_config_maps.go
index 82dc9b95..1d2905a3 100644
--- a/internal/backendconnection/otelcolresources/collector_config_maps.go
+++ b/internal/backendconnection/otelcolresources/collector_config_maps.go
@@ -20,6 +20,7 @@ type collectorConfigurationTemplateValues struct {
 	Exporters                                        []OtlpExporter
 	IgnoreLogsFromNamespaces                         []string
 	KubernetesInfrastructureMetricsCollectionEnabled bool
+	ClusterName                                      string
 	NamespacesWithPrometheusScraping                 []string
 	SelfIpReference                                  string
 	DevelopmentMode                                  bool
@@ -102,9 +103,10 @@ func assembleCollectorConfigMap(
 					config.Namespace,
 				},
 				KubernetesInfrastructureMetricsCollectionEnabled: config.KubernetesInfrastructureMetricsCollectionEnabled,
-				NamespacesWithPrometheusScraping:                 namespacesWithPrometheusScraping,
-				SelfIpReference:                                  selfIpReference,
-				DevelopmentMode:                                  config.DevelopmentMode,
+				ClusterName:                      config.ClusterName,
+				NamespacesWithPrometheusScraping: namespacesWithPrometheusScraping,
+				SelfIpReference:                  selfIpReference,
+				DevelopmentMode:                  config.DevelopmentMode,
 			})
 		if err != nil {
 			return nil, fmt.Errorf("cannot render the collector configuration template: %w", err)
diff --git a/internal/backendconnection/otelcolresources/collector_config_maps_test.go b/internal/backendconnection/otelcolresources/collector_config_maps_test.go
index eff611f1..dfc2ed61 100644
--- a/internal/backendconnection/otelcolresources/collector_config_maps_test.go
+++ b/internal/backendconnection/otelcolresources/collector_config_maps_test.go
@@ -38,27 +38,28 @@ var (
 
 var _ = Describe("The OpenTelemetry Collector ConfigMaps", func() {
 
+	testConfigs := []TableEntry{
+		Entry(
+			"for the DaemonSet",
+			testConfig{
+				assembleConfigMapFunction: assembleDaemonSetCollectorConfigMapWithoutScrapingNamespaces,
+				pipelineNames: []string{
+					"traces/downstream",
+					"metrics/downstream",
+					"logs/downstream",
+				},
+			}),
+		Entry(
+			"for the Deployment",
+			testConfig{
+				assembleConfigMapFunction: assembleDeploymentCollectorConfigMap,
+				pipelineNames: []string{
+					"metrics/downstream",
+				},
+			}),
+	}
+
 	Describe("renders exporters", func() {
-		testConfigs := []TableEntry{
-			Entry(
-				"for the DaemonSet",
-				testConfig{
-					assembleConfigMapFunction: assembleDaemonSetCollectorConfigMapWithoutScrapingNamespaces,
-					pipelineNames: []string{
-						"traces/downstream",
-						"metrics/downstream",
-						"logs/downstream",
-					},
-				}),
-			Entry(
-				"for the Deployment",
-				testConfig{
-					assembleConfigMapFunction: assembleDeploymentCollectorConfigMap,
-					pipelineNames: []string{
-						"metrics/downstream",
-					},
-				}),
-		}
 
 		DescribeTable("should fail if no exporter is configured", func(testConfig testConfig) {
 			_, err := testConfig.assembleConfigMapFunction(&oTelColConfig{
@@ -608,6 +609,71 @@ var _ = Describe("The OpenTelemetry Collector ConfigMaps", func() {
 		}, testConfigs)
 	})
 
+	DescribeTable("should not render resource processor if the cluster name has not been set", func(testConfig testConfig) {
+		configMap, err := testConfig.assembleConfigMapFunction(&oTelColConfig{
+			Namespace:  namespace,
+			NamePrefix: namePrefix,
+			Export: dash0v1alpha1.Export{
+				Dash0: &dash0v1alpha1.Dash0Configuration{
+					Endpoint: EndpointDash0Test,
+					Authorization: dash0v1alpha1.Authorization{
+						Token: &AuthorizationTokenTest,
+					},
+				},
+			},
+		}, false)
+
+		Expect(err).ToNot(HaveOccurred())
+		collectorConfig := parseConfigMapContent(configMap)
+		resourceProcessor := readFromMap(collectorConfig, []string{"processors", "resource"})
+		Expect(resourceProcessor).To(BeNil())
+		verifyProcessorDoesNotAppearInAnyPipeline(collectorConfig, "resource")
+		selfMonitoringTelemetryResource := readFromMap(
+			collectorConfig,
+			[]string{
+				"service",
+				"telemetry",
+				"resource",
+			})
+		Expect(selfMonitoringTelemetryResource).To(BeNil())
+	}, testConfigs)
+
+	DescribeTable("should render resource processor with k8s.cluster.name if available", func(testConfig testConfig) {
+		configMap, err := testConfig.assembleConfigMapFunction(&oTelColConfig{
+			Namespace:  namespace,
+			NamePrefix: namePrefix,
+			Export: dash0v1alpha1.Export{
+				Dash0: &dash0v1alpha1.Dash0Configuration{
+					Endpoint: EndpointDash0Test,
+					Authorization: dash0v1alpha1.Authorization{
+						Token: &AuthorizationTokenTest,
+					},
+				},
+			},
+			ClusterName: "cluster-name",
+		}, false)
+
+		Expect(err).ToNot(HaveOccurred())
+		collectorConfig := parseConfigMapContent(configMap)
+		resourceProcessor := readFromMap(collectorConfig, []string{"processors", "resource"})
+		Expect(resourceProcessor).ToNot(BeNil())
+		attributes := readFromMap(resourceProcessor, []string{"attributes"})
+		Expect(attributes).To(HaveLen(1))
+		attrs := attributes.([]interface{})
+		Expect(attrs[0].(map[string]interface{})["key"]).To(Equal("k8s.cluster.name"))
+		Expect(attrs[0].(map[string]interface{})["value"]).To(Equal("cluster-name"))
+		Expect(attrs[0].(map[string]interface{})["action"]).To(Equal("insert"))
+		selfMonitoringTelemetryResource := readFromMap(
+			collectorConfig,
+			[]string{
+				"service",
+				"telemetry",
+				"resource",
+			})
+		Expect(selfMonitoringTelemetryResource).ToNot(BeNil())
+		Expect(selfMonitoringTelemetryResource.(map[string]interface{})["k8s.cluster.name"]).To(Equal("cluster-name"))
+	}, testConfigs)
+
 	Describe("should enable/disable kubernetes infrastructure metrics collection", func() {
 		It("should not render the kubeletstats receiver if kubernetes infrastructure metrics collection is disabled", func() {
 			configMap, err := assembleDaemonSetCollectorConfigMap(&oTelColConfig{
@@ -786,6 +852,19 @@ func verifyDownstreamExportersInPipelines(
 	}
 }
 
+func verifyProcessorDoesNotAppearInAnyPipeline(
+	collectorConfig map[string]interface{},
+	processorName ...string,
+) {
+	pipelines := readPipelines(collectorConfig)
+	Expect(pipelines).ToNot(BeNil())
+	for pipelineName := range pipelines {
+		Expect(pipelineName).ToNot(BeNil())
+		processors := readPipelineProcessors(pipelines, pipelineName)
+		Expect(processors).ToNot(ContainElements(processorName))
+	}
+}
+
 func verifyScrapeJobHasNamespaces(collectorConfig map[string]interface{}, jobName string) {
 	namespacesKubernetesPodsRaw :=
 		readFromMap(
@@ -818,6 +897,10 @@ func readPipelineReceivers(pipelines map[string]interface{}, pipelineName string
 	return readPipelineList(pipelines, pipelineName, "receivers")
 }
 
+func readPipelineProcessors(pipelines map[string]interface{}, pipelineName string) []interface{} {
+	return readPipelineList(pipelines, pipelineName, "processors")
+}
+
 func readPipelineExporters(pipelines map[string]interface{}, pipelineName string) []interface{} {
 	return readPipelineList(pipelines, pipelineName, "exporters")
 }
diff --git a/internal/backendconnection/otelcolresources/daemonset.config.yaml.template b/internal/backendconnection/otelcolresources/daemonset.config.yaml.template
index 45e1b424..39468e6a 100644
--- a/internal/backendconnection/otelcolresources/daemonset.config.yaml.template
+++ b/internal/backendconnection/otelcolresources/daemonset.config.yaml.template
@@ -104,6 +104,14 @@ processors:
     - sources:
       - from: connection
 
+  {{- if .ClusterName }}
+  resource:
+    attributes:
+      - key: k8s.cluster.name
+        value: "{{ .ClusterName }}"
+        action: insert
+  {{- end }}
+
   memory_limiter:
     check_interval: 5s
     limit_percentage: 80
@@ -349,6 +357,9 @@ service:
       processors:
       - resourcedetection
       - k8sattributes
+      {{- if .ClusterName }}
+      - resource
+      {{- end }}
       - memory_limiter
       - batch
       exporters:
@@ -371,6 +382,9 @@ service:
       processors:
       - resourcedetection
       - k8sattributes
+      {{- if .ClusterName }}
+      - resource
+      {{- end }}
       - memory_limiter
       - batch
       exporters:
@@ -403,6 +417,9 @@ service:
       - forward/logs
       processors:
       - resourcedetection
+      {{- if .ClusterName }}
+      - resource
+      {{- end }}
       - memory_limiter
       - batch
       exporters:
@@ -414,6 +431,10 @@ service:
       {{- end }}
 
   telemetry:
+    {{- if .ClusterName }}
+    resource:
+      "k8s.cluster.name": "{{- .ClusterName }}"
+    {{- end }}
     metrics:
       readers:
         - pull:
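Note: with a non-empty `.ClusterName` (for example `my-kubernetes-cluster`), the relevant fragments of the rendered DaemonSet collector configuration would look roughly like the sketch below (other processors, receivers, and pipelines omitted). With an empty `.ClusterName`, neither the `resource` processor nor the `service.telemetry.resource` block is rendered, which is what the config map tests above assert.

```yaml
processors:
  resource:
    attributes:
      - key: k8s.cluster.name
        value: "my-kubernetes-cluster"
        action: insert

service:
  pipelines:
    traces/downstream:
      processors:
      - resourcedetection
      - k8sattributes
      - resource
      - memory_limiter
      - batch
  telemetry:
    resource:
      "k8s.cluster.name": "my-kubernetes-cluster"
```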
diff --git a/internal/backendconnection/otelcolresources/deployment.config.yaml.template b/internal/backendconnection/otelcolresources/deployment.config.yaml.template
index 30adec56..aa2c1c59 100644
--- a/internal/backendconnection/otelcolresources/deployment.config.yaml.template
+++ b/internal/backendconnection/otelcolresources/deployment.config.yaml.template
@@ -63,6 +63,14 @@ processors:
       - from: resource_attribute
         name: k8s.pod.uid
 
+  {{- if .ClusterName }}
+  resource:
+    attributes:
+      - key: k8s.cluster.name
+        value: "{{ .ClusterName }}"
+        action: insert
+  {{- end }}
+
   memory_limiter:
     check_interval: 5s
     limit_percentage: 80
@@ -85,6 +93,9 @@ service:
       - k8s_cluster
       processors:
       - k8sattributes
+      {{- if .ClusterName }}
+      - resource
+      {{- end }}
       - memory_limiter
       - batch
       exporters:
@@ -96,6 +107,10 @@ service:
       {{- end }}
 
   telemetry:
+    {{- if .ClusterName }}
+    resource:
+      "k8s.cluster.name": "{{- .ClusterName }}"
+    {{- end }}
     metrics:
       readers:
         - pull:
diff --git a/internal/backendconnection/otelcolresources/desired_state.go b/internal/backendconnection/otelcolresources/desired_state.go
index 6bf88eec..036ee8af 100644
--- a/internal/backendconnection/otelcolresources/desired_state.go
+++ b/internal/backendconnection/otelcolresources/desired_state.go
@@ -28,6 +28,7 @@ type oTelColConfig struct {
 	Export                                           dash0v1alpha1.Export
 	SelfMonitoringAndApiAccessConfiguration          selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration
 	KubernetesInfrastructureMetricsCollectionEnabled bool
+	ClusterName                                      string
 	Images                                           util.Images
 	IsIPv6Cluster                                    bool
 	DevelopmentMode                                  bool
@@ -1084,9 +1085,10 @@ func labels(addOptOutLabel bool) map[string]string {
 }
 
 func addCommonMetadata(object client.Object) clientObject {
-	// For clusters managed by ArgoCD, we need to prevent ArgoCD to prune resources that have no owner reference
-	// which are all cluster-scoped resources, like cluster roles & cluster role bindings. We could add the annotation
-	// to achieve that only to the cluster-scoped resources, but instead we just apply it to all resources we manage.
+	// For clusters managed by ArgoCD, we need to prevent ArgoCD to sync or prune resources that have no owner
+	// reference, which are all cluster-scoped resources, like cluster roles & cluster role bindings. We could add the
+	// annotation to achieve that only to the cluster-scoped resources, but instead we just apply it to all resources we
+	// manage.
 	// * https://github.com/argoproj/argo-cd/issues/4764#issuecomment-722661940 -- this is where they say that only top
 	//   level resources are pruned (that is basically the same as resources without an owner reference).
 	// * The docs for preventing this on a resource level are here:
diff --git a/internal/backendconnection/otelcolresources/otelcol_resources.go b/internal/backendconnection/otelcolresources/otelcol_resources.go
index aa8985f2..d88546d2 100644
--- a/internal/backendconnection/otelcolresources/otelcol_resources.go
+++ b/internal/backendconnection/otelcolresources/otelcol_resources.go
@@ -115,9 +115,11 @@ func (m *OTelColResourceManager) CreateOrUpdateOpenTelemetryCollectorResources(
 	}
 
 	kubernetesInfrastructureMetricsCollectionEnabled := true
+	clusterName := ""
 	if operatorConfigurationResource != nil {
 		kubernetesInfrastructureMetricsCollectionEnabled =
 			util.ReadBoolPointerWithDefault(operatorConfigurationResource.Spec.KubernetesInfrastructureMetricsCollectionEnabled, true)
+		clusterName = operatorConfigurationResource.Spec.ClusterName
 	}
 
 	config := &oTelColConfig{
@@ -126,6 +128,7 @@ func (m *OTelColResourceManager) CreateOrUpdateOpenTelemetryCollectorResources(
 		Export:                                  *export,
 		SelfMonitoringAndApiAccessConfiguration: selfMonitoringConfiguration,
 		KubernetesInfrastructureMetricsCollectionEnabled: kubernetesInfrastructureMetricsCollectionEnabled,
+		ClusterName:     clusterName,
 		Images:          images,
 		IsIPv6Cluster:   m.IsIPv6Cluster,
 		DevelopmentMode: m.DevelopmentMode,
diff --git a/internal/startup/auto_operator_configuration_handler.go b/internal/startup/auto_operator_configuration_handler.go
index 2e2f5e5a..da154ab7 100644
--- a/internal/startup/auto_operator_configuration_handler.go
+++ b/internal/startup/auto_operator_configuration_handler.go
@@ -34,6 +34,7 @@ type OperatorConfigurationValues struct {
 	Dataset                                          string
 	SelfMonitoringEnabled                            bool
 	KubernetesInfrastructureMetricsCollectionEnabled bool
+	ClusterName                                      string
 }
 
 type AutoOperatorConfigurationResourceHandler struct {
@@ -205,15 +206,18 @@ func (r *AutoOperatorConfigurationResourceHandler) createOperatorConfigurationRe
 		ObjectMeta: metav1.ObjectMeta{
 			Name: operatorConfigurationAutoResourceName,
 			Annotations: map[string]string{
-				// For clusters managed by ArgoCD, we need to prevent ArgoCD to prune resources that are not created
-				// directly via the Helm chart and that have no owner reference. These are all cluster-scoped resources
-				// not created via Helm, like cluster roles & cluster role bindings. See also:
+				// For clusters managed by ArgoCD, we need to prevent ArgoCD to sync or prune resources that are not
+				// created directly via the Helm chart and that have no owner reference. These are all cluster-scoped
+				// resources not created via Helm, like cluster roles & cluster role bindings, but also the operator
+				// configuration resource we create here. See also:
 				// * https://github.com/argoproj/argo-cd/issues/4764#issuecomment-722661940 -- this is where they say
 				//   that only top level resources are pruned (that is basically the same as resources without an owner
 				//   reference).
 				// * The docs for preventing this on a resource level are here:
 				//   https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#no-prune-resources
-				"argocd.argoproj.io/sync-options": "Prune=false",
+				//   https://argo-cd.readthedocs.io/en/stable/user-guide/compare-options/#ignoring-resources-that-are-extraneous
+				"argocd.argoproj.io/sync-options":    "Prune=false",
+				"argocd.argoproj.io/compare-options": "IgnoreExtraneous",
 			},
 		},
 		Spec: dash0v1alpha1.Dash0OperatorConfigurationSpec{
@@ -222,6 +226,7 @@ func (r *AutoOperatorConfigurationResourceHandler) createOperatorConfigurationRe
 			},
 			Export: &dash0Export,
 			KubernetesInfrastructureMetricsCollectionEnabled: ptr.To(operatorConfiguration.KubernetesInfrastructureMetricsCollectionEnabled),
+			ClusterName: operatorConfiguration.ClusterName,
 		},
 	}
 
diff --git a/internal/startup/auto_operator_configuration_handler_test.go b/internal/startup/auto_operator_configuration_handler_test.go
index 75cdd079..721cf9f9 100644
--- a/internal/startup/auto_operator_configuration_handler_test.go
+++ b/internal/startup/auto_operator_configuration_handler_test.go
@@ -90,10 +90,12 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			}, &operatorConfiguration)
 			g.Expect(err).ToNot(HaveOccurred())
 
-			g.Expect(operatorConfiguration.Annotations).To(HaveLen(1))
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
 			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
 
-			export := operatorConfiguration.Spec.Export
+			spec := operatorConfiguration.Spec
+			export := spec.Export
 			g.Expect(export).ToNot(BeNil())
 			dash0Export := export.Dash0
 			g.Expect(dash0Export).ToNot(BeNil())
@@ -103,6 +105,9 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			g.Expect(dash0Export.Authorization.Token).ToNot(BeNil())
 			g.Expect(*dash0Export.Authorization.Token).To(Equal(AuthorizationTokenTest))
 			g.Expect(dash0Export.Authorization.SecretRef).To(BeNil())
+			g.Expect(*spec.SelfMonitoring.Enabled).To(BeFalse())
+			g.Expect(*spec.KubernetesInfrastructureMetricsCollectionEnabled).To(BeFalse())
+			g.Expect(spec.ClusterName).To(BeEmpty())
 		}, 5*time.Second, 100*time.Millisecond).Should(Succeed())
 	})
 
@@ -118,8 +123,9 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			}, &operatorConfiguration)
 			g.Expect(err).ToNot(HaveOccurred())
 
-			g.Expect(operatorConfiguration.Annotations).To(HaveLen(1))
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
 			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
 
 			export := operatorConfiguration.Spec.Export
 			g.Expect(export).ToNot(BeNil())
@@ -151,6 +157,10 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			}, &operatorConfiguration)
 			g.Expect(err).ToNot(HaveOccurred())
 
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
+
 			export := operatorConfiguration.Spec.Export
 			g.Expect(export).ToNot(BeNil())
 			dash0Export := export.Dash0
@@ -175,6 +185,10 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			}, &operatorConfiguration)
 			g.Expect(err).ToNot(HaveOccurred())
 
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
+
 			export := operatorConfiguration.Spec.Export
 			g.Expect(export).ToNot(BeNil())
 			dash0Export := export.Dash0
@@ -183,6 +197,35 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 		}, 5*time.Second, 100*time.Millisecond).Should(Succeed())
 	})
 
+	It("should set the cluster name", func() {
+		Expect(
+			handler.CreateOrUpdateOperatorConfigurationResource(ctx, &OperatorConfigurationValues{
+				Endpoint:    EndpointDash0Test,
+				Token:       AuthorizationTokenTest,
+				ClusterName: "cluster-name",
+			}, &logger),
+		).To(Succeed())
+
+		Eventually(func(g Gomega) {
+			operatorConfiguration := v1alpha1.Dash0OperatorConfiguration{}
+			err := k8sClient.Get(ctx, types.NamespacedName{
+				Name: operatorConfigurationAutoResourceName,
+			}, &operatorConfiguration)
+			g.Expect(err).ToNot(HaveOccurred())
+
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
+
+			spec := operatorConfiguration.Spec
+			export := spec.Export
+			g.Expect(export).ToNot(BeNil())
+			dash0Export := export.Dash0
+			g.Expect(dash0Export).ToNot(BeNil())
+			g.Expect(spec.ClusterName).To(Equal("cluster-name"))
+		}, 5*time.Second, 100*time.Millisecond).Should(Succeed())
+	})
+
 	It("should update the existing resource if there already is an auto-operator-configuration-resource", func() {
 		Expect(
 			handler.CreateOrUpdateOperatorConfigurationResource(ctx, &OperatorConfigurationValues{
@@ -201,8 +244,9 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			g.Expect(list.Items).To(HaveLen(1))
 			operatorConfiguration := list.Items[0]
 			g.Expect(operatorConfiguration.Name).To(Equal(operatorConfigurationAutoResourceName))
-			g.Expect(operatorConfiguration.Annotations).To(HaveLen(1))
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
 			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
 			export := operatorConfiguration.Spec.Export
 			g.Expect(export).ToNot(BeNil())
 			dash0Export := export.Dash0
@@ -238,8 +282,9 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered
 			g.Expect(list.Items).To(HaveLen(1))
 			operatorConfiguration := list.Items[0]
 			g.Expect(operatorConfiguration.Name).To(Equal(operatorConfigurationAutoResourceName))
-			g.Expect(operatorConfiguration.Annotations).To(HaveLen(1))
+			g.Expect(operatorConfiguration.Annotations).To(HaveLen(2))
 			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/sync-options"]).To(Equal("Prune=false"))
+			g.Expect(operatorConfiguration.Annotations["argocd.argoproj.io/compare-options"]).To(Equal("IgnoreExtraneous"))
 			export := operatorConfiguration.Spec.Export
 			g.Expect(export).ToNot(BeNil())
 			dash0Export := export.Dash0
diff --git a/test-resources/bin/util b/test-resources/bin/util
index 87c315f3..9dda7d0c 100644
--- a/test-resources/bin/util
+++ b/test-resources/bin/util
@@ -76,7 +76,10 @@ check_if_kubectx_is_kind_cluster() {
   if command -v kind >/dev/null 2>&1; then
     # kind is installed, check if the current kube context is a kind cluster
     current_kubectx=$(kubectl config current-context)
-    kind_clusters=$(kind get clusters)
+    kind_clusters=$(kind get clusters 2>&1)
+    if [[ "$kind_clusters" == "No kind clusters found." ]]; then
+      return
+    fi
     while IFS= read -r cluster; do
       if [[ "$current_kubectx" == "kind-$cluster" ]]; then
         is_kind_cluster=true
@@ -195,6 +198,7 @@ run_helm() {
     if [[ "${OPERATOR_CONFIGURATION_VIA_HELM_KUBERNETES_INFRASTRUCTURE_METRICS_COLLECTION_ENABLED:-}" == false  ]]; then
       helm_install_command+=" --set operator.kubernetesInfrastructureMetricsCollectionEnabled=false"
     fi
+    helm_install_command+=" --set operator.clusterName=local-operator-test-cluster"
   fi
 
   helm_install_command+=" dash0-operator"
diff --git a/test/e2e/dash0operatorconfiguration.e2e.yaml.template b/test/e2e/dash0operatorconfiguration.e2e.yaml.template
index 9e1fee44..b389c4d8 100644
--- a/test/e2e/dash0operatorconfiguration.e2e.yaml.template
+++ b/test/e2e/dash0operatorconfiguration.e2e.yaml.template
@@ -10,3 +10,4 @@ spec:
       endpoint: {{ .Endpoint }}
       authorization:
         token: {{ .Token }}
+  clusterName: e2e-test-cluster
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 4cf325ee..c8206335 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -178,6 +178,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 						)],
 						images,
 						"controller",
+						false,
 					)
 				})
 				By("all workloads have been instrumented")
@@ -289,6 +290,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 					testId,
 					initialAlternativeImages,
 					"controller",
+					false,
 				)
 
 				// Now update the operator with the actual image names that are used throughout the whole test suite.
@@ -310,6 +312,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 					// check that the new image tags have been applied to the workload
 					images,
 					"controller",
+					false,
 				)
 			})
 		})
@@ -352,6 +355,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 					testId,
 					images,
 					"webhook",
+					false,
 				)
 			},
 			Entry("should instrument new Node.js cron jobs", workloadTypeCronjob, runtimeTypeNodeJs),
@@ -380,6 +384,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 				testId,
 				images,
 				"webhook",
+				false,
 			)
 
 			By("adding the opt-out label to the deployment")
@@ -428,6 +433,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 				testId,
 				images,
 				"webhook",
+				false,
 			)
 		})
 	})
@@ -472,6 +478,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 				testId,
 				images,
 				"webhook",
+				false,
 			)
 
 			By("verifying that removing the Dash0 monitoring resource attempts to uninstruments the job")
@@ -612,6 +619,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 				testId,
 				images,
 				"controller",
+				false,
 			)
 		})
 
@@ -634,6 +642,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 				testId,
 				images,
 				"webhook",
+				false,
 			)
 
 			By("updating the Dash0Monitoring resource to InstrumentWorkloads=none")
@@ -737,6 +746,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 					testId,
 					images,
 					"webhook",
+					true,
 				)
 			})
 		})
@@ -786,6 +796,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 					testId,
 					images,
 					"webhook",
+					true,
 				)
 			})
 		})
@@ -1034,6 +1045,7 @@ var _ = Describe("Dash0 Operator", Ordered, func() {
 						testIds[config.workloadType.workloadTypeString],
 						images,
 						"controller",
+						false,
 					)
 				})
 
diff --git a/test/e2e/logs.go b/test/e2e/logs.go
index 18fd3e31..7b15c481 100644
--- a/test/e2e/logs.go
+++ b/test/e2e/logs.go
@@ -10,7 +10,6 @@ import (
 	"strings"
 	"time"
 
-	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/plog"
 
 	. "github.com/onsi/gomega"
@@ -107,9 +106,6 @@ func countMatchingLogRecords(
 func resourceLogRecordsHaveExpectedResourceAttributes(workloadType string) func(span plog.ResourceLogs) bool {
 	return func(resourceLogs plog.ResourceLogs) bool {
 		attributes := resourceLogs.Resource().Attributes()
-		attributes.Range(func(k string, v pcommon.Value) bool {
-			return true
-		})
 
 		workloadAttributeFound := false
 		if workloadType == "replicaset" {
diff --git a/test/e2e/operator.go b/test/e2e/operator.go
index 9e22dd47..bcdd0fca 100644
--- a/test/e2e/operator.go
+++ b/test/e2e/operator.go
@@ -100,6 +100,7 @@ func deployOperatorWithAutoOperationConfiguration(
 			"operator.dash0Export.secretRef.key",
 			operatorConfigurationValues.SecretRef.Key,
 		)
+		arguments = setHelmParameter(arguments, "operator.clusterName", "e2e-test-cluster")
 	}
 
 	output, err := run(exec.Command("helm", arguments...))
diff --git a/test/e2e/spans.go b/test/e2e/spans.go
index bcd94288..aef6a306 100644
--- a/test/e2e/spans.go
+++ b/test/e2e/spans.go
@@ -18,6 +18,9 @@ import (
 const (
 	tracesJsonMaxLineLength = 1_048_576
 
+	clusterNameKey = "k8s.cluster.name"
+	podNameKey     = "k8s.pod.name"
+
 	httpTargetAttrib = "http.target"
 	httpRouteAttrib  = "http.route"
 	urlQueryAttrib   = "url.query"
@@ -33,6 +36,7 @@ func verifySpans(
 	workloadType workloadType,
 	route string,
 	query string,
+	expectClusterName bool,
 ) {
 	allMatchResults :=
 		sendRequestAndFindMatchingSpans(
@@ -43,6 +47,7 @@ func verifySpans(
 			query,
 			nil,
 			true,
+			expectClusterName,
 		)
 	allMatchResults.expectAtLeastOneMatch(
 		g,
@@ -67,6 +72,7 @@ func verifyNoSpans(
 			query,
 			&timestampLowerBound,
 			false,
+			false,
 		)
 	allMatchResults.expectZeroMatches(
 		g,
@@ -82,13 +88,14 @@ func sendRequestAndFindMatchingSpans(
 	query string,
 	timestampLowerBound *time.Time,
 	checkResourceAttributes bool,
+	expectClusterName bool,
 ) MatchResultList[ptrace.ResourceSpans, ptrace.Span] {
 	if !workloadType.isBatch {
 		sendRequest(g, runtime, workloadType, route, query)
 	}
 	var resourceMatchFn func(ptrace.ResourceSpans, *ResourceMatchResult[ptrace.ResourceSpans])
 	if checkResourceAttributes {
-		resourceMatchFn = resourceSpansHaveExpectedResourceAttributes(runtime, workloadType)
+		resourceMatchFn = resourceSpansHaveExpectedResourceAttributes(runtime, workloadType, expectClusterName)
 	}
 	return fileHasMatchingSpan(
 		g,
@@ -185,13 +192,27 @@ func hasMatchingSpans(
 }
 
 //nolint:all
-func resourceSpansHaveExpectedResourceAttributes(runtime runtimeType, workloadType workloadType) func(
+func resourceSpansHaveExpectedResourceAttributes(runtime runtimeType, workloadType workloadType, expectClusterName bool) func(
 	ptrace.ResourceSpans,
 	*ResourceMatchResult[ptrace.ResourceSpans],
 ) {
 	return func(resourceSpans ptrace.ResourceSpans, matchResult *ResourceMatchResult[ptrace.ResourceSpans]) {
 		attributes := resourceSpans.Resource().Attributes()
 
+		if expectClusterName {
+			expectedClusterName := "e2e-test-cluster"
+			actualClusterName, hasClusterNameAttribute := attributes.Get(clusterNameKey)
+			if hasClusterNameAttribute {
+				if actualClusterName.Str() == expectedClusterName {
+					matchResult.addPassedAssertion(clusterNameKey)
+				} else {
+					matchResult.addFailedAssertion(clusterNameKey, fmt.Sprintf("expected %s but it was %s", expectedClusterName, actualClusterName.Str()))
+				}
+			} else {
+				matchResult.addFailedAssertion(clusterNameKey, fmt.Sprintf("expected %s but the span has no such attribute", expectedClusterName))
+			}
+		}
+
 		// Note: On kind clusters, the workload type attribute (k8s.deployment.name) etc. is often missing. This needs
 		// to be investigated more.
 		if workloadType.workloadTypeString == "replicaset" {
@@ -210,7 +231,6 @@ func resourceSpansHaveExpectedResourceAttributes(runtime runtimeType, workloadTy
 			}
 		}
 
-		podNameKey := "k8s.pod.name"
 		expectedPodName := workloadName(runtime, workloadType)
 		expectedPodPrefix := fmt.Sprintf("%s-", expectedPodName)
 		actualPodName, hasPodAttribute := attributes.Get(podNameKey)
diff --git a/test/e2e/verify_instrumentation.go b/test/e2e/verify_instrumentation.go
index 28a67668..883bec6c 100644
--- a/test/e2e/verify_instrumentation.go
+++ b/test/e2e/verify_instrumentation.go
@@ -27,6 +27,7 @@ func verifyThatWorkloadHasBeenInstrumented(
 	testId string,
 	images Images,
 	instrumentationBy string,
+	expectClusterName bool,
 ) {
 	By(fmt.Sprintf("%s: waiting for the workload to get instrumented (polling its labels and events to check)",
 		workloadType.workloadTypeString))
@@ -61,7 +62,7 @@ func verifyThatWorkloadHasBeenInstrumented(
 	route := "/dash0-k8s-operator-test"
 	query := fmt.Sprintf("id=%s", testId)
 	Eventually(func(g Gomega) {
-		verifySpans(g, runtime, workloadType, route, query)
+		verifySpans(g, runtime, workloadType, route, query, expectClusterName)
 	}, spanTimeout, pollingInterval).Should(Succeed())
 	By(fmt.Sprintf("%s: matching spans have been received", workloadType.workloadTypeString))
 }