diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md
index 20f49e4b9b1..9566beb09f4 100644
--- a/operations/helm/charts/mimir-distributed/CHANGELOG.md
+++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md
@@ -28,6 +28,7 @@ Entries should include a reference to the Pull Request that introduced the chang
 
 ## main / unreleased
 
+* [FEATURE] Added an experimental feature for deploying [KEDA](https://keda.sh) ScaledObjects as part of the Helm chart for the distributor, querier, query-frontend and ruler components. Autoscaling can be enabled via `distributor.kedaAutoscaling`, `ruler.kedaAutoscaling`, `query_frontend.kedaAutoscaling`, and `querier.kedaAutoscaling`. This requires metamonitoring; for more details on metamonitoring, see [Monitor the health of your system](https://grafana.com/docs/helm-charts/mimir-distributed/latest/run-production-environment-with-helm/monitor-system-health/). See [grafana/mimir#7367](https://github.com/grafana/mimir/issues/7367) for a migration procedure. #7282
 * [CHANGE] Rollout-operator: remove default CPU limit. #7125
 * [CHANGE] Ring: relaxed the hash ring heartbeat period and timeout for distributor, ingester, store-gateway and compactor: #6860
   * `-distributor.ring.heartbeat-period` set to `1m`
diff --git a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-metamonitoring-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-metamonitoring-values.yaml
new file mode 100644
index 00000000000..cff02c4bc23
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-metamonitoring-values.yaml
@@ -0,0 +1,52 @@
+# Pin kube version so results are the same for running in CI and locally where the installed kube version may be different.
+kubeVersionOverride: "1.20"
+
+metaMonitoring:
+  grafanaAgent:
+    metrics:
+      enabled: false
+      remote:
+        url: https://mimir.example.com/api/v1/push # test with setting a different remote for the monitoring
+
+distributor:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+ruler:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+querier:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 2
+    maxReplicaCount: 10
+    pollingInterval: 10
+    querySchedulerInflightRequestsThreshold: 6
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+query_frontend:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
\ No newline at end of file
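The CI values above exercise the case where metamonitoring writes to an explicit remote. As a rough sketch of how the chart derives the KEDA trigger serverAddress from that value (via the `mimir.metaMonitoring.metrics.remoteReadUrl` helper added further down in this change): only the scheme and host of the remote-write URL are kept, and the Prometheus path prefix is appended. The host below is just the example value from this file, not a real endpoint.

# Hypothetical illustration of the URL derivation, not chart output:
metaMonitoring:
  grafanaAgent:
    metrics:
      remote:
        url: https://mimir.example.com/api/v1/push
# urlParse keeps scheme "https" and host "mimir.example.com", so every trigger renders:
#   serverAddress: https://mimir.example.com/prometheus
# With remote.url left empty, the in-cluster gateway URL plus the Prometheus HTTP prefix is used instead.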
diff --git a/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml
new file mode 100644
index 00000000000..83befad0098
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/ci/offline/keda-autoscaling-values.yaml
@@ -0,0 +1,52 @@
+# Pin kube version so results are the same for running in CI and locally where the installed kube version may be different.
+kubeVersionOverride: "1.20"
+
+metaMonitoring:
+  grafanaAgent:
+    metrics:
+      enabled: false
+      # Leave the remote empty to use the default, which sends metrics to Mimir directly
+      # remote: #
+
+distributor:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+ruler:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+querier:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 2
+    maxReplicaCount: 10
+    pollingInterval: 10
+    querySchedulerInflightRequestsThreshold: 6
+    customHeaders:
+      X-Scope-OrgID: tenant-1
+
+query_frontend:
+  kedaAutoscaling:
+    enabled: true
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 80
+    targetMemoryUtilizationPercentage: 80
+    customHeaders:
+      X-Scope-OrgID: tenant-1
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
index 9c6e606cca9..795ca80efe4 100644
--- a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
+++ b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl
@@ -511,6 +511,10 @@ Return if we should create a SecurityContextConstraints. Takes into account user
 {{ include "mimir.gatewayUrl" . }}/api/v1/push
 {{- end -}}
 
+{{- define "mimir.remoteReadUrl.inCluster" -}}
+{{ include "mimir.gatewayUrl" . }}{{ include "mimir.prometheusHttpPrefix" . }}
+{{- end -}}
+
 {{/*
 Creates dict for zone-aware replication configuration
 Params:
diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
index 719155c4a7d..daf1a24a7ee 100644
--- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml
@@ -8,10 +8,12 @@ metadata:
     {{- toYaml .Values.distributor.annotations | nindent 4 }}
   namespace: {{ .Release.Namespace | quote }}
 spec:
+  {{- if not .Values.distributor.kedaAutoscaling.enabled }}
   # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
   {{- if or (or (kindIs "int64" .Values.distributor.replicas) (kindIs "float64" .Values.distributor.replicas)) (.Values.distributor.replicas) }}
   replicas: {{ .Values.distributor.replicas }}
   {{- end }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "mimir.selectorLabels" (dict "ctx" . "component" "distributor" "memberlist" true) | nindent 6 }}
diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml
new file mode 100644
index 00000000000..d955c36c296
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-so.yaml
@@ -0,0 +1,44 @@
+{{- if .Values.distributor.kedaAutoscaling.enabled }}
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{ include "mimir.resourceName" (dict "ctx" . "component" "distributor") }}
+  labels:
+    {{- include "mimir.labels" (dict "ctx" . "component" "distributor") | nindent 4 }}
+  annotations:
+    {{- toYaml .Values.distributor.annotations | nindent 4 }}
+  namespace: {{ .Release.Namespace | quote }}
+spec:
+  advanced:
+    horizontalPodAutoscalerConfig:
+      {{- with .Values.distributor.kedaAutoscaling.behavior }}
+      behavior:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+  maxReplicaCount: {{ .Values.distributor.kedaAutoscaling.maxReplicaCount }}
+  minReplicaCount: {{ .Values.distributor.kedaAutoscaling.minReplicaCount }}
+  pollingInterval: {{ .Values.distributor.kedaAutoscaling.pollingInterval }}
+  scaleTargetRef:
+    name: {{ include "mimir.resourceName" (dict "ctx" . "component" "distributor") }}
+    apiVersion: apps/v1
+    kind: Deployment
+  triggers:
+  - metadata:
+      query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="distributor",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $cpu_request := dig "requests" "cpu" nil .Values.distributor.resources }}
+      threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.distributor.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.distributor.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.distributor.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+  - metadata:
+      query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="distributor",namespace="{{ .Release.Namespace }}"}) and max by (pod) (up{container="distributor",namespace="{{ .Release.Namespace }}"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="distributor",namespace="{{ .Release.Namespace }}", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="distributor",namespace="{{ .Release.Namespace }}"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor",namespace="{{ .Release.Namespace }}", reason="OOMKilled"}) or vector(0))
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $mem_request := dig "requests" "memory" nil .Values.distributor.resources }}
+      threshold: {{ mulf (include "mimir.siToBytes" (dict "value" $mem_request)) (divf .Values.distributor.kedaAutoscaling.targetMemoryUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.distributor.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.distributor.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+{{- end }}
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/templates/lib/map-to-csv.tpl b/operations/helm/charts/mimir-distributed/templates/lib/map-to-csv.tpl
new file mode 100644
index 00000000000..e31974d51b2
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/templates/lib/map-to-csv.tpl
@@ -0,0 +1,17 @@
+{{/*
+Convert a map to a CSV string like: key1=value1,key2=value2,...
+Example:
+  customHeaders:
+    X-Scope-OrgID: tenant-1
+becomes:
+  customHeaders: "X-Scope-OrgID=tenant-1"
+Params:
+  map = map to convert to csv string
+*/}}
+{{- define "mimir.lib.mapToCSVString" -}}
+{{- $list := list -}}
+{{- range $k, $v := $.map -}}
+{{- $list = append $list (printf "%s=%s" $k $v) -}}
+{{- end -}}
+{{ join "," $list }}
+{{- end -}}
diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl
index c2e28ce931f..88008fe9863 100644
--- a/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl
+++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl
@@ -56,3 +56,15 @@
 cluster: {{ include "mimir.clusterName" $.ctx | quote}}
 {{- end -}}
 {{- end -}}
+
+{{- define "mimir.metaMonitoring.metrics.remoteReadUrl" -}}
+{{- with $.ctx.Values.metaMonitoring.grafanaAgent.metrics }}
+{{- $writeBackToMimir := not (.remote).url -}}
+{{- if $writeBackToMimir -}}
+{{- include "mimir.remoteReadUrl.inCluster" $.ctx }}
+{{- else -}}
+{{- $parsed := urlParse (.remote).url -}}
+{{ $parsed.scheme }}://{{ $parsed.host }}/prometheus
+{{- end }}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
index 91e78e7767d..ff2d1cf727a 100644
--- a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml
@@ -8,10 +8,12 @@ metadata:
     {{- toYaml .Values.querier.annotations | nindent 4 }}
   namespace: {{ .Release.Namespace | quote }}
 spec:
+  {{- if not .Values.querier.kedaAutoscaling.enabled }}
   # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
   {{- if or (or (kindIs "int64" .Values.querier.replicas) (kindIs "float64" .Values.querier.replicas)) (.Values.querier.replicas) }}
   replicas: {{ .Values.querier.replicas }}
   {{- end }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "mimir.selectorLabels" (dict "ctx" . "component" "querier" "memberlist" true) | nindent 6 }}
diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-so.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-so.yaml
new file mode 100644
index 00000000000..a2411f269f2
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-so.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.querier.kedaAutoscaling.enabled }}
+{{- if not .Values.query_scheduler.enabled }}
+{{- fail "KEDA autoscaling for querier requires query scheduler to be enabled" }}
+{{- end }}
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{ include "mimir.resourceName" (dict "ctx" . "component" "querier") }}
+  labels:
+    {{- include "mimir.labels" (dict "ctx" . "component" "querier") | nindent 4 }}
+  annotations:
+    {{- toYaml .Values.querier.annotations | nindent 4 }}
+  namespace: {{ .Release.Namespace | quote }}
+spec:
+  advanced:
+    horizontalPodAutoscalerConfig:
+      {{- with .Values.querier.kedaAutoscaling.behavior }}
+      behavior:
+        {{- toYaml . 
| nindent 8 }}
+      {{- end }}
+  maxReplicaCount: {{ .Values.querier.kedaAutoscaling.maxReplicaCount }}
+  minReplicaCount: {{ .Values.querier.kedaAutoscaling.minReplicaCount }}
+  pollingInterval: {{ .Values.querier.kedaAutoscaling.pollingInterval }}
+  scaleTargetRef:
+    name: {{ include "mimir.resourceName" (dict "ctx" . "component" "querier") }}
+    apiVersion: apps/v1
+    kind: Deployment
+  triggers:
+  - metadata:
+      query: sum(max_over_time(cortex_query_scheduler_inflight_requests{container="query-scheduler",namespace="{{ .Release.Namespace }}",quantile="0.5"}[1m]))
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      threshold: {{ .Values.querier.kedaAutoscaling.querySchedulerInflightRequestsThreshold | quote }}
+      {{- if .Values.querier.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.querier.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    name: cortex_querier_hpa_default
+    type: prometheus
+  - metadata:
+      query: sum(rate(cortex_querier_request_duration_seconds_sum{container="querier",namespace="{{ .Release.Namespace }}"}[1m]))
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      threshold: {{ .Values.querier.kedaAutoscaling.querySchedulerInflightRequestsThreshold | quote }}
+      {{- if .Values.querier.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.querier.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    name: cortex_querier_hpa_default_requests_duration
+    type: prometheus
+{{- end }}
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
index 640151f6418..66f91fc6286 100644
--- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml
@@ -8,10 +8,12 @@ metadata:
     {{- toYaml .Values.query_frontend.annotations | nindent 4 }}
   namespace: {{ .Release.Namespace | quote }}
 spec:
+  {{- if not .Values.query_frontend.kedaAutoscaling.enabled }}
   # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it
   {{- if or (or (kindIs "int64" .Values.query_frontend.replicas) (kindIs "float64" .Values.query_frontend.replicas)) (.Values.query_frontend.replicas) }}
   replicas: {{ .Values.query_frontend.replicas }}
   {{- end }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "mimir.selectorLabels" (dict "ctx" . "component" "query-frontend") | nindent 6 }}
diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
new file mode 100644
index 00000000000..4991198aeaf
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-so.yaml
@@ -0,0 +1,44 @@
+{{- if .Values.query_frontend.kedaAutoscaling.enabled }}
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{ include "mimir.resourceName" (dict "ctx" . "component" "query-frontend") }}
+  labels:
+    {{- include "mimir.labels" (dict "ctx" . "component" "query-frontend") | nindent 4 }}
+  annotations:
+    {{- toYaml .Values.query_frontend.annotations | nindent 4 }}
+  namespace: {{ .Release.Namespace | quote }}
+spec:
+  advanced:
+    horizontalPodAutoscalerConfig:
+      {{- with .Values.query_frontend.kedaAutoscaling.behavior }}
+      behavior:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+  maxReplicaCount: {{ .Values.query_frontend.kedaAutoscaling.maxReplicaCount }}
+  minReplicaCount: {{ .Values.query_frontend.kedaAutoscaling.minReplicaCount }}
+  pollingInterval: {{ .Values.query_frontend.kedaAutoscaling.pollingInterval }}
+  scaleTargetRef:
+    name: {{ include "mimir.resourceName" (dict "ctx" . "component" "query-frontend") }}
+    apiVersion: apps/v1
+    kind: Deployment
+  triggers:
+  - metadata:
+      query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="query-frontend",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $cpu_request := dig "requests" "cpu" nil .Values.query_frontend.resources }}
+      threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.query_frontend.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.query_frontend.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.query_frontend.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+  - metadata:
+      query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="query-frontend",namespace="{{ .Release.Namespace }}"}) and max by (pod) (up{container="query-frontend",namespace="{{ .Release.Namespace }}"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="query-frontend",namespace="{{ .Release.Namespace }}", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="query-frontend",namespace="{{ .Release.Namespace }}"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend",namespace="{{ .Release.Namespace }}", reason="OOMKilled"}) or vector(0))
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $mem_request := dig "requests" "memory" nil .Values.query_frontend.resources }}
+      threshold: {{ mulf (include "mimir.siToBytes" (dict "value" $mem_request)) (divf .Values.query_frontend.kedaAutoscaling.targetMemoryUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.query_frontend.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.query_frontend.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+{{- end }}
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
index 549905d7996..d98899d2d03 100644
--- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
+++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml
@@ -9,7 +9,9 @@ metadata:
     {{- toYaml .Values.ruler.annotations | nindent 4 }}
   namespace: {{ .Release.Namespace | quote }}
 spec:
+  {{- if not .Values.ruler.kedaAutoscaling.enabled }}
   replicas: {{ .Values.ruler.replicas }}
+  {{- end }}
   selector:
     matchLabels:
       {{- include "mimir.selectorLabels" (dict "ctx" . "component" "ruler" "memberlist" true) | nindent 6 }}
diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml
new file mode 100644
index 00000000000..6478ccd2a74
--- /dev/null
+++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-so.yaml
@@ -0,0 +1,44 @@
+{{- if .Values.ruler.kedaAutoscaling.enabled }}
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{ include "mimir.resourceName" (dict "ctx" . "component" "ruler") }}
+  labels:
+    {{- include "mimir.labels" (dict "ctx" . "component" "ruler") | nindent 4 }}
+  annotations:
+    {{- toYaml .Values.ruler.annotations | nindent 4 }}
+  namespace: {{ .Release.Namespace | quote }}
+spec:
+  advanced:
+    horizontalPodAutoscalerConfig:
+      {{- with .Values.ruler.kedaAutoscaling.behavior }}
+      behavior:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+  maxReplicaCount: {{ .Values.ruler.kedaAutoscaling.maxReplicaCount }}
+  minReplicaCount: {{ .Values.ruler.kedaAutoscaling.minReplicaCount }}
+  pollingInterval: {{ .Values.ruler.kedaAutoscaling.pollingInterval }}
+  scaleTargetRef:
+    name: {{ include "mimir.resourceName" (dict "ctx" . "component" "ruler") }}
+    apiVersion: apps/v1
+    kind: Deployment
+  triggers:
+  - metadata:
+      query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="{{ .Release.Namespace }}"}[5m])) and max by (pod) (up{container="ruler",namespace="{{ .Release.Namespace }}"}) > 0)[15m:]) * 1000
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $cpu_request := dig "requests" "cpu" nil .Values.ruler.resources }}
+      threshold: {{ mulf (include "mimir.parseCPU" (dict "value" $cpu_request)) (divf .Values.ruler.kedaAutoscaling.targetCPUUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.ruler.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.ruler.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+  - metadata:
+      query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="ruler",namespace="{{ .Release.Namespace }}"}) and max by (pod) (up{container="ruler",namespace="{{ .Release.Namespace }}"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="ruler",namespace="{{ .Release.Namespace }}", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="ruler",namespace="{{ .Release.Namespace }}"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler",namespace="{{ .Release.Namespace }}", reason="OOMKilled"}) or vector(0))
+      serverAddress: {{ include "mimir.metaMonitoring.metrics.remoteReadUrl" (dict "ctx" $) }}
+      {{- $mem_request := dig "requests" "memory" nil .Values.ruler.resources }}
+      threshold: {{ mulf (include "mimir.siToBytes" (dict "value" $mem_request)) (divf .Values.ruler.kedaAutoscaling.targetMemoryUtilizationPercentage 100) | floor | int64 | quote }}
+      {{- if .Values.ruler.kedaAutoscaling.customHeaders }}
+      customHeaders: {{ (include "mimir.lib.mapToCSVString" (dict "map" .Values.ruler.kedaAutoscaling.customHeaders)) | quote }}
+      {{- end }}
+    type: prometheus
+{{- end }}
\ No newline at end of file
diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml
index 2f793b3c3f6..49fa5ecaf50 100644
--- a/operations/helm/charts/mimir-distributed/values.yaml
+++ b/operations/helm/charts/mimir-distributed/values.yaml
@@ -89,7 +89,7 @@ configStorageType: ConfigMap
 externalConfigSecretName: '{{ include "mimir.resourceName" (dict "ctx" . 
"component" "config") }}' # -- When 'useExternalConfig' is true, then changing 'externalConfigVersion' triggers restart of services - otherwise changes to the configuration cause a restart. -externalConfigVersion: '0' +externalConfigVersion: "0" # --Vault Agent config to mount secrets to TLS configurable components. This requires Vault and Vault Agent to already be running. vaultAgent: @@ -592,7 +592,7 @@ alertmanager: # Subdirectory of Alertmanager data Persistent Volume to mount # Useful if the volume's root directory is not empty # - subPath: '' + subPath: "" # Alertmanager data Persistent Volume Storage Class # If defined, storageClassName: @@ -714,50 +714,73 @@ alertmanager: writePath: false # -- Zone definitions for alertmanager zones. Note: you have to redefine the whole list to change parts as YAML does not allow to modify parts of a list. zones: - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-a - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-a - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Alertmanager data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-b - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-b - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Alertmanager data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-c - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-c - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Alertmanager data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`. - storageClass: null + # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + - name: zone-a + # -- nodeselector to restrict where pods of this zone can be placed. 
E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-a
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Alertmanager data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`.
+      storageClass: null
+    # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    - name: zone-b
+      # -- nodeselector to restrict where pods of this zone can be placed. E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-b
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Alertmanager data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`.
+      storageClass: null
+    # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    - name: zone-c
+      # -- nodeselector to restrict where pods of this zone can be placed. E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-c
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Alertmanager data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `alertmanager.persistentVolume.storageClass`.
+      storageClass: null
 
 distributor:
   # Setting it to null will produce a deployment without replicas set, allowing you to use autoscaling with the deployment
   replicas: 1
 
+  # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having
+  # KEDA already installed in the Kubernetes cluster. The metrics for scaling are read
+  # from the metamonitoring setup (metamonitoring.grafanaAgent.metrics.remote).
+  # Basic auth and extra HTTP headers from metamonitoring are ignored; use customHeaders instead.
+  # The remote URL is used even if metamonitoring is disabled.
+  # See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
+ kedaAutoscaling: + enabled: false + minReplicaCount: 1 + maxReplicaCount: 10 + pollingInterval: 10 + targetCPUUtilizationPercentage: 100 + targetMemoryUtilizationPercentage: 100 + customHeaders: + {} + # X-Scope-OrgID: "" + behavior: + scaleDown: + policies: + - periodSeconds: 600 + type: Percent + value: 10 + service: annotations: {} labels: {} @@ -911,7 +934,7 @@ ingester: # Subdirectory of Ingester data Persistent Volume to mount # Useful if the volume's root directory is not empty - subPath: '' + subPath: "" # -- Enable StatefulSetAutoDeletePVC feature # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention @@ -919,7 +942,6 @@ ingester: whenDeleted: Retain whenScaled: Retain - # Ingester data Persistent Volume Storage Class # If defined, storageClassName: # If set to "-", storageClassName: "", which disables dynamic provisioning @@ -1010,45 +1032,45 @@ ingester: writePath: false # -- Zone definitions for ingester zones. Note: you have to redefine the whole list to change parts as YAML does not allow to modify parts of a list. zones: - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-a - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-a - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Ingester data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-b - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-b - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Ingester data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-c - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-c - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- Ingester data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`. - storageClass: null + # -- Name of the zone, used in labels and selectors. 
Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    - name: zone-a
+      # -- nodeselector to restrict where pods of this zone can be placed. E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-a
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Ingester data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`.
+      storageClass: null
+    # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    - name: zone-b
+      # -- nodeselector to restrict where pods of this zone can be placed. E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-b
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Ingester data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`.
+      storageClass: null
+    # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    - name: zone-c
+      # -- nodeselector to restrict where pods of this zone can be placed. E.g.:
+      # nodeSelector:
+      #   topology.kubernetes.io/zone: zone-c
+      nodeSelector: null
+      # -- extraAffinity adds user defined custom affinity rules (merged with generated rules)
+      extraAffinity: {}
+      # -- Ingester data Persistent Volume Storage Class
+      # If defined, storageClassName: <storageClass>
+      # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning
+      # If undefined or set to null (the default), then fall back to the value of `ingester.persistentVolume.storageClass`.
+      storageClass: null
 
 overrides_exporter:
   enabled: true
@@ -1082,7 +1104,8 @@
 
   # -- topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints.
   # labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services.
-  topologySpreadConstraints: {}
+  topologySpreadConstraints:
+    {}
     # maxSkew: 1
     # topologyKey: kubernetes.io/hostname
     # whenUnsatisfiable: ScheduleAnyway
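Each component's `kedaAutoscaling` block in the values below follows the same shape. A minimal sketch of user-facing values that enable the feature for the ruler, assuming KEDA and metamonitoring are already set up as the comments below require; everything not set keeps the chart defaults:

ruler:
  kedaAutoscaling:
    enabled: true
    minReplicaCount: 2
    maxReplicaCount: 20

The optional `behavior` map is passed through to the generated ScaledObject as `spec.advanced.horizontalPodAutoscalerConfig.behavior`, so the usual HPA scaling-policy semantics apply; for example, the default ruler policy below allows removing at most 10% of replicas per 600-second window.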
@@ -1135,6 +1158,29 @@ ruler:
   enabled: true
   replicas: 1
 
+  # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having
+  # KEDA already installed in the Kubernetes cluster. The metrics for scaling are read
+  # from the metamonitoring setup (metamonitoring.grafanaAgent.metrics.remote).
+  # Basic auth and extra HTTP headers from metamonitoring are ignored; use customHeaders instead.
+  # The remote URL is used even if metamonitoring is disabled.
+  # See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
+  kedaAutoscaling:
+    enabled: false
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 100
+    targetMemoryUtilizationPercentage: 100
+    customHeaders:
+      {}
+      # X-Scope-OrgID: ""
+    behavior:
+      scaleDown:
+        policies:
+          - periodSeconds: 600
+            type: Percent
+            value: 10
+
   service:
     annotations: {}
     labels: {}
@@ -1156,7 +1202,8 @@ ruler:
       memory: 128Mi
 
   # Additional ruler container arguments, e.g. log level (debug, info, warn, error)
-  extraArgs: {}
+  extraArgs:
+    {}
     # log.level: debug
 
   # Pod Labels
@@ -1222,6 +1269,38 @@ ruler:
 querier:
   replicas: 2
 
+  # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having
+  # KEDA already installed in the Kubernetes cluster. The metrics for scaling are read
+  # from the metamonitoring setup (metamonitoring.grafanaAgent.metrics.remote).
+  # Basic auth and extra HTTP headers from metamonitoring are ignored; use customHeaders instead.
+  # The remote URL is used even if metamonitoring is disabled.
+  # See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
+  kedaAutoscaling:
+    enabled: false
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    querySchedulerInflightRequestsThreshold: 12
+    customHeaders:
+      {}
+      # X-Scope-OrgID: ""
+    behavior:
+      scaleDown:
+        policies:
+          - periodSeconds: 120
+            type: Percent
+            value: 10
+        stabilizationWindowSeconds: 600
+      scaleUp:
+        policies:
+          - periodSeconds: 120
+            type: Percent
+            value: 50
+          - periodSeconds: 120
+            type: Pods
+            value: 15
+        stabilizationWindowSeconds: 60
+
   service:
     annotations: {}
     labels: {}
@@ -1301,6 +1380,29 @@ query_frontend:
   # Setting it to null will produce a deployment without replicas set, allowing you to use autoscaling with the deployment
   replicas: 1
 
+  # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having
+  # KEDA already installed in the Kubernetes cluster. The metrics for scaling are read
+  # from the metamonitoring setup (metamonitoring.grafanaAgent.metrics.remote).
+  # Basic auth and extra HTTP headers from metamonitoring are ignored; use customHeaders instead.
+  # The remote URL is used even if metamonitoring is disabled.
+  # See https://github.com/grafana/mimir/issues/7367 for more details on how to migrate to autoscaled resources without disruptions.
+  kedaAutoscaling:
+    enabled: false
+    minReplicaCount: 1
+    maxReplicaCount: 10
+    pollingInterval: 10
+    targetCPUUtilizationPercentage: 75
+    targetMemoryUtilizationPercentage: 100
+    customHeaders:
+      {}
+      # X-Scope-OrgID: ""
+    behavior:
+      scaleDown:
+        policies:
+          - periodSeconds: 60
+            type: Percent
+            value: 10
+
   service:
     annotations: {}
     labels: {}
@@ -1535,8 +1637,7 @@ store_gateway:
   # Subdirectory of Store-gateway data Persistent Volume to mount
   # Useful if the volume's root directory is not empty
   #
-  subPath: ''
-
+  subPath: ""
 
   # Store-gateway data Persistent Volume Storage Class
   # If defined, storageClassName: <storageClass>
@@ -1622,45 +1723,45 @@ store_gateway:
     readPath: false
   # -- Zone definitions for store-gateway zones. Note: you have to redefine the whole list to change parts as YAML does not allow to modify parts of a list.
   zones:
-  # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
-  - name: zone-a
-    # -- nodeselector to restrict where pods of this zone can be placed. 
E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-a - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- StoreGateway data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-b - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-b - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- StoreGateway data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. - storageClass: null - # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - - name: zone-c - # -- nodeselector to restrict where pods of this zone can be placed. E.g.: - # nodeSelector: - # topology.kubernetes.io/zone: zone-c - nodeSelector: null - # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) - extraAffinity: {} - # -- StoreGateway data Persistent Volume Storage Class - # If defined, storageClassName: - # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning - # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. - storageClass: null + # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + - name: zone-a + # -- nodeselector to restrict where pods of this zone can be placed. E.g.: + # nodeSelector: + # topology.kubernetes.io/zone: zone-a + nodeSelector: null + # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) + extraAffinity: {} + # -- StoreGateway data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning + # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. + storageClass: null + # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + - name: zone-b + # -- nodeselector to restrict where pods of this zone can be placed. 
E.g.: + # nodeSelector: + # topology.kubernetes.io/zone: zone-b + nodeSelector: null + # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) + extraAffinity: {} + # -- StoreGateway data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning + # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. + storageClass: null + # -- Name of the zone, used in labels and selectors. Must follow Kubernetes naming restrictions: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + - name: zone-c + # -- nodeselector to restrict where pods of this zone can be placed. E.g.: + # nodeSelector: + # topology.kubernetes.io/zone: zone-c + nodeSelector: null + # -- extraAffinity adds user defined custom affinity rules (merged with generated rules) + extraAffinity: {} + # -- StoreGateway data Persistent Volume Storage Class + # If defined, storageClassName: + # If set to "-", then use `storageClassName: ""`, which disables dynamic provisioning + # If undefined or set to null (the default), then fall back to the value of `store_gateway.persistentVolume.storageClass`. + storageClass: null compactor: replicas: 1 @@ -1731,7 +1832,7 @@ compactor: # Subdirectory of compactor data Persistent Volume to mount # Useful if the volume's root directory is not empty # - subPath: '' + subPath: "" # compactor data Persistent Volume Storage Class # If defined, storageClassName: @@ -1892,7 +1993,7 @@ chunks-cache: # -- Add extended options for chunks-cache memcached container. The format is the same as for the memcached -o/--extend flag. # Example: # extraExtendedOptions: 'tls,no_hashexpand' - extraExtendedOptions: '' + extraExtendedOptions: "" # -- Additional CLI args for chunks-cache extraArgs: {} @@ -1984,7 +2085,7 @@ index-cache: # -- Add extended options for index-cache memcached container. The format is the same as for the memcached -o/--extend flag. # Example: # extraExtendedOptions: 'tls,modern,track_sizes' - extraExtendedOptions: '' + extraExtendedOptions: "" # -- Additional CLI args for index-cache extraArgs: {} @@ -2076,7 +2177,7 @@ metadata-cache: # -- Add extended options for metadata-cache memcached container. The format is the same as for the memcached -o/--extend flag. # Example: # extraExtendedOptions: 'tls,modern,track_sizes' - extraExtendedOptions: '' + extraExtendedOptions: "" # -- Additional CLI args for metadata-cache extraArgs: {} @@ -2168,7 +2269,7 @@ results-cache: # -- Add extended options for results-cache memcached container. The format is the same as for the memcached -o/--extend flag. # Example: # extraExtendedOptions: 'tls,modern,track_sizes' - extraExtendedOptions: '' + extraExtendedOptions: "" # -- Additional CLI args for results-cache extraArgs: {} @@ -2331,7 +2432,7 @@ nginx: terminationGracePeriodSeconds: 30 # -- Affinity for nginx pods. Passed through `tpl` and, thus, to be configured as string # @default -- Hard node and soft zone anti-affinity - affinity: '' + affinity: "" # -- topologySpreadConstraints allows to customize the default topologySpreadConstraints. This can be either a single dict as shown below or a slice of topologySpreadConstraints. # labelSelector is taken from the constraint itself (if it exists) or is generated by the chart using the same selectors as for services. 
@@ -2685,7 +2786,8 @@ gateway: initContainers: [] # -- SecurityContext override for gateway pods - securityContext: {} + securityContext: + {} # -- The SecurityContext for gateway containers containerSecurityContext: @@ -2738,7 +2840,7 @@ gateway: # By using the same name as the nginx/GEM gateway Service, Helm will not delete the Service Resource. # Instead, it will update the existing one in place. # If left as an empty string, a name is generated. - nameOverride: '' + nameOverride: "" ingress: enabled: false @@ -2747,9 +2849,9 @@ gateway: # By using the same name as the nginx/GEM gateway Ingress, Helm will not delete the Ingress Resource. # Instead, it will update the existing one in place. # If left as an empty string, a name is generated. - nameOverride: '' + nameOverride: "" # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 - ingressClassName: '' + ingressClassName: "" # -- Annotations for the Ingress annotations: {} # -- Hosts configuration for the Ingress @@ -3091,19 +3193,19 @@ metaMonitoring: # configuration to write logs to this Loki-compatible remote. Optional. remote: # -- Full URL for Loki push endpoint. Usually ends in /loki/api/v1/push - url: '' + url: "" auth: # -- Used to set X-Scope-OrgID header on requests. Usually not used in combination with username and password. - tenantId: '' + tenantId: "" # -- Basic authentication username. Optional. - username: '' + username: "" # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. - passwordSecretName: '' + passwordSecretName: "" # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set. - passwordSecretKey: '' + passwordSecretKey: "" # -- Client configurations for the LogsInstance that will scrape Mimir pods. Follows the format of .remote. additionalClientConfigs: [] @@ -3143,18 +3245,18 @@ metaMonitoring: # -- Full URL for Prometheus remote-write. Usually ends in /push. # If you leave the url field empty, then the chart automatically fills in the # address of the GEM gateway Service or the Mimir NGINX Service. - url: '' + url: "" # -- Used to add HTTP headers to remote-write requests. headers: {} auth: # -- Basic authentication username. Optional. - username: '' + username: "" # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. - passwordSecretName: '' + passwordSecretName: "" # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set. - passwordSecretKey: '' + passwordSecretKey: "" # -- Additional remote-write for the MetricsInstance that will scrape Mimir pods. Follows the format of .remote. additionalRemoteWriteConfigs: [] @@ -3175,7 +3277,7 @@ metaMonitoring: scrapeInterval: 60s # -- Sets the namespace of the resources. Leave empty or unset to use the same namespace as the Helm release. - namespace: '' + namespace: "" # -- Labels to add to all monitoring.grafana.com custom resources. # Does not affect the ServiceMonitors for kubernetes metrics; use serviceMonitor.labels for that. 
@@ -3474,7 +3576,7 @@ graphite: operator: In values: - graphite-querier - topologyKey: 'kubernetes.io/hostname' + topologyKey: "kubernetes.io/hostname" livenessProbe: httpGet: @@ -3556,7 +3658,7 @@ graphite: operator: In values: - graphite-write-proxy - topologyKey: 'kubernetes.io/hostname' + topologyKey: "kubernetes.io/hostname" livenessProbe: httpGet: @@ -3742,7 +3844,7 @@ smoke_test: repository: grafana/mimir-continuous-test tag: r277-59cd18d pullPolicy: IfNotPresent - tenantId: '' + tenantId: "" extraArgs: {} env: [] extraEnvFrom: [] @@ -3771,7 +3873,7 @@ continuous_test: # -- The tenant to use for tenantId or basicAuth authentication type # In case of tenantId authentication, it is injected as the X-Scope-OrgID header on requests. # In case of basicAuth, it is set as the username. - tenant: 'mimir-continuous-test' + tenant: "mimir-continuous-test" # -- Password for basicAuth auth (note: can be environment variable from secret attached in extraEnvFrom, e.g. $(PASSWORD)) # For GEM, it should contain an access token created for an access policy that allows `metrics:read` and `metrics:write` for the tenant. password: null diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml new file mode 100644 index 00000000000..74dd3131494 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml @@ -0,0 +1,406 @@ +--- +# Source: mimir-distributed/charts/minio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm +data: + initialize: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkBucketExists ($bucket) + # Check if the bucket exists, by using the exit code of `mc ls` + checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} stat myminio/$BUCKET > /dev/null 2>&1) + return $? + } + + # createBucket ($bucket, $policy, $purge) + # Ensure bucket exists, purging if asked to + createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + OBJECTLOCKING=$5 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." 
+ set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist and set objectlocking if enabled (NOTE: versioning will not be changed if OBJECTLOCKING is set, because object locking enables versioning on the buckets it creates) + if ! checkBucketExists $BUCKET ; then + if [ ! -z $OBJECTLOCKING ] ; then + if [ $OBJECTLOCKING = true ] ; then + echo "Creating bucket with OBJECTLOCKING '$BUCKET'" + ${MC} mb --with-lock myminio/$BUCKET + elif [ $OBJECTLOCKING = false ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + fi + elif [ -z $OBJECTLOCKING ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + fi + + + # set versioning for bucket if objectlocking is disabled or not set + if [ $OBJECTLOCKING = false ] ; then + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} anonymous set $POLICY myminio/$BUCKET + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the buckets + createBucket mimir-tsdb "none" false false false + createBucket mimir-ruler "none" false false false + createBucket enterprise-metrics-tsdb "none" false false false + createBucket enterprise-metrics-admin "none" false false false + createBucket enterprise-metrics-ruler "none" false false false + + add-user: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # An accessKey and secretKey credentials file is used to prevent shell execution errors caused by special characters. + # Special characters, for example: ',",<,>,{,} + MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_tmp" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 2 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkUserExists () + # Check if the user exists, by using the exit code of `mc admin user info` + checkUserExists() { + CMD=$(${MC} admin user info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1) + return $? + } + + # createUser ($policy) + createUser() { + POLICY=$1 + # check that the accessKey_and_secretKey_tmp file exists + if [[ !
-f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then + echo "credentials file does not exist" + return 1 + fi + if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then + echo "credentials file is invalid" + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + return 1 + fi + USER=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) + # Create the user if it does not exist + if ! checkUserExists ; then + echo "Creating user '$USER'" + cat $MINIO_ACCESSKEY_SECRETKEY_TMP | ${MC} admin user add myminio + else + echo "User '$USER' already exists." + fi + # clean up the credentials file. + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + set +e ; # attaching an already-attached policy errors out; allow it. + ${MC} admin policy attach myminio $POLICY --user=$USER + set -e + else + echo "User '$USER' has no policy attached." + fi + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the users + echo console > $MINIO_ACCESSKEY_SECRETKEY_TMP + echo console123 >> $MINIO_ACCESSKEY_SECRETKEY_TMP + createUser consoleAdmin + + add-policy: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 2 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkPolicyExists ($policy) + # Check if the policy exists, by using the exit code of `mc admin policy info` + checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? + } + + # createPolicy($name, $filename) + createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the policy if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy create myminio $NAME /config/$FILENAME.json + + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + add-svcacct: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # An accessKey and secretKey credentials file is used to prevent shell execution errors caused by special characters. + # Special characters, for example: ',",<,>,{,} + MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_svcacct_tmp" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys.
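+ # Root credentials are read from /config, where the chart projects the MinIO secret (rootUser/rootPassword) into the job pod.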
+ ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 2 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkSvcacctExists () + # Check if the svcacct exists, by using the exit code of `mc admin user svcacct info` + checkSvcacctExists() { + CMD=$(${MC} admin user svcacct info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1) + return $? + } + + # createSvcacct ($user, $filename) + createSvcacct () { + USER=$1 + FILENAME=$2 + # check that the accessKey_and_secretKey_tmp file exists + if [[ ! -f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then + echo "credentials file does not exist" + return 1 + fi + if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then + echo "credentials file is invalid" + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + return 1 + fi + SVCACCT=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) + # Create the svcacct if it does not exist + if ! checkSvcacctExists ; then + echo "Creating svcacct '$SVCACCT'" + # Check if a policy file is defined + if [ -z $FILENAME ]; then + ${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) myminio $USER + else + ${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --policy /config/$FILENAME.json myminio $USER + fi + else + echo "Svcacct '$SVCACCT' already exists." + fi + # clean up the credentials file. + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + custom-command: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 2 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # runCommand ($@) + # Run custom mc command + runCommand() { + ${MC} "$@" + return $?
+ } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml new file mode 100644 index 00000000000..e7de1c27847 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/charts/minio/templates/console-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-minio-console + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 9001 + protocol: TCP + targetPort: 9001 + selector: + app: minio + release: keda-autoscaling-metamonitoring-values diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml new file mode 100644 index 00000000000..ff8f96e271d --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml @@ -0,0 +1,82 @@ +--- +# Source: mimir-distributed/charts/minio/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 100% + maxUnavailable: 0 + replicas: 1 + selector: + matchLabels: + app: minio + release: keda-autoscaling-metamonitoring-values + template: + metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + release: keda-autoscaling-metamonitoring-values + annotations: + checksum/secrets: de8a82395dac65491c35efc0125967b2b004ce65fc0a2a1c886b0faa97c71fcc + checksum/config: 10ad5c27c9190d0529bb1f1d04cbb2d3c39a5b1e597c1566a15d2be75efcc802 + spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + + serviceAccountName: minio-sa + containers: + - name: minio + image: "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server /export -S /etc/minio/certs/ --address :9000 --console-address :9001" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + - name: export + mountPath: /export + ports: + - name: http + containerPort: 9000 + - name: http-console + containerPort: 9001 + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: keda-autoscaling-metamonitoring-values-minio + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: keda-autoscaling-metamonitoring-values-minio + key: rootPassword + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + resources: + requests: + cpu: 100m + memory: 128Mi + volumes: + - name: export + persistentVolumeClaim: + claimName: keda-autoscaling-metamonitoring-values-minio + - name: minio-user + secret: + secretName: 
keda-autoscaling-metamonitoring-values-minio diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml new file mode 100644 index 00000000000..1ebf5bf3c9d --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml @@ -0,0 +1,74 @@ +--- +# Source: mimir-distributed/charts/minio/templates/post-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-metamonitoring-values-minio-post-job + labels: + app: minio-post-job + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +spec: + template: + metadata: + labels: + app: minio-job + release: keda-autoscaling-metamonitoring-values + spec: + restartPolicy: OnFailure + volumes: + - name: etc-path + emptyDir: {} + - name: tmp + emptyDir: {} + - name: minio-configuration + projected: + sources: + - configMap: + name: keda-autoscaling-metamonitoring-values-minio + - secret: + name: keda-autoscaling-metamonitoring-values-minio + serviceAccountName: minio-sa + containers: + - name: minio-make-bucket + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: [ "/bin/sh", "/config/initialize" ] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-metamonitoring-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: etc-path + mountPath: /etc/minio/mc + - name: tmp + mountPath: /tmp + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi + - name: minio-make-user + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: [ "/bin/sh", "/config/add-user" ] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-metamonitoring-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: etc-path + mountPath: /etc/minio/mc + - name: tmp + mountPath: /tmp + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml new file mode 100644 index 00000000000..803354240cd --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml @@ -0,0 +1,17 @@ +--- +# Source: mimir-distributed/charts/minio/templates/pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml new file mode 100644 index 00000000000..b0cee49f8f8 --- /dev/null +++ 
b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/charts/minio/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm +type: Opaque +data: + rootUser: "Z3JhZmFuYS1taW1pcg==" + rootPassword: "c3VwZXJzZWNyZXQ=" diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/service.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/service.yaml new file mode 100644 index 00000000000..eb858312ba0 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: mimir-distributed/charts/minio/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-metamonitoring-values + heritage: Helm + monitoring: "true" +spec: + type: ClusterIP + ports: + - name: http + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: minio + release: keda-autoscaling-metamonitoring-values diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml new file mode 100644 index 00000000000..575ff3a4f62 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +# Source: mimir-distributed/charts/minio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "minio-sa" diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml new file mode 100644 index 00000000000..38ded93e517 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -0,0 +1,65 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.13.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/version: "v0.11.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-rollout-operator + 
securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: rollout-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + image: "grafana/rollout-operator:v0.11.0" + imagePullPolicy: IfNotPresent + args: + - -kubernetes.namespace=citestns + ports: + - name: http-metrics + containerPort: 8001 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml new file mode 100644 index 00000000000..9e21233002f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: keda-autoscaling-metamonitoring-values-rollout-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - update diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml new file mode 100644 index 00000000000..0705b2de4c5 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: keda-autoscaling-metamonitoring-values-rollout-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: keda-autoscaling-metamonitoring-values-rollout-operator +subjects: +- kind: ServiceAccount + name: keda-autoscaling-metamonitoring-values-rollout-operator diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml new file mode 100644 index 00000000000..bce49891045 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: keda-autoscaling-metamonitoring-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.13.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/version: "v0.11.0" + 
app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml new file mode 100644 index 00000000000..9ef1b9e5602 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager-fallback-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +data: + alertmanager_fallback_config.yaml: | + receivers: + - name: default-receiver + route: + receiver: default-receiver diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml new file mode 100644 index 00000000000..75463bd28a0 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml new file mode 100644 index 00000000000..2aaa3e420ef --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -0,0 +1,137 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + updateStrategy: + type: RollingUpdate + serviceName: 
keda-autoscaling-metamonitoring-values-mimir-alertmanager + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + tolerations: + [] + terminationGracePeriodSeconds: 60 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: tmp + emptyDir: {} + - name: active-queries + emptyDir: {} + - name: alertmanager-fallback-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager-fallback-config + containers: + - name: alertmanager + imagePullPolicy: IfNotPresent + args: + - "-target=alertmanager" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: alertmanager-fallback-config + mountPath: /configs/ + - name: tmp + mountPath: /tmp + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 10m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml new file mode 100644 index 00000000000..e11a68ff4ab --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -0,0 +1,36 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + 
{} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + - port: 9094 + protocol: TCP + name: cluster + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 00000000000..de741d73b39 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml new file mode 100644 index 00000000000..a2c2b3413a8 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 00000000000..ca6a193fd8f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,129 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: 
keda-autoscaling-metamonitoring-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + updateStrategy: + type: RollingUpdate + serviceName: keda-autoscaling-metamonitoring-values-mimir-compactor + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: compactor + imagePullPolicy: IfNotPresent + args: + - "-target=compactor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml new file mode 100644 index 00000000000..cbbd7517191 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-compactor + labels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: compactor diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml new file mode 100644 index 00000000000..9d26d43238a --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -0,0 +1,134 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: distributor + imagePullPolicy: IfNotPresent + args: + - "-target=distributor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # When write requests go through distributors via gRPC, we want gRPC clients to re-resolve the distributors DNS + # endpoint before the distributor process is terminated, in order to avoid any failures during graceful shutdown. + # To achieve it, we set a shutdown delay greater than the gRPC max connection age. 
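+ # Concretely: the 90s shutdown delay below exceeds the 60s max connection age, so clients re-resolve DNS before the pod exits.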
+ - "-server.grpc.keepalive.max-connection-age=60s" + - "-server.grpc.keepalive.max-connection-age-grace=5m" + - "-server.grpc.keepalive.max-connection-idle=1m" + - "-shutdown-delay=90s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "8" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + tolerations: + [] + terminationGracePeriodSeconds: 100 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml new file mode 100644 index 00000000000..2c462258afe --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml new file mode 100644 index 00000000000..1b9ff4b4e88 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml @@ -0,0 +1,43 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-distributor + labels: + app.kubernetes.io/name: 
mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 600 + type: Percent + value: 10 + maxReplicaCount: 10 + minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-metamonitoring-values-mimir-distributor + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="citestns"}[5m])) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0)[15m:]) * 1000 + serverAddress: https://mimir.example.com/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="distributor",namespace="citestns"}) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="distributor",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="distributor",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor",namespace="citestns", reason="OOMKilled"}) or vector(0)) + serverAddress: https://mimir.example.com/prometheus + threshold: "429496729" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 00000000000..fa0e3a4d1d2 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-distributor-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml new file mode 100644 index 00000000000..3d1566d42ce --- /dev/null +++ 
b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml new file mode 100644 index 00000000000..f90129dcecc --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-gossip-ring + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: gossip-ring + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + appProtocol: tcp + protocol: TCP + targetPort: 7946 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/part-of: memberlist diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml new file mode 100644 index 00000000000..64c8dd46428 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml new file mode 
100644 index 00000000000..b947ff656a7 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,420 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: 
mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + 
app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 00000000000..5ed11a9796d --- /dev/null +++ 
b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml new file mode 100644 index 00000000000..13c6e70f325 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: 
keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/mimir-config.yaml new file mode 100644 index 00000000000..7328fb62ea3 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -0,0 +1,118 @@ +--- +# Source: mimir-distributed/templates/mimir-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + mimir.yaml: | + + activity_tracker: + filepath: /active-query-tracker/activity.log + alertmanager: + data_dir: /data + enable_api: true + external_url: /alertmanager + fallback_config_file: /configs/alertmanager_fallback_config.yaml + alertmanager_storage: + backend: s3 + s3: + access_key_id: grafana-mimir + bucket_name: mimir-ruler + endpoint: keda-autoscaling-metamonitoring-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + blocks_storage: + backend: s3 + bucket_store: + sync_dir: /data/tsdb-sync + s3: + access_key_id: grafana-mimir + bucket_name: mimir-tsdb + endpoint: keda-autoscaling-metamonitoring-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + tsdb: + dir: /data/tsdb + head_compaction_interval: 15m + wal_replay_concurrency: 3 + compactor: + compaction_interval: 30m + data_dir: /data + deletion_delay: 2h + first_level_compaction_wait_period: 25m + max_closing_blocks_concurrency: 2 + max_opening_blocks_concurrency: 4 + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + wait_stability_min_duration: 1m + symbols_flushers_concurrency: 4 + distributor: + ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + frontend: + parallelize_shardable_queries: true + scheduler_address: keda-autoscaling-metamonitoring-values-mimir-query-scheduler-headless.citestns.svc:9095 + frontend_worker: + grpc_client_config: + max_send_msg_size: 419430400 + scheduler_address: keda-autoscaling-metamonitoring-values-mimir-query-scheduler-headless.citestns.svc:9095 + ingester: + ring: + final_sleep: 0s + heartbeat_period: 2m + heartbeat_timeout: 10m + num_tokens: 512 + tokens_file_path: /data/tokens + unregister_on_shutdown: false + zone_awareness_enabled: true + ingester_client: + grpc_client_config: + max_recv_msg_size: 104857600 + max_send_msg_size: 104857600 + limits: + max_cache_freshness: 10m + max_query_parallelism: 240 + max_total_query_length: 12000h + memberlist: + abort_if_cluster_join_fails: false + compression_enabled: false + join_members: + - 
dns+keda-autoscaling-metamonitoring-values-mimir-gossip-ring.citestns.svc.cluster.local.:7946 + querier: + max_concurrent: 16 + query_scheduler: + max_outstanding_requests_per_tenant: 800 + ruler: + alertmanager_url: dnssrvnoa+http://_http-metrics._tcp.keda-autoscaling-metamonitoring-values-mimir-alertmanager-headless.citestns.svc.cluster.local./alertmanager + enable_api: true + rule_path: /data + ruler_storage: + backend: s3 + s3: + access_key_id: grafana-mimir + bucket_name: mimir-ruler + endpoint: keda-autoscaling-metamonitoring-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + runtime_config: + file: /var/mimir/runtime.yaml + store_gateway: + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + kvstore: + prefix: multi-zone/ + tokens_file_path: /data/tokens + unregister_on_shutdown: false + wait_stability_min_duration: 1m + zone_awareness_enabled: true + usage_stats: + installation_mode: helm diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml new file mode 100644 index 00000000000..8738d73ff97 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml @@ -0,0 +1,47 @@ +--- +# Source: mimir-distributed/templates/minio/create-bucket-job.yaml +# Minio provides post-install hook to create bucket +# however the hook won't be executed if helm install is run +# with --wait flag. Hence this job is a workaround for that. +# See https://github.com/grafana/mimir/issues/2464 +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-make-minio-buckets-5.0.14 + namespace: "citestns" + labels: + app: mimir-distributed-make-bucket-job + release: keda-autoscaling-metamonitoring-values + heritage: Helm +spec: + template: + metadata: + labels: + app: mimir-distributed-job + release: keda-autoscaling-metamonitoring-values + spec: + restartPolicy: OnFailure + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: keda-autoscaling-metamonitoring-values-minio + - secret: + name: keda-autoscaling-metamonitoring-values-minio + containers: + - name: minio-mc + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-metamonitoring-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml new file mode 100644 index 00000000000..9582225c713 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -0,0 +1,125 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + 
app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + nginx.conf: | + worker_processes 5; ## Default: 1 + error_log /dev/stderr error; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stderr main; + + sendfile on; + tcp_nopush on; + proxy_http_version 1.1; + resolver kube-dns.kube-system.svc.cluster.local.; + + # Ensure that X-Scope-OrgID is always present, default to the no_auth_tenant for backwards compatibility when multi-tenancy was turned off. + map $http_x_scope_orgid $ensured_x_scope_orgid { + default $http_x_scope_orgid; + "" "anonymous"; + } + + proxy_read_timeout 300; + server { + listen 8080; + listen [::]:8080; + + location = / { + return 200 'OK'; + auth_basic off; + } + + proxy_set_header X-Scope-OrgID $ensured_x_scope_orgid; + + # Distributor endpoints + location /distributor { + set $distributor keda-autoscaling-metamonitoring-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location = /api/v1/push { + set $distributor keda-autoscaling-metamonitoring-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location /otlp/v1/metrics { + set $distributor keda-autoscaling-metamonitoring-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + + # Alertmanager endpoints + location /alertmanager { + set $alertmanager keda-autoscaling-metamonitoring-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /multitenant_alertmanager/status { + set $alertmanager keda-autoscaling-metamonitoring-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /api/v1/alerts { + set $alertmanager keda-autoscaling-metamonitoring-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + + # Ruler endpoints + location /prometheus/config/v1/rules { + set $ruler keda-autoscaling-metamonitoring-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location /prometheus/api/v1/rules { + set $ruler keda-autoscaling-metamonitoring-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + location /prometheus/api/v1/alerts { + set $ruler keda-autoscaling-metamonitoring-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location = /ruler/ring { + set $ruler keda-autoscaling-metamonitoring-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + # Rest of /prometheus goes to the query frontend + location /prometheus { + set $query_frontend keda-autoscaling-metamonitoring-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # 
Buildinfo endpoint can go to any component + location = /api/v1/status/buildinfo { + set $query_frontend keda-autoscaling-metamonitoring-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # Compactor endpoint for uploading blocks + location /api/v1/upload/block/ { + set $compactor keda-autoscaling-metamonitoring-values-mimir-compactor.citestns.svc.cluster.local.; + proxy_pass http://$compactor:8080$request_uri; + } + } + } diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml new file mode 100644 index 00000000000..fc2646bb7bb --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml @@ -0,0 +1,94 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + template: + metadata: + annotations: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: nginx + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + containers: + - name: nginx + image: docker.io/nginxinc/nginx-unprivileged:1.25-alpine + imagePullPolicy: IfNotPresent + ports: + - name: http-metric + containerPort: 8080 + protocol: TCP + env: + envFrom: + readinessProbe: + httpGet: + path: / + port: http-metric + initialDelaySeconds: 15 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: tmp + mountPath: /tmp + - name: docker-entrypoint-d-override + mountPath: /docker-entrypoint.d + resources: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-nginx + - name: tmp + emptyDir: {} + - name: docker-entrypoint-d-override + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml new file mode 100644 index 00000000000..84811cf7583 --- 
/dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml new file mode 100644 index 00000000000..48b4bae5f08 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml @@ -0,0 +1,25 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - name: http-metric + port: 80 + targetPort: http-metric + protocol: TCP + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: nginx diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml new file mode 100644 index 00000000000..6c4e86d3819 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -0,0 +1,114 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + name: keda-autoscaling-metamonitoring-values-mimir-overrides-exporter + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: overrides-exporter + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: 
true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: overrides-exporter + imagePullPolicy: IfNotPresent + args: + - "-target=overrides-exporter" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: + nodeSelector: + {} + affinity: + {} + + tolerations: + [] + terminationGracePeriodSeconds: 60 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml new file mode 100644 index 00000000000..6d2e2a87fe6 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml new file mode 100644 index 00000000000..d3e866f2475 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + 
annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: overrides-exporter diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000000..95f30856020 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +--- +# Source: mimir-distributed/templates/podsecuritypolicy.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: keda-autoscaling-metamonitoring-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + annotations: + "seccomp.security.alpha.kubernetes.io/allowedProfileNames": runtime/default +spec: + privileged: false + allowPrivilegeEscalation: false + volumes: + - 'configMap' + - 'emptyDir' + - 'persistentVolumeClaim' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-dep.yaml new file mode 100644 index 00000000000..da1e26c845c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -0,0 +1,126 @@ +--- +# Source: mimir-distributed/templates/querier/querier-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + annotations: + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: querier + imagePullPolicy: IfNotPresent + args: + - "-target=querier" + - 
"-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml new file mode 100644 index 00000000000..a509ffccd64 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/querier/querier-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-so.yaml new file mode 100644 index 00000000000..3e200b5a50f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-so.yaml @@ -0,0 +1,55 @@ +--- +# Source: mimir-distributed/templates/querier/querier-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + 
advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 120 + type: Percent + value: 10 + stabilizationWindowSeconds: 600 + scaleUp: + policies: + - periodSeconds: 120 + type: Percent + value: 50 + - periodSeconds: 120 + type: Pods + value: 15 + stabilizationWindowSeconds: 60 + maxReplicaCount: 10 + minReplicaCount: 2 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-metamonitoring-values-mimir-querier + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: sum(max_over_time(cortex_query_scheduler_inflight_requests{container="query-scheduler",namespace="citestns",quantile="0.5"}[1m])) + serverAddress: https://mimir.example.com/prometheus + threshold: "6" + customHeaders: "X-Scope-OrgID=tenant-1" + name: cortex_querier_hpa_default + type: prometheus + - metadata: + query: sum(rate(cortex_querier_request_duration_seconds_sum{container="querier",namespace="citestns"}[1m])) + serverAddress: https://mimir.example.com/prometheus + threshold: "6" + customHeaders: "X-Scope-OrgID=tenant-1" + name: cortex_querier_hpa_default_requests_duration + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-svc.yaml new file mode 100644 index 00000000000..507c38e727e --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/querier/querier-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: querier diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 00000000000..de87306871f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,122 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + strategy: + 
rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-frontend + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: query-frontend + imagePullPolicy: IfNotPresent + args: + - "-target=query-frontend" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # Reduce the likelihood of queries hitting terminated query-frontends. + - "-server.grpc.keepalive.max-connection-age=30s" + - "-shutdown-delay=90s" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + tolerations: + [] + terminationGracePeriodSeconds: 390 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml new file mode 100644 index 00000000000..b2699cba012 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + maxUnavailable: 1 diff --git 
a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml new file mode 100644 index 00000000000..16ff3f5c46b --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml @@ -0,0 +1,43 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 60 + type: Percent + value: 10 + maxReplicaCount: 10 + minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-metamonitoring-values-mimir-query-frontend + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="citestns"}[5m])) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0)[15m:]) * 1000 + serverAddress: https://mimir.example.com/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="query-frontend",namespace="citestns"}) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="query-frontend",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="query-frontend",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend",namespace="citestns", reason="OOMKilled"}) or vector(0)) + serverAddress: https://mimir.example.com/prometheus + threshold: "107374182" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 00000000000..8cc5e8961a9 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + 
protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-frontend diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml new file mode 100644 index 00000000000..225a289ca2d --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml @@ -0,0 +1,117 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-scheduler + annotations: + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: query-scheduler + imagePullPolicy: IfNotPresent + args: + - "-target=query-scheduler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git 
a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml new file mode 100644 index 00000000000..e4b3608fe64 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-scheduler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml new file mode 100644 index 00000000000..5d85bb81fe9 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-scheduler-headless + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml new file mode 100644 index 00000000000..de62a475c81 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + 
type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/role.yaml new file mode 100644 index 00000000000..6aab5a7fa8b --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/role.yaml @@ -0,0 +1,16 @@ +--- +# Source: mimir-distributed/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: keda-autoscaling-metamonitoring-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [keda-autoscaling-metamonitoring-values-mimir] diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/rolebinding.yaml new file mode 100644 index 00000000000..deeca95433c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: mimir-distributed/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: keda-autoscaling-metamonitoring-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: keda-autoscaling-metamonitoring-values-mimir +subjects: +- kind: ServiceAccount + name: keda-autoscaling-metamonitoring-values-mimir +- kind: ServiceAccount + name: keda-autoscaling-metamonitoring-values-mimir-distributed diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml new file mode 100644 index 00000000000..db34992a3bb --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -0,0 +1,126 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + strategy: + rollingUpdate: + maxSurge: 50% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: ruler + imagePullPolicy: IfNotPresent + args: + - "-target=ruler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-distributor.remote-timeout=10s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: +
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml new file mode 100644 index 00000000000..1f2609ba449 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml new file mode 100644 index 00000000000..63ee25baa4a --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml @@ -0,0 +1,45 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 600 + type: Percent + value: 10 + maxReplicaCount: 10 + minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-metamonitoring-values-mimir-ruler + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + metricName: ruler_cpu_hpa_default + query: max_over_time(sum(rate(container_cpu_usage_seconds_total{container="ruler",namespace="citestns"}[5m]))[15m:]) * 1000 + serverAddress: https://mimir.example.com/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + metricName: ruler_memory_hpa_default
+ query: max_over_time(sum(container_memory_working_set_bytes{container="ruler",namespace="citestns"})[15m:]) + serverAddress: https://mimir.example.com/prometheus + threshold: "107374182" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml new file mode 100644 index 00000000000..6f31b30cdbf --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: ruler diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/runtime-configmap.yaml new file mode 100644 index 00000000000..51c24e65d6f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/templates/runtime-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + runtime.yaml: | + + {} diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/serviceaccount.yaml new file mode 100644 index 00000000000..a1e8ae72ece --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: keda-autoscaling-metamonitoring-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" diff --git 
a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml new file mode 100644 index 00000000000..4c9a5ffc6c8 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -0,0 +1,53 @@ +--- +# Source: mimir-distributed/templates/smoke-test/smoke-test-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-smoke-test + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: smoke-test + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test + namespace: "citestns" +spec: + backoffLimit: 5 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: smoke-test + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: smoke-test + imagePullPolicy: IfNotPresent + args: + - "-tests.smoke-test" + - "-tests.write-endpoint=http://keda-autoscaling-metamonitoring-values-mimir-nginx.citestns.svc:80" + - "-tests.read-endpoint=http://keda-autoscaling-metamonitoring-values-mimir-nginx.citestns.svc:80/prometheus" + - "-tests.tenant-id=" + - "-tests.write-read-series-test.num-series=1000" + - "-tests.write-read-series-test.max-query-age=48h" + - "-server.metrics-port=8080" + volumeMounts: + env: + envFrom: + restartPolicy: OnFailure + volumes: diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml new file mode 100644 index 00000000000..105138c8192 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 00000000000..c2ea0dec7db --- /dev/null +++ 
b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,432 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: 
"GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: 
"JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-metamonitoring-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-metamonitoring-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-metamonitoring-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: 
diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 00000000000..e1f112098fa --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway diff --git a/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 00000000000..c97ced80815 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-metamonitoring-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: 
TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-metamonitoring-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-metamonitoring-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml new file mode 100644 index 00000000000..0200d49a75c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/configmap.yaml @@ -0,0 +1,406 @@ +--- +# Source: mimir-distributed/charts/minio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm +data: + initialize: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkBucketExists ($bucket) + # Check if the bucket exists, by using the exit code of `mc ls` + checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} stat myminio/$BUCKET > /dev/null 2>&1) + return $? + } + + # createBucket ($bucket, $policy, $purge) + # Ensure bucket exists, purging if asked to + createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + OBJECTLOCKING=$5 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." 
+ set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." + fi + fi + + # Create the bucket if it does not exist and set objectlocking if enabled (NOTE: versioning will be not changed if OBJECTLOCKING is set because it enables versioning to the Buckets created) + if ! checkBucketExists $BUCKET ; then + if [ ! -z $OBJECTLOCKING ] ; then + if [ $OBJECTLOCKING = true ] ; then + echo "Creating bucket with OBJECTLOCKING '$BUCKET'" + ${MC} mb --with-lock myminio/$BUCKET + elif [ $OBJECTLOCKING = false ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + fi + elif [ -z $OBJECTLOCKING ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + fi + + + # set versioning for bucket if objectlocking is disabled or not set + if [ $OBJECTLOCKING = false ] ; then + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} anonymous set $POLICY myminio/$BUCKET + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the buckets + createBucket mimir-tsdb "none" false false false + createBucket mimir-ruler "none" false false false + createBucket enterprise-metrics-tsdb "none" false false false + createBucket enterprise-metrics-admin "none" false false false + createBucket enterprise-metrics-ruler "none" false false false + + add-user: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # AccessKey and secretkey credentials file are added to prevent shell execution errors caused by special characters. + # Special characters for example : ',",<,>,{,} + MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_tmp" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkUserExists () + # Check if the user exists, by using the exit code of `mc admin user info` + checkUserExists() { + CMD=$(${MC} admin user info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1) + return $? + } + + # createUser ($policy) + createUser() { + POLICY=$1 + #check accessKey_and_secretKey_tmp file + if [[ ! 
-f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then + echo "credentials file does not exist" + return 1 + fi + if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then + echo "credentials file is invalid" + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + return 1 + fi + USER=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) + # Create the user if it does not exist + if ! checkUserExists ; then + echo "Creating user '$USER'" + cat $MINIO_ACCESSKEY_SECRETKEY_TMP | ${MC} admin user add myminio + else + echo "User '$USER' already exists." + fi + #clean up credentials files. + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + + # set policy for user + if [ ! -z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + set +e ; # policy already attach errors out, allow it. + ${MC} admin policy attach myminio $POLICY --user=$USER + set -e + else + echo "User '$USER' has no policy attached." + fi + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the users + echo console > $MINIO_ACCESSKEY_SECRETKEY_TMP + echo console123 >> $MINIO_ACCESSKEY_SECRETKEY_TMP + createUser consoleAdmin + + add-policy: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkPolicyExists ($policy) + # Check if the policy exists, by using the exit code of `mc admin policy info` + checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? + } + + # createPolicy($name, $filename) + createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." + fi + ${MC} admin policy create myminio $NAME /config/$FILENAME.json + + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + add-svcacct: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # AccessKey and secretkey credentials file are added to prevent shell execution errors caused by special characters. + # Special characters for example : ',",<,>,{,} + MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_svcacct_tmp" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. 
+ ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 2 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkSvcacctExists () + # Check if the svcacct exists, by using the exit code of `mc admin user svcacct info` + checkSvcacctExists() { + CMD=$(${MC} admin user svcacct info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1) + return $? + } + + # createSvcacct ($user) + createSvcacct () { + USER=$1 + FILENAME=$2 + #check accessKey_and_secretKey_tmp file + if [[ ! -f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then + echo "credentials file does not exist" + return 1 + fi + if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then + echo "credentials file is invalid" + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + return 1 + fi + SVCACCT=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) + # Create the svcacct if it does not exist + if ! checkSvcacctExists ; then + echo "Creating svcacct '$SVCACCT'" + # Check if policy file is define + if [ -z $FILENAME ]; then + ${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) myminio $USER + else + ${MC} admin user svcacct add --access-key $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --secret-key $(tail -n1 $MINIO_ACCESSKEY_SECRETKEY_TMP) --policy /config/$FILENAME.json myminio $USER + fi + else + echo "Svcacct '$SVCACCT' already exists." + fi + #clean up credentials files. + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + custom-command: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/tmp/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # runCommand ($@) + # Run custom mc command + runCommand() { + ${MC} "$@" + return $? 
+ } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml new file mode 100644 index 00000000000..60c2e90c7b5 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/console-service.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/charts/minio/templates/console-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-minio-console + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 9001 + protocol: TCP + targetPort: 9001 + selector: + app: minio + release: keda-autoscaling-values diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml new file mode 100644 index 00000000000..7f6a1e87608 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/deployment.yaml @@ -0,0 +1,82 @@ +--- +# Source: mimir-distributed/charts/minio/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm +spec: + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 100% + maxUnavailable: 0 + replicas: 1 + selector: + matchLabels: + app: minio + release: keda-autoscaling-values + template: + metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + release: keda-autoscaling-values + annotations: + checksum/secrets: 9320cfac2af83051779ce527e6bd973f8c624f5096262bf094e9cbd909f76d8f + checksum/config: f9d005cbbc5c7b2117ab56e60ffd4965fb496b88bc54cafd66034d02cc6a77f9 + spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + + serviceAccountName: minio-sa + containers: + - name: minio + image: "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" + imagePullPolicy: IfNotPresent + command: + - "/bin/sh" + - "-ce" + - "/usr/bin/docker-entrypoint.sh minio server /export -S /etc/minio/certs/ --address :9000 --console-address :9001" + volumeMounts: + - name: minio-user + mountPath: "/tmp/credentials" + readOnly: true + - name: export + mountPath: /export + ports: + - name: http + containerPort: 9000 + - name: http-console + containerPort: 9001 + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: keda-autoscaling-values-minio + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: keda-autoscaling-values-minio + key: rootPassword + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + resources: + requests: + cpu: 100m + memory: 128Mi + volumes: + - name: export + persistentVolumeClaim: + claimName: keda-autoscaling-values-minio + - name: minio-user + secret: + secretName: keda-autoscaling-values-minio diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml new file mode 
100644 index 00000000000..e9c8fb027d9 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/post-job.yaml @@ -0,0 +1,74 @@ +--- +# Source: mimir-distributed/charts/minio/templates/post-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-values-minio-post-job + labels: + app: minio-post-job + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +spec: + template: + metadata: + labels: + app: minio-job + release: keda-autoscaling-values + spec: + restartPolicy: OnFailure + volumes: + - name: etc-path + emptyDir: {} + - name: tmp + emptyDir: {} + - name: minio-configuration + projected: + sources: + - configMap: + name: keda-autoscaling-values-minio + - secret: + name: keda-autoscaling-values-minio + serviceAccountName: minio-sa + containers: + - name: minio-make-bucket + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: [ "/bin/sh", "/config/initialize" ] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: etc-path + mountPath: /etc/minio/mc + - name: tmp + mountPath: /tmp + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi + - name: minio-make-user + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: [ "/bin/sh", "/config/add-user" ] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: etc-path + mountPath: /etc/minio/mc + - name: tmp + mountPath: /tmp + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml new file mode 100644 index 00000000000..11bc1f947d9 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/pvc.yaml @@ -0,0 +1,17 @@ +--- +# Source: mimir-distributed/charts/minio/templates/pvc.yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "5Gi" diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml new file mode 100644 index 00000000000..04595ba6686 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/secrets.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/charts/minio/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm +type: Opaque +data: + rootUser: "Z3JhZmFuYS1taW1pcg==" + rootPassword: "c3VwZXJzZWNyZXQ=" diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/service.yaml 
b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/service.yaml new file mode 100644 index 00000000000..d1619c6f939 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/service.yaml @@ -0,0 +1,22 @@ +--- +# Source: mimir-distributed/charts/minio/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-minio + labels: + app: minio + chart: minio-5.0.14 + release: keda-autoscaling-values + heritage: Helm + monitoring: "true" +spec: + type: ClusterIP + ports: + - name: http + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: minio + release: keda-autoscaling-values diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml new file mode 100644 index 00000000000..575ff3a4f62 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/minio/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +--- +# Source: mimir-distributed/charts/minio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "minio-sa" diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml new file mode 100644 index 00000000000..18f5d5cb533 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -0,0 +1,65 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.13.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/version: "v0.11.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-values + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-values + spec: + serviceAccountName: keda-autoscaling-values-rollout-operator + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: rollout-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + image: "grafana/rollout-operator:v0.11.0" + imagePullPolicy: IfNotPresent + args: + - -kubernetes.namespace=citestns + ports: + - name: http-metrics + containerPort: 8001 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml 
b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml new file mode 100644 index 00000000000..b4252160a30 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: keda-autoscaling-values-rollout-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - update diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml new file mode 100644 index 00000000000..080ae596fe7 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: keda-autoscaling-values-rollout-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: keda-autoscaling-values-rollout-operator +subjects: +- kind: ServiceAccount + name: keda-autoscaling-values-rollout-operator diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml new file mode 100644 index 00000000000..3b431d30409 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: keda-autoscaling-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.13.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/version: "v0.11.0" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml new file mode 100644 index 00000000000..544301b72ea --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-values-mimir-alertmanager-fallback-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +data: + alertmanager_fallback_config.yaml: | + receivers: + - 
name: default-receiver + route: + receiver: default-receiver diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml new file mode 100644 index 00000000000..502e25a0fa0 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml new file mode 100644 index 00000000000..2e1f979543c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -0,0 +1,137 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + updateStrategy: + type: RollingUpdate + serviceName: keda-autoscaling-values-mimir-alertmanager + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + tolerations: + [] + terminationGracePeriodSeconds: 60 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - 
name: tmp + emptyDir: {} + - name: active-queries + emptyDir: {} + - name: alertmanager-fallback-config + configMap: + name: keda-autoscaling-values-mimir-alertmanager-fallback-config + containers: + - name: alertmanager + imagePullPolicy: IfNotPresent + args: + - "-target=alertmanager" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: alertmanager-fallback-config + mountPath: /configs/ + - name: tmp + mountPath: /tmp + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 10m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml new file mode 100644 index 00000000000..c7afaa239cc --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -0,0 +1,36 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-alertmanager-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + - port: 9094 + protocol: TCP + name: cluster + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 00000000000..5779998844f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 
9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml new file mode 100644 index 00000000000..602e3d33d26 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 00000000000..7d5b98ade8e --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,129 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + updateStrategy: + type: RollingUpdate + serviceName: keda-autoscaling-values-mimir-compactor + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: 
"mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: compactor + imagePullPolicy: IfNotPresent + args: + - "-target=compactor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml new file mode 100644 index 00000000000..3596339184a --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: compactor diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml new file mode 100644 index 00000000000..91b78566eaa --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -0,0 +1,134 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: 
memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: distributor + imagePullPolicy: IfNotPresent + args: + - "-target=distributor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # When write requests go through distributors via gRPC, we want gRPC clients to re-resolve the distributors DNS + # endpoint before the distributor process is terminated, in order to avoid any failures during graceful shutdown. + # To achieve it, we set a shutdown delay greater than the gRPC max connection age. + - "-server.grpc.keepalive.max-connection-age=60s" + - "-server.grpc.keepalive.max-connection-age-grace=5m" + - "-server.grpc.keepalive.max-connection-idle=1m" + - "-shutdown-delay=90s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "8" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + tolerations: + [] + terminationGracePeriodSeconds: 100 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml new file mode 100644 index 00000000000..f2dd5eb57ad --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + maxUnavailable: 1 diff --git 
a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml new file mode 100644 index 00000000000..bf3162cc374 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-so.yaml @@ -0,0 +1,43 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 600 + type: Percent + value: 10 + maxReplicaCount: 10 + minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-values-mimir-distributor + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="distributor",namespace="citestns"}[5m])) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0)[15m:]) * 1000 + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="distributor",namespace="citestns"}) and max by (pod) (up{container="distributor",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="distributor",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="distributor",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="distributor",namespace="citestns", reason="OOMKilled"}) or vector(0)) + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "429496729" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 00000000000..ee88354f9e4 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-distributor-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: 
grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml new file mode 100644 index 00000000000..92e0c16ebc6 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml new file mode 100644 index 00000000000..555cf28f3e1 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-gossip-ring + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: gossip-ring + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + appProtocol: tcp + protocol: TCP + targetPort: 7946 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/part-of: memberlist diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml new file mode 100644 index 00000000000..7dc8d9cb750 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-ingester + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + maxUnavailable: 1 diff --git 
a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 00000000000..74eb4ce8195 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,420 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: 
true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + 
app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-ingester-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 00000000000..13072517ce4 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-ingester-headless + labels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml new file mode 100644 index 00000000000..9a21bebb46c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ingester + 
rollout-group: ingester + zone: zone-c diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/mimir-config.yaml new file mode 100644 index 00000000000..210a1ef7953 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -0,0 +1,118 @@ +--- +# Source: mimir-distributed/templates/mimir-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-values-mimir-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + mimir.yaml: | + + activity_tracker: + filepath: /active-query-tracker/activity.log + alertmanager: + data_dir: /data + enable_api: true + external_url: /alertmanager + fallback_config_file: /configs/alertmanager_fallback_config.yaml + alertmanager_storage: + backend: s3 + s3: + access_key_id: grafana-mimir + bucket_name: mimir-ruler + endpoint: keda-autoscaling-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + blocks_storage: + backend: s3 + bucket_store: + sync_dir: /data/tsdb-sync + s3: + access_key_id: grafana-mimir + bucket_name: mimir-tsdb + endpoint: keda-autoscaling-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + tsdb: + dir: /data/tsdb + head_compaction_interval: 15m + wal_replay_concurrency: 3 + compactor: + compaction_interval: 30m + data_dir: /data + deletion_delay: 2h + first_level_compaction_wait_period: 25m + max_closing_blocks_concurrency: 2 + max_opening_blocks_concurrency: 4 + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + wait_stability_min_duration: 1m + symbols_flushers_concurrency: 4 + distributor: + ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + frontend: + parallelize_shardable_queries: true + scheduler_address: keda-autoscaling-values-mimir-query-scheduler-headless.citestns.svc:9095 + frontend_worker: + grpc_client_config: + max_send_msg_size: 419430400 + scheduler_address: keda-autoscaling-values-mimir-query-scheduler-headless.citestns.svc:9095 + ingester: + ring: + final_sleep: 0s + heartbeat_period: 2m + heartbeat_timeout: 10m + num_tokens: 512 + tokens_file_path: /data/tokens + unregister_on_shutdown: false + zone_awareness_enabled: true + ingester_client: + grpc_client_config: + max_recv_msg_size: 104857600 + max_send_msg_size: 104857600 + limits: + max_cache_freshness: 10m + max_query_parallelism: 240 + max_total_query_length: 12000h + memberlist: + abort_if_cluster_join_fails: false + compression_enabled: false + join_members: + - dns+keda-autoscaling-values-mimir-gossip-ring.citestns.svc.cluster.local.:7946 + querier: + max_concurrent: 16 + query_scheduler: + max_outstanding_requests_per_tenant: 800 + ruler: + alertmanager_url: dnssrvnoa+http://_http-metrics._tcp.keda-autoscaling-values-mimir-alertmanager-headless.citestns.svc.cluster.local./alertmanager + enable_api: true + rule_path: /data + ruler_storage: + backend: s3 + s3: + access_key_id: grafana-mimir + bucket_name: mimir-ruler + endpoint: keda-autoscaling-values-minio.citestns.svc:9000 + insecure: true + secret_access_key: supersecret + runtime_config: + file: /var/mimir/runtime.yaml + store_gateway: + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + kvstore: + prefix: multi-zone/ + tokens_file_path: /data/tokens + unregister_on_shutdown: 
false + wait_stability_min_duration: 1m + zone_awareness_enabled: true + usage_stats: + installation_mode: helm diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml new file mode 100644 index 00000000000..418f4918bdb --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/minio/create-bucket-job.yaml @@ -0,0 +1,47 @@ +--- +# Source: mimir-distributed/templates/minio/create-bucket-job.yaml +# Minio provides post-install hook to create bucket +# however the hook won't be executed if helm install is run +# with --wait flag. Hence this job is a workaround for that. +# See https://github.com/grafana/mimir/issues/2464 +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-values-mimir-make-minio-buckets-5.0.14 + namespace: "citestns" + labels: + app: mimir-distributed-make-bucket-job + release: keda-autoscaling-values + heritage: Helm +spec: + template: + metadata: + labels: + app: mimir-distributed-job + release: keda-autoscaling-values + spec: + restartPolicy: OnFailure + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: keda-autoscaling-values-minio + - secret: + name: keda-autoscaling-values-minio + containers: + - name: minio-mc + image: "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: keda-autoscaling-values-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml new file mode 100644 index 00000000000..18178fcca95 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -0,0 +1,125 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + nginx.conf: | + worker_processes 5; ## Default: 1 + error_log /dev/stderr error; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stderr main; + + sendfile on; + tcp_nopush on; + proxy_http_version 1.1; + resolver kube-dns.kube-system.svc.cluster.local.; + + # Ensure that X-Scope-OrgID is always present, default to the no_auth_tenant for backwards compatibility when multi-tenancy was turned off. 
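
The `map` directive that opens the block below implements the defaulting described in the comment above: a non-empty `X-Scope-OrgID` header is passed through unchanged, while an empty or absent header is rewritten to the anonymous tenant before the request is proxied upstream. A minimal Python sketch of the equivalent logic, for readers tracing the proxy behaviour (the helper name is ours, not part of the chart):

```python
# Sketch of the nginx `map $http_x_scope_orgid $ensured_x_scope_orgid` block
# below: nginx exposes a missing header as the empty string, which the map
# rewrites to "anonymous"; any other value passes through untouched.
def ensured_x_scope_orgid(headers: dict) -> str:
    """Return the tenant ID that nginx would forward upstream."""
    org_id = headers.get("X-Scope-OrgID", "")
    return org_id if org_id else "anonymous"

assert ensured_x_scope_orgid({}) == "anonymous"
assert ensured_x_scope_orgid({"X-Scope-OrgID": "tenant-1"}) == "tenant-1"
```
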
+ map $http_x_scope_orgid $ensured_x_scope_orgid { + default $http_x_scope_orgid; + "" "anonymous"; + } + + proxy_read_timeout 300; + server { + listen 8080; + listen [::]:8080; + + location = / { + return 200 'OK'; + auth_basic off; + } + + proxy_set_header X-Scope-OrgID $ensured_x_scope_orgid; + + # Distributor endpoints + location /distributor { + set $distributor keda-autoscaling-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location = /api/v1/push { + set $distributor keda-autoscaling-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location /otlp/v1/metrics { + set $distributor keda-autoscaling-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + + # Alertmanager endpoints + location /alertmanager { + set $alertmanager keda-autoscaling-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /multitenant_alertmanager/status { + set $alertmanager keda-autoscaling-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /api/v1/alerts { + set $alertmanager keda-autoscaling-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + + # Ruler endpoints + location /prometheus/config/v1/rules { + set $ruler keda-autoscaling-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location /prometheus/api/v1/rules { + set $ruler keda-autoscaling-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + location /prometheus/api/v1/alerts { + set $ruler keda-autoscaling-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location = /ruler/ring { + set $ruler keda-autoscaling-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + # Rest of /prometheus goes to the query frontend + location /prometheus { + set $query_frontend keda-autoscaling-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # Buildinfo endpoint can go to any component + location = /api/v1/status/buildinfo { + set $query_frontend keda-autoscaling-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # Compactor endpoint for uploading blocks + location /api/v1/upload/block/ { + set $compactor keda-autoscaling-values-mimir-compactor.citestns.svc.cluster.local.; + proxy_pass http://$compactor:8080$request_uri; + } + } + } diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml new file mode 100644 index 00000000000..d9241029480 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml @@ -0,0 +1,94 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + 
app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx + template: + metadata: + annotations: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: nginx + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + containers: + - name: nginx + image: docker.io/nginxinc/nginx-unprivileged:1.25-alpine + imagePullPolicy: IfNotPresent + ports: + - name: http-metric + containerPort: 8080 + protocol: TCP + env: + envFrom: + readinessProbe: + httpGet: + path: / + port: http-metric + initialDelaySeconds: 15 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: tmp + mountPath: /tmp + - name: docker-entrypoint-d-override + mountPath: /docker-entrypoint.d + resources: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-nginx + - name: tmp + emptyDir: {} + - name: docker-entrypoint-d-override + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml new file mode 100644 index 00000000000..83d5f1c0581 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml new file mode 100644 index 00000000000..f210825bfc7 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml @@ -0,0 +1,25 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values 
+ app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - name: http-metric + port: 80 + targetPort: http-metric + protocol: TCP + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: nginx diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml new file mode 100644 index 00000000000..a2c533bf28c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -0,0 +1,114 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + name: keda-autoscaling-values-mimir-overrides-exporter + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: overrides-exporter + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: overrides-exporter + imagePullPolicy: IfNotPresent + args: + - "-target=overrides-exporter" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: + nodeSelector: + {} + affinity: + {} + + tolerations: + [] + terminationGracePeriodSeconds: 60 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml 
b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml new file mode 100644 index 00000000000..54dea7d6b36 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml new file mode 100644 index 00000000000..10e02f30755 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: overrides-exporter diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000000..1c27980a925 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +--- +# Source: mimir-distributed/templates/podsecuritypolicy.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: keda-autoscaling-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + annotations: + "seccomp.security.alpha.kubernetes.io/allowedProfileNames": runtime/default +spec: + privileged: false + allowPrivilegeEscalation: false + volumes: + - 'configMap' + - 'emptyDir' + - 'persistentVolumeClaim' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL diff --git 
a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml new file mode 100644 index 00000000000..cf5ac9f2d05 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -0,0 +1,126 @@ +--- +# Source: mimir-distributed/templates/querier/querier-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + annotations: + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: querier + imagePullPolicy: IfNotPresent + args: + - "-target=querier" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml new file mode 100644 index 00000000000..62457af5c66 --- /dev/null +++ 
b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/querier/querier-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-so.yaml new file mode 100644 index 00000000000..9ec08966573 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-so.yaml @@ -0,0 +1,55 @@ +--- +# Source: mimir-distributed/templates/querier/querier-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 120 + type: Percent + value: 10 + stabilizationWindowSeconds: 600 + scaleUp: + policies: + - periodSeconds: 120 + type: Percent + value: 50 + - periodSeconds: 120 + type: Pods + value: 15 + stabilizationWindowSeconds: 60 + maxReplicaCount: 10 + minReplicaCount: 2 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-values-mimir-querier + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: sum(max_over_time(cortex_query_scheduler_inflight_requests{container="query-scheduler",namespace="citestns",quantile="0.5"}[1m])) + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "6" + customHeaders: "X-Scope-OrgID=tenant-1" + name: cortex_querier_hpa_default + type: prometheus + - metadata: + query: sum(rate(cortex_querier_request_duration_seconds_sum{container="querier",namespace="citestns"}[1m])) + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "6" + customHeaders: "X-Scope-OrgID=tenant-1" + name: cortex_querier_hpa_default_requests_duration + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-svc.yaml new file mode 100644 index 00000000000..77a5009f3a5 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/querier/querier-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" 
+spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: querier diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 00000000000..baff2270bf3 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,122 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-frontend + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: query-frontend + imagePullPolicy: IfNotPresent + args: + - "-target=query-frontend" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # Reduce the likelihood of queries hitting terminated query-frontends. 
+ - "-server.grpc.keepalive.max-connection-age=30s" + - "-shutdown-delay=90s" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + tolerations: + [] + terminationGracePeriodSeconds: 390 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml new file mode 100644 index 00000000000..66151a6e3e4 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml new file mode 100644 index 00000000000..28ecee0696d --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-so.yaml @@ -0,0 +1,43 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: keda-autoscaling-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 60 + type: Percent + value: 10 + maxReplicaCount: 10 + 
minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-values-mimir-query-frontend + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="query-frontend",namespace="citestns"}[5m])) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0)[15m:]) * 1000 + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="query-frontend",namespace="citestns"}) and max by (pod) (up{container="query-frontend",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="query-frontend",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="query-frontend",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="query-frontend",namespace="citestns", reason="OOMKilled"}) or vector(0)) + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "107374182" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 00000000000..21c53d37e07 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-frontend diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml new file mode 100644 index 00000000000..0073fab3f47 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml @@ -0,0 +1,117 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + replicas: 2 + selector: + matchLabels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-scheduler + annotations: + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: query-scheduler + imagePullPolicy: IfNotPresent + args: + - "-target=query-scheduler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml new file mode 100644 index 00000000000..dabfa9f7a84 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-query-scheduler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml new file mode 100644 index 00000000000..b1a76461d51 --- 
/dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-query-scheduler-headless + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml new file mode 100644 index 00000000000..fe629f31b12 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/role.yaml new file mode 100644 index 00000000000..fa85a033e3f --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/role.yaml @@ -0,0 +1,16 @@ +--- +# Source: mimir-distributed/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: keda-autoscaling-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [keda-autoscaling-values-mimir] diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/rolebinding.yaml new file mode 100644 index 00000000000..6a84405fb3a --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: mimir-distributed/templates/rolebinding.yaml +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: keda-autoscaling-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: keda-autoscaling-values-mimir +subjects: +- kind: ServiceAccount + name: keda-autoscaling-values-mimir diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml new file mode 100644 index 00000000000..af5d37e5291 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -0,0 +1,126 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keda-autoscaling-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + strategy: + rollingUpdate: + maxSurge: 50% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance:
keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: ruler + imagePullPolicy: IfNotPresent + args: + - "-target=ruler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-distributor.remote-timeout=10s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + subPath: + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + tolerations: + [] + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml new file mode 100644 index 00000000000..15ea2d503d3 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml new file mode 100644 index 00000000000..39f3ceb2b30 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-so.yaml @@ -0,0 +1,45 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-so.yaml +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: 
keda-autoscaling-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + policies: + - periodSeconds: 600 + type: Percent + value: 10 + maxReplicaCount: 10 + minReplicaCount: 1 + pollingInterval: 10 + scaleTargetRef: + name: keda-autoscaling-values-mimir-ruler + apiVersion: apps/v1 + kind: Deployment + triggers: + - metadata: + query: max_over_time(sum(sum by (pod) (rate(container_cpu_usage_seconds_total{container="ruler",namespace="citestns"}[5m])) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0)[15m:]) * 1000 + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "0" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus + - metadata: + query: max_over_time(sum((sum by (pod) (container_memory_working_set_bytes{container="ruler",namespace="citestns"}) and max by (pod) (up{container="ruler",namespace="citestns"}) > 0) or vector(0))[15m:]) + sum(sum by (pod) (max_over_time(kube_pod_container_resource_requests{container="ruler",namespace="citestns", resource="memory"}[15m])) and max by (pod) (changes(kube_pod_container_status_restarts_total{container="ruler",namespace="citestns"}[15m]) > 0) and max by (pod) (kube_pod_container_status_last_terminated_reason{container="ruler",namespace="citestns", reason="OOMKilled"}) or vector(0)) + serverAddress: http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus + threshold: "107374182" + customHeaders: "X-Scope-OrgID=tenant-1" + type: prometheus diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml new file mode 100644 index 00000000000..bc98f530a77 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: ruler diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/runtime-configmap.yaml new file mode 100644 index 00000000000..0bc83bba0ca --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/templates/runtime-configmap.yaml
+apiVersion: v1 +kind: ConfigMap +metadata: + name: keda-autoscaling-values-mimir-runtime + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + runtime.yaml: | + + {} diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/serviceaccount.yaml new file mode 100644 index 00000000000..6eba30f131e --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: keda-autoscaling-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml new file mode 100644 index 00000000000..b189935bafe --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -0,0 +1,53 @@ +--- +# Source: mimir-distributed/templates/smoke-test/smoke-test-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: keda-autoscaling-values-mimir-smoke-test + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: smoke-test + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test + namespace: "citestns" +spec: + backoffLimit: 5 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: smoke-test + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: smoke-test + imagePullPolicy: IfNotPresent + args: + - "-tests.smoke-test" + - "-tests.write-endpoint=http://keda-autoscaling-values-mimir-nginx.citestns.svc:80" + - "-tests.read-endpoint=http://keda-autoscaling-values-mimir-nginx.citestns.svc:80/prometheus" + - "-tests.tenant-id=" + - "-tests.write-read-series-test.num-series=1000" + - "-tests.write-read-series-test.max-query-age=48h" + - "-server.metrics-port=8080" + volumeMounts: + env: + envFrom: + restartPolicy: OnFailure + volumes: diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml new file mode 100644 index 00000000000..5cd544bc308 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: keda-autoscaling-values-mimir-store-gateway + 
labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + maxUnavailable: 1 diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 00000000000..fdc12fd9d3c --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,432 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - 
name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: 
memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: keda-autoscaling-values-mimir-store-gateway-headless + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: keda-autoscaling-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + nodeSelector: + {} + affinity: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + tolerations: + [] + terminationGracePeriodSeconds: 240 + volumes: + - name: config + configMap: + name: keda-autoscaling-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: keda-autoscaling-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 
100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + envFrom: diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 00000000000..2dfbdf7ac80 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-store-gateway-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway diff --git a/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 00000000000..005ac4904f4 --- /dev/null +++ b/operations/helm/tests/keda-autoscaling-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + 
protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: keda-autoscaling-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: keda-autoscaling-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c
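
Note on the rendered trigger thresholds: the memory thresholds in the generated ScaledObjects follow from each container's memory request and the `targetMemoryUtilizationPercentage` in the values files (for the 128Mi request at 80%, floor(134217728 * 0.80) = 107374182), while the querier ScaledObject scales on scheduler queue depth instead and takes its threshold "6" directly from `querySchedulerInflightRequestsThreshold`. A minimal sketch of the arithmetic, with illustrative helper names that are not part of the chart:

```go
package main

import "fmt"

// memoryThreshold sketches how the rendered memory thresholds relate to the
// container memory request and targetMemoryUtilizationPercentage:
// floor(134217728 * 0.80) = 107374182, matching the ScaledObjects above.
func memoryThreshold(requestBytes int64, targetPercent float64) int64 {
	// Truncation equals floor for positive values.
	return int64(float64(requestBytes) * targetPercent / 100)
}

// cpuThreshold mirrors the rendered CPU thresholds. Truncating the request in
// cores (0.1 for 100m) times the target utilization yields 0, which is
// consistent with the `threshold: "0"` CPU triggers above, even though the
// trigger queries themselves report millicores (the `* 1000` in the query).
func cpuThreshold(requestCores float64, targetPercent float64) int64 {
	return int64(requestCores * targetPercent / 100)
}

func main() {
	fmt.Println(memoryThreshold(128*1024*1024, 80)) // 107374182
	fmt.Println(cpuThreshold(0.1, 80))              // 0
}
```

Every trigger's `serverAddress` points at the chart's nginx gateway under `/prometheus`, so the scalers query metrics back out of Mimir itself, which is why the values files also pass the tenant through `customHeaders: X-Scope-OrgID`.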