From afd63c598126a4f8b2c87809af29c896e1a41b02 Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Wed, 21 Sep 2022 10:02:12 -0600 Subject: [PATCH] [helm] Add Loki Canary to Helm Chart (#7173) **What this PR does / why we need it**: This PR adds the Loki canary as well as the GEL provisioner to the Loki helm chart. The Loki canary expands the self-monitoring functionality of the Helm chart, giving operators insight beyond whether Loki is just running, but also if it's functioning properly. The GEL provisioner was necessary for the canary to work when the enterprise option (`enterprise.enabled = true`) is enabled. Since enabling enterprise in the helm chart also enables auth/multi-tenancy, the addition of the provisioner allows the helm chart to automatically create a tenant for the canary via the GEL admin API, and to create applicable access policies and tokens. Furthermore, additional tenants can also be specified, which will be provisioned, creating a read and write token for each that will be stored in k8s secrets. **Which issue(s) this PR fixes**: Fixes #7019 Co-authored-by: Karsten Jeschkies Co-authored-by: Dylan Guedes --- production/helm/loki/CHANGELOG.md | 4 + production/helm/loki/Chart.lock | 2 +- production/helm/loki/Chart.yaml | 3 +- production/helm/loki/README.md | 43 ++++++- production/helm/loki/templates/_helpers.tpl | 39 +++++- .../loki/templates/loki-canary/_helpers.tpl | 40 ++++++ .../loki/templates/loki-canary/daemonset.yaml | 97 ++++++++++++++ .../loki/templates/loki-canary/service.yaml | 18 +++ .../templates/loki-canary/serviceaccount.yaml | 14 +++ .../monitoring/_helpers-monitoring.tpl | 13 +- .../templates/monitoring/logs-instance.yaml | 1 - .../templates/monitoring/servicemonitor.yaml | 5 +- .../loki/templates/provisioner/_helpers.yaml | 36 ++++++ .../provisioner/job-provisioner.yaml | 119 ++++++++++++++++++ .../provisioner/role-provisioner.yaml | 20 +++ .../provisioner/rolebinding-provisioner.yaml | 24 ++++ .../serviceaccount-provisioner.yaml | 17 +++ .../loki/templates/tokengen/job-tokengen.yaml | 7 +- production/helm/loki/values.yaml | 92 +++++++++++++- tools/dev/k3d/Makefile | 11 +- .../environments/enterprise-logs/main.jsonnet | 69 +++++----- .../environments/enterprise-logs/spec.json | 2 +- 22 files changed, 609 insertions(+), 67 deletions(-) create mode 100644 production/helm/loki/templates/loki-canary/_helpers.tpl create mode 100644 production/helm/loki/templates/loki-canary/daemonset.yaml create mode 100644 production/helm/loki/templates/loki-canary/service.yaml create mode 100644 production/helm/loki/templates/loki-canary/serviceaccount.yaml create mode 100644 production/helm/loki/templates/provisioner/_helpers.yaml create mode 100644 production/helm/loki/templates/provisioner/job-provisioner.yaml create mode 100644 production/helm/loki/templates/provisioner/role-provisioner.yaml create mode 100644 production/helm/loki/templates/provisioner/rolebinding-provisioner.yaml create mode 100644 production/helm/loki/templates/provisioner/serviceaccount-provisioner.yaml diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 046c53c5c94af..347730a2835b6 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -11,6 +11,10 @@ Entries should be ordered as follows: Entries should include a reference to the pull request that introduced the change. +## 3.1.0 + +- [FEATURE] Loki canary and GEL token provisioner added. 
The GEL token provisioner will provision a tenant and token to be used by the self-monitoring features (including the canary), as well as any additional tenants specified. A k8s secret will be created with a read and write token for each additional tenant specified. + ## 3.0.4 - [CHANGE] Default minio replicas to 1 node with 2 drives. The old config used the default, which was 16 nodes with 1 drive each. diff --git a/production/helm/loki/Chart.lock b/production/helm/loki/Chart.lock index bf4a71c2c80ef..547f0f0d760e6 100644 --- a/production/helm/loki/Chart.lock +++ b/production/helm/loki/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: https://grafana.github.io/helm-charts version: 0.2.3 digest: sha256:74ef214ca08874662ab403a2e5eea39df26ad690962fa19f9ff69cf551550ff2 -generated: "2022-08-31T15:16:16.581500076+02:00" +generated: "2022-09-14T10:22:56.1397723-06:00" diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index d13a00712f1c7..c9ee34c59ffb0 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -1,9 +1,10 @@ +--- apiVersion: v2 name: loki description: Helm chart for Grafana Loki in simple, scalable mode type: application appVersion: 2.6.1 -version: 3.0.9 +version: 3.1.0 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index a7540d00d4428..0f89ac01f1d75 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 3.0.9](https://img.shields.io/badge/Version-3.0.9-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square) +![Version: 3.1.0](https://img.shields.io/badge/Version-3.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square) Helm chart for Grafana Loki in simple, scalable mode @@ -142,7 +142,10 @@ monitoring: | Key | Type | Default | Description | |-----|------|---------|-------------| | enterprise.adminApi | object | `{"enabled":true}` | If enabled, the correct admin_client storage will be configured. If disabled while running enterprise, make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`. 
| -| enterprise.config | string | `"{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") }}\nadmin_client:\n storage:\n s3:\n bucket_name: {{ .Values.loki.storage.bucketNames.admin }}\n{{- end }}\n{{- end }}\nauth:\n type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ .Release.Name }}\nlicense:\n path: /etc/loki/license/license.jwt\n"` | | +| enterprise.adminTokenSecret | string | `nil` | Alternative name for admin token secret, needed by tokengen and provisioner jobs | +| enterprise.canarySecret | string | `nil` | Alternative name of the secret to store token for the canary | +| enterprise.cluster_name | string | `nil` | Optional name of the GEL cluster, otherwise will use .Release.Name The cluster name must match what is in your GEL license | +| enterprise.config | string | `"{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") }}\nadmin_client:\n storage:\n s3:\n bucket_name: {{ .Values.loki.storage.bucketNames.admin }}\n{{- end }}\n{{- end }}\nauth:\n type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n path: /etc/loki/license/license.jwt\n"` | | | enterprise.enabled | bool | `false` | | | enterprise.externalLicenseName | string | `nil` | Name of external licesne secret to use | | enterprise.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | @@ -151,15 +154,27 @@ monitoring: | enterprise.image.tag | string | `"v1.4.0"` | Overrides the image tag whose default is the chart's appVersion | | enterprise.license | object | `{"contents":"NOTAVALIDLICENSE"}` | Grafana Enterprise Logs license In order to use Grafana Enterprise Logs features, you will need to provide the contents of your Grafana Enterprise Logs license, either by providing the contents of the license.jwt, or the name Kubernetes Secret that contains your license.jwt. To set the license contents, use the flag `--set-file 'license.contents=./license.jwt'` | | enterprise.nginxConfig.file | string | `"worker_processes 5; ## Default: 1\nerror_log /dev/stderr;\npid /tmp/nginx.pid;\nworker_rlimit_nofile 8192;\n\nevents {\n worker_connections 4096; ## Default: 1024\n}\n\nhttp {\n client_body_temp_path /tmp/client_temp;\n proxy_temp_path /tmp/proxy_temp_path;\n fastcgi_temp_path /tmp/fastcgi_temp;\n uwsgi_temp_path /tmp/uwsgi_temp;\n scgi_temp_path /tmp/scgi_temp;\n\n proxy_http_version 1.1;\n\n default_type application/octet-stream;\n log_format {{ .Values.gateway.nginxConfig.logFormat }}\n\n {{- if .Values.gateway.verboseLogging }}\n access_log /dev/stderr main;\n {{- else }}\n\n map $status $loggable {\n ~^[23] 0;\n default 1;\n }\n access_log /dev/stderr main if=$loggable;\n {{- end }}\n\n sendfile on;\n tcp_nopush on;\n resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }};\n\n {{- with .Values.gateway.nginxConfig.httpSnippet }}\n {{ . 
| nindent 2 }}\n {{- end }}\n\n server {\n listen 8080;\n\n {{- if .Values.gateway.basicAuth.enabled }}\n auth_basic \"Loki\";\n auth_basic_user_file /etc/nginx/secrets/.htpasswd;\n {{- end }}\n\n location = / {\n return 200 'OK';\n auth_basic off;\n }\n\n location = /api/prom/push {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /api/prom/tail {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n }\n\n location ~ /api/prom/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /prometheus/api/v1/alerts.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /prometheus/api/v1/rules.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /loki/api/v1/push {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location = /loki/api/v1/tail {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n proxy_set_header Upgrade $http_upgrade;\n proxy_set_header Connection \"upgrade\";\n }\n\n location ~ /loki/api/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /admin/api/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /compactor/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /distributor/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ring {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ingester/.* {\n proxy_pass http://{{ include \"loki.writeFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /ruler/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n location ~ /scheduler/.* {\n proxy_pass http://{{ include \"loki.readFullname\" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;\n }\n\n {{- with .Values.gateway.nginxConfig.serverSnippet }}\n {{ . 
| nindent 4 }}\n {{- end }}\n }\n}\n"` | | -| enterprise.tokengen | object | `{"adminTokenSecret":"gel-admin-token","annotations":{},"enabled":true,"env":[],"extraArgs":[],"extraVolumeMounts":[],"extraVolumes":[],"image":"bitnami/kubectl","labels":{},"securityContext":{"fsGroup":10001,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10001},"tolerations":[]}` | Configuration for `tokengen` target | -| enterprise.tokengen.adminTokenSecret | string | `"gel-admin-token"` | Name of the secret to store the admin token in | +| enterprise.provisioner | object | `{"annotations":{},"enabled":true,"env":[],"image":{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"grafana/enterprise-logs-provisioner","tag":null},"labels":{},"priorityClassName":null,"provisionedSecretPrefix":"{{ include \"loki.name\" . }}-provisioned","securityContext":{"fsGroup":10001,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10001},"tenants":[]}` | Configuration for `provisioner` target | +| enterprise.provisioner.annotations | object | `{}` | Additional annotations for the `provisioner` Job | +| enterprise.provisioner.enabled | bool | `true` | Whether the job should be part of the deployment | +| enterprise.provisioner.env | list | `[]` | Additional Kubernetes environment | +| enterprise.provisioner.image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"grafana/enterprise-logs-provisioner","tag":null}` | Provisioner image to Utilize | +| enterprise.provisioner.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | +| enterprise.provisioner.image.registry | string | `"docker.io"` | The Docker registry | +| enterprise.provisioner.image.repository | string | `"grafana/enterprise-logs-provisioner"` | Docker image repository | +| enterprise.provisioner.image.tag | string | `nil` | Overrides the image tag whose default is the chart's appVersion | +| enterprise.provisioner.labels | object | `{}` | Additional labels for the `provisioner` Job | +| enterprise.provisioner.priorityClassName | string | `nil` | The name of the PriorityClass for provisioner Job | +| enterprise.provisioner.provisionedSecretPrefix | string | `"{{ include \"loki.name\" . }}-provisioned"` | Name of the secret to store provisioned tokens in | +| enterprise.provisioner.securityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10001}` | Run containers as user `enterprise-logs(uid=10001)` | +| enterprise.provisioner.tenants | list | `[]` | Tenants to be created. Each tenant will get a read and write policy and associated token. 
| +| enterprise.tokengen | object | `{"annotations":{},"enabled":true,"env":[],"extraArgs":[],"extraVolumeMounts":[],"extraVolumes":[],"labels":{},"securityContext":{"fsGroup":10001,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10001},"tolerations":[]}` | Configuration for `tokengen` target | | enterprise.tokengen.annotations | object | `{}` | Additional annotations for the `tokengen` Job | | enterprise.tokengen.enabled | bool | `true` | Whether the job should be part of the deployment | | enterprise.tokengen.env | list | `[]` | Additional Kubernetes environment | | enterprise.tokengen.extraArgs | list | `[]` | Additional CLI arguments for the `tokengen` target | | enterprise.tokengen.extraVolumeMounts | list | `[]` | Additional volume mounts for Pods | | enterprise.tokengen.extraVolumes | list | `[]` | Additional volumes for Pods | -| enterprise.tokengen.image | string | `"bitnami/kubectl"` | Job Create Secret Stage Image to Utilize | | enterprise.tokengen.labels | object | `{}` | Additional labels for the `tokengen` Job | | enterprise.tokengen.securityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsNonRoot":true,"runAsUser":10001}` | Run containers as user `enterprise-logs(uid=10001)` | | enterprise.tokengen.tolerations | list | `[]` | Tolerations for tokengen Job | @@ -235,6 +250,10 @@ monitoring: | ingress.paths.read[6] | string | `"/prometheus/api/v1/alerts"` | | | ingress.paths.write[0] | string | `"/api/prom/push"` | | | ingress.paths.write[1] | string | `"/loki/api/v1/push"` | | +| kubectlImage.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | +| kubectlImage.registry | string | `"docker.io"` | The Docker registry | +| kubectlImage.repository | string | `"bitnami/kubectl"` | Docker image repository | +| kubectlImage.tag | string | `nil` | Overrides the image tag whose default is the chart's appVersion | | loki.auth_enabled | bool | `true` | | | loki.commonConfig | object | `{"path_prefix":"/var/loki","replication_factor":3}` | Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration | | loki.compactor | object | `{}` | Optional compactor configuration | @@ -284,10 +303,24 @@ monitoring: | monitoring.selfMonitoring.logsInstance.annotations | object | `{}` | LogsInstance annotations | | monitoring.selfMonitoring.logsInstance.labels | object | `{}` | Additional LogsInstance labels | | monitoring.selfMonitoring.logsInstance.namespace | string | `nil` | Alternative namespace for LogsInstance resources | +| monitoring.selfMonitoring.lokiCanary.annotations | object | `{}` | Additional annotations for the `loki-canary` Daemonset | +| monitoring.selfMonitoring.lokiCanary.enabled | bool | `true` | | +| monitoring.selfMonitoring.lokiCanary.extraArgs | list | `[]` | Additional CLI arguments for the `loki-canary' command | +| monitoring.selfMonitoring.lokiCanary.extraEnv | list | `[]` | Environment variables to add to the canary pods | +| monitoring.selfMonitoring.lokiCanary.extraEnvFrom | list | `[]` | Environment variables from secrets or configmaps to add to the canary pods | +| monitoring.selfMonitoring.lokiCanary.image | object | `{"pullPolicy":"IfNotPresent","registry":"docker.io","repository":"grafana/loki-canary","tag":null}` | Image to use for loki canary | +| monitoring.selfMonitoring.lokiCanary.image.pullPolicy | string | `"IfNotPresent"` | Docker image pull policy | +| monitoring.selfMonitoring.lokiCanary.image.registry | string | `"docker.io"` | The Docker registry | +| 
monitoring.selfMonitoring.lokiCanary.image.repository | string | `"grafana/loki-canary"` | Docker image repository | +| monitoring.selfMonitoring.lokiCanary.image.tag | string | `nil` | Overrides the image tag whose default is the chart's appVersion | +| monitoring.selfMonitoring.lokiCanary.nodeSelector | object | `{}` | Node selector for canary pods | +| monitoring.selfMonitoring.lokiCanary.resources | object | `{}` | Resource requests and limits for the canary | +| monitoring.selfMonitoring.lokiCanary.tolerations | list | `[]` | Tolerations for canary pods | | monitoring.selfMonitoring.podLogs.annotations | object | `{}` | PodLogs annotations | | monitoring.selfMonitoring.podLogs.labels | object | `{}` | Additional PodLogs labels | | monitoring.selfMonitoring.podLogs.namespace | string | `nil` | Alternative namespace for PodLogs resources | | monitoring.selfMonitoring.podLogs.relabelings | list | `[]` | PodLogs relabel configs to apply to samples before scraping https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig | +| monitoring.selfMonitoring.tenant | string | `"self-monitoring"` | Tenant to use for self monitoring | | monitoring.serviceMonitor.annotations | object | `{}` | ServiceMonitor annotations | | monitoring.serviceMonitor.enabled | bool | `true` | If enabled, ServiceMonitor resources for Prometheus Operator are created | | monitoring.serviceMonitor.interval | string | `nil` | ServiceMonitor scrape interval | diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index 9c9d4c6db2d3a..44cfbb55c29ea 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -128,7 +128,6 @@ Docker image name for enterprise logs {{- define "loki.enterpriseImage" -}} {{- $dict := dict "service" .Values.enterprise.image "global" .Values.global.image "defaultVersion" .Values.enterprise.version -}} {{- include "loki.baseImage" $dict -}} -{{/* {{- printf "foo" -}} */}} {{- end -}} {{/* @@ -138,6 +137,14 @@ Docker image name {{- if .Values.enterprise.enabled -}}{{- include "loki.enterpriseImage" . -}}{{- else -}}{{- include "loki.lokiImage" . -}}{{- end -}} {{- end -}} +{{/* +Docker image name for kubectl container +*/}} +{{- define "loki.kubectlImage" -}} +{{- $dict := dict "service" .Values.kubectlImage "global" .Values.global.image "defaultVersion" "latest" -}} +{{- include "loki.baseImage" $dict -}} +{{- end -}} + {{/* Generated storage config for loki common config */}} @@ -322,3 +329,33 @@ Create the service endpoint including port for MinIO. {{ include "loki.name" . }}-memberlist {{- end -}} +{{/* Determine the public host for the Loki cluster */}} +{{- define "loki.host" -}} +{{- $isSingleBinary := eq (include "loki.deployment.isSingleBinary" .) "true" -}} +{{- $url := printf "%s.%s.svc.%s" (include "loki.gatewayFullname" .) .Release.Namespace .Values.global.clusterDomain }} +{{- if and $isSingleBinary (not .Values.gateway.enabled) }} + {{- $url = printf "%s.%s.svc.%s:3100" (include "loki.singleBinaryFullname" .) .Release.Namespace .Values.global.clusterDomain }} +{{- end }} +{{- printf "%s" $url -}} +{{- end -}} + +{{/* Determine the public endpoint for the Loki cluster */}} +{{- define "loki.address" -}} +{{- printf "http://%s" (include "loki.host" . 
) -}} +{{- end -}} + +{{/* Name of the cluster */}} +{{- define "loki.clusterName" -}} +{{- $name := .Values.enterprise.cluster_name | default .Release.Name }} +{{- printf "%s" $name -}} +{{- end -}} + +{{/* Name of kubernetes secret to persist GEL admin token to */}} +{{- define "enterprise-logs.adminTokenSecret" }} +{{- .Values.enterprise.adminTokenSecret | default (printf "%s-admin-token" (include "loki.name" . )) -}} +{{- end -}} + +{{/* Name of kubernetes secret to persist canary credentials in */}} +{{- define "enterprise-logs.canarySecret" }} +{{- .Values.enterprise.canarySecret | default (printf "%s-canary-secret" (include "loki.name" . )) -}} +{{- end -}} diff --git a/production/helm/loki/templates/loki-canary/_helpers.tpl b/production/helm/loki/templates/loki-canary/_helpers.tpl new file mode 100644 index 0000000000000..f4cddcac1dcb5 --- /dev/null +++ b/production/helm/loki/templates/loki-canary/_helpers.tpl @@ -0,0 +1,40 @@ +{{/* +canary fullname +*/}} +{{- define "loki-canary.fullname" -}} +{{ include "loki.name" . }}-canary +{{- end }} + +{{/* +canary common labels +*/}} +{{- define "loki-canary.labels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: canary +{{- end }} + +{{/* +canary selector labels +*/}} +{{- define "loki-canary.selectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: canary +{{- end }} + +{{/* +Docker image name for loki-canary +*/}} +{{- define "loki-canary.image" -}} +{{- $dict := dict "service" .Values.monitoring.selfMonitoring.lokiCanary.image "global" .Values.global.image "defaultVersion" "latest" -}} +{{- include "loki.baseImage" $dict -}} +{{- end -}} + +{{/* +canry priority class name +*/}} +{{- define "loki-canary.priorityClassName" -}} +{{- $pcn := coalesce .Values.global.priorityClassName .Values.read.priorityClassName -}} +{{- if $pcn }} +priorityClassName: {{ $pcn }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/loki-canary/daemonset.yaml b/production/helm/loki/templates/loki-canary/daemonset.yaml new file mode 100644 index 0000000000000..64f95eeea3c8d --- /dev/null +++ b/production/helm/loki/templates/loki-canary/daemonset.yaml @@ -0,0 +1,97 @@ +{{- with .Values.monitoring.selfMonitoring.lokiCanary -}} +{{- if .enabled -}} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "loki-canary.fullname" $ }} + labels: + {{- include "loki-canary.labels" $ | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "loki-canary.selectorLabels" $ | nindent 6 }} + template: + metadata: + annotations: + {{- with .annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "loki-canary.selectorLabels" $ | nindent 8 }} + spec: + serviceAccountName: {{ include "loki-canary.fullname" $ }} + {{- with $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- include "loki-canary.priorityClassName" $ | nindent 6 }} + securityContext: + {{- toYaml $.Values.loki.podSecurityContext | nindent 8 }} + containers: + - name: loki-canary + image: {{ include "loki-canary.image" $ }} + imagePullPolicy: {{ $.Values.loki.image.pullPolicy }} + args: + - -addr={{- include "loki.host" $ }} + - -labelname=pod + - -labelvalue=$(POD_NAME) + {{- if $.Values.enterprise.enabled }} + - -user=$(USER) + - -pass=$(PASS) + {{- else if $.Values.loki.auth_enabled }} + - -user={{ $.Values.monitoring.selfMonitoring.tenant }} + {{- end }} + {{- with .extraArgs }} + {{- toYaml . 
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml $.Values.loki.containerSecurityContext | nindent 12 }} + ports: + - name: http-metrics + containerPort: 3500 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{ if $.Values.enterprise.enabled }} + - name: USER + valueFrom: + secretKeyRef: + name: {{ include "enterprise-logs.canarySecret" $ }} + key: username + - name: PASS + valueFrom: + secretKeyRef: + name: {{ include "enterprise-logs.canarySecret" $ }} + key: password + {{- end -}} + {{- with .extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .extraEnvFrom }} + envFrom: + {{- toYaml . | nindent 12 }} + {{- end }} + readinessProbe: + httpGet: + path: /metrics + port: http-metrics + initialDelaySeconds: 15 + timeoutSeconds: 1 + {{- with .resources}} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/production/helm/loki/templates/loki-canary/service.yaml b/production/helm/loki/templates/loki-canary/service.yaml new file mode 100644 index 0000000000000..ef12be38e9a4b --- /dev/null +++ b/production/helm/loki/templates/loki-canary/service.yaml @@ -0,0 +1,18 @@ +{{- if .Values.monitoring.selfMonitoring.lokiCanary.enabled -}} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "loki-canary.fullname" . }} + labels: + {{- include "loki-canary.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 3500 + targetPort: http-metrics + protocol: TCP + selector: + {{- include "loki-canary.selectorLabels" . | nindent 4 }} +{{- end -}} diff --git a/production/helm/loki/templates/loki-canary/serviceaccount.yaml b/production/helm/loki/templates/loki-canary/serviceaccount.yaml new file mode 100644 index 0000000000000..6bba15ffb12fe --- /dev/null +++ b/production/helm/loki/templates/loki-canary/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.monitoring.selfMonitoring.lokiCanary.enabled -}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "loki-canary.fullname" . }} + labels: + {{- include "loki-canary.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": post-install + {{- with .Values.monitoring.selfMonitoring.lokiCanary.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/production/helm/loki/templates/monitoring/_helpers-monitoring.tpl b/production/helm/loki/templates/monitoring/_helpers-monitoring.tpl index e40551721ae87..ed6936497effc 100644 --- a/production/helm/loki/templates/monitoring/_helpers-monitoring.tpl +++ b/production/helm/loki/templates/monitoring/_helpers-monitoring.tpl @@ -11,7 +11,18 @@ Client definition for LogsInstance {{- end }} - url: {{ $url }} externalLabels: - cluster: {{ include "loki.fullname" . -}} + cluster: {{ include "loki.fullname" . }} + {{- if .Values.enterprise.enabled }} + basicAuth: + username: + name: {{ include "enterprise-logs.canarySecret" . }} + key: username + password: + name: {{ include "enterprise-logs.canarySecret" . 
}} + key: password + {{- else if .Values.loki.auth_enabled }} + tenantId: {{ .Values.monitoring.selfMonitoring.tenant }} + {{- end }} {{- end -}} {{/* diff --git a/production/helm/loki/templates/monitoring/logs-instance.yaml b/production/helm/loki/templates/monitoring/logs-instance.yaml index bd60b41867fdd..f288e2c417ebf 100644 --- a/production/helm/loki/templates/monitoring/logs-instance.yaml +++ b/production/helm/loki/templates/monitoring/logs-instance.yaml @@ -16,7 +16,6 @@ metadata: {{- end }} spec: clients: - #TODO: need to support auth here {{- include "loki.logsInstanceClient" $ | nindent 4}} # Supply an empty namespace selector to look in all namespaces. Remove diff --git a/production/helm/loki/templates/monitoring/servicemonitor.yaml b/production/helm/loki/templates/monitoring/servicemonitor.yaml index f2e5cc886d00f..3245cdbda72ac 100644 --- a/production/helm/loki/templates/monitoring/servicemonitor.yaml +++ b/production/helm/loki/templates/monitoring/servicemonitor.yaml @@ -4,7 +4,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ include "loki.readFullname" $ }} + name: {{ include "loki.fullname" $ }} {{- with .namespace }} namespace: {{ . }} {{- end }} @@ -13,7 +13,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} labels: - {{- include "loki.readLabels" $ | nindent 4 }} + {{- include "loki.labels" $ | nindent 4 }} {{- with .labels }} {{- toYaml . | nindent 4 }} {{- end }} @@ -32,6 +32,7 @@ spec: - "false" endpoints: - port: http-metrics + path: /metrics {{- with .interval }} interval: {{ . }} {{- end }} diff --git a/production/helm/loki/templates/provisioner/_helpers.yaml b/production/helm/loki/templates/provisioner/_helpers.yaml new file mode 100644 index 0000000000000..be4a7cba5c183 --- /dev/null +++ b/production/helm/loki/templates/provisioner/_helpers.yaml @@ -0,0 +1,36 @@ +{{/* +provisioner fullname +*/}} +{{- define "enterprise-logs.provisionerFullname" -}} +{{ include "loki.name" . }}-provisioner +{{- end }} + +{{/* +provisioner common labels +*/}} +{{- define "enterprise-logs.provisionerLabels" -}} +{{ include "loki.labels" . }} +app.kubernetes.io/component: provisioner +{{- end }} + +{{/* +provisioner selector labels +*/}} +{{- define "enterprise-logs.provisionerSelectorLabels" -}} +{{ include "loki.selectorLabels" . }} +app.kubernetes.io/component: provisioner +{{- end }} + +{{/* +provisioner image name +*/}} +{{- define "enterprise-logs.provisionerImage" -}} +{{- $dict := dict "service" .Values.enterprise.provisioner.image "global" .Values.global.image "defaultVersion" "latest" -}} +{{- include "loki.baseImage" $dict -}} +{{- end -}} + +{{/* Prefix for provisioned secrets created for each provisioned tenant */}} +{{- define "enterprise-logs.provisionedSecretPrefix" }} +{{- .Values.enterprise.provisioner.provisionedSecretPrefix | default (printf "%s-provisioned" (include "loki.name" . )) -}} +{{- end -}} + diff --git a/production/helm/loki/templates/provisioner/job-provisioner.yaml b/production/helm/loki/templates/provisioner/job-provisioner.yaml new file mode 100644 index 0000000000000..b05112ea7b7b6 --- /dev/null +++ b/production/helm/loki/templates/provisioner/job-provisioner.yaml @@ -0,0 +1,119 @@ +{{ if and .Values.enterprise.provisioner.enabled .Values.enterprise.enabled }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "enterprise-logs.provisionerFullname" . }} + labels: + {{- include "enterprise-logs.provisionerLabels" . | nindent 4 }} + {{- with .Values.enterprise.provisioner.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- with .Values.enterprise.provisioner.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": post-install +spec: + backoffLimit: 6 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + {{- include "enterprise-logs.provisionerSelectorLabels" . | nindent 8 }} + {{- with .Values.enterprise.provisioner.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.enterprise.provisioner.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.enterprise.provisioner.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + securityContext: + {{- toYaml .Values.enterprise.provisioner.securityContext | nindent 8 }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + initContainers: + - name: provisioner + image: {{ template "enterprise-logs.provisionerImage" . }} + imagePullPolicy: {{ .Values.enterprise.provisioner.image.pullPolicy }} + command: + - /bin/sh + - -euc + - | + {{- range .Values.enterprise.provisioner.tenants }} + /usr/bin/enterprise-logs-provisioner \ + -bootstrap-path=/bootstrap \ + -cluster-name={{ include "loki.clusterName" $ }} \ + -gel-url={{ include "loki.address" $ }} \ + -instance={{ . }} \ + -access-policy=write-{{ . }}:{{ . }}:logs:write \ + -access-policy=read-{{ . }}:{{ . }}:logs:read \ + -token=write-{{ . }} \ + -token=read-{{ . }} + {{- end -}} + + {{- with .Values.monitoring.selfMonitoring.tenant }} + /usr/bin/enterprise-logs-provisioner \ + -bootstrap-path=/bootstrap \ + -cluster-name={{ include "loki.clusterName" $ }} \ + -gel-url={{ include "loki.address" $ }} \ + -instance={{ . }} \ + -access-policy=canary:{{ . }}:logs:write,logs:read \ + -token=canary + {{- end }} + volumeMounts: + {{- if .Values.enterprise.provisioner.extraVolumeMounts }} + {{ toYaml .Values.enterprise.provisioner.extraVolumeMounts | nindent 12 }} + {{- end }} + - name: bootstrap + mountPath: /bootstrap + - name: admin-token + mountPath: /bootstrap/token + subPath: token + {{- with .Values.enterprise.provisioner.env }} + env: + {{ toYaml . | nindent 12 }} + {{- end }} + containers: + - name: create-secret + image: {{ include "loki.kubectlImage" . }} + imagePullPolicy: {{ .Values.kubectlImage.pullPolicy }} + command: + - /bin/bash + - -euc + - | + {{- range .Values.enterprise.provisioner.tenants }} + kubectl create secret generic "{{ include "enterprise-logs.provisionedSecretPrefix" $ }}-{{ . }}" \ + --from-literal=token-write="$(cat /bootstrap/token-write-{{ . }})" \ + --from-literal=token-read="$(cat /bootstrap/token-read-{{ . }})" + {{- end -}} + {{- with .Values.monitoring.selfMonitoring.tenant }} + kubectl create secret generic "{{ include "enterprise-logs.canarySecret" $ }}" \ + --from-literal=username="{{ $.Values.monitoring.selfMonitoring.tenant }}" \ + --from-literal=password="$(cat /bootstrap/token-canary)" + {{- end }} + volumeMounts: + {{- if .Values.enterprise.provisioner.extraVolumeMounts }} + {{ toYaml .Values.enterprise.provisioner.extraVolumeMounts | nindent 12 }} + {{- end }} + - name: bootstrap + mountPath: /bootstrap + restartPolicy: OnFailure + serviceAccount: {{ include "enterprise-logs.provisionerFullname" . }} + serviceAccountName: {{ include "enterprise-logs.provisionerFullname" . }} + volumes: + - name: admin-token + secret: + secretName: "{{ include "enterprise-logs.adminTokenSecret" . 
}}" + - name: bootstrap + emptyDir: {} +{{- end }} diff --git a/production/helm/loki/templates/provisioner/role-provisioner.yaml b/production/helm/loki/templates/provisioner/role-provisioner.yaml new file mode 100644 index 0000000000000..6163ac07f1d46 --- /dev/null +++ b/production/helm/loki/templates/provisioner/role-provisioner.yaml @@ -0,0 +1,20 @@ +{{ if and .Values.enterprise.provisioner.enabled .Values.enterprise.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "enterprise-logs.provisionerFullname" . }} + labels: + {{- include "enterprise-logs.provisionerLabels" . | nindent 4 }} + {{- with .Values.enterprise.provisioner.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.enterprise.provisioner.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": post-install +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +{{- end }} diff --git a/production/helm/loki/templates/provisioner/rolebinding-provisioner.yaml b/production/helm/loki/templates/provisioner/rolebinding-provisioner.yaml new file mode 100644 index 0000000000000..8138e924314a0 --- /dev/null +++ b/production/helm/loki/templates/provisioner/rolebinding-provisioner.yaml @@ -0,0 +1,24 @@ +{{ if and .Values.enterprise.provisioner.enabled .Values.enterprise.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "enterprise-logs.provisionerFullname" . }} + labels: + {{- include "enterprise-logs.provisionerLabels" . | nindent 4 }} + {{- with .Values.enterprise.provisioner.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.enterprise.provisioner.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": post-install +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "enterprise-logs.provisionerFullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "enterprise-logs.provisionerFullname" . }} +{{- end }} diff --git a/production/helm/loki/templates/provisioner/serviceaccount-provisioner.yaml b/production/helm/loki/templates/provisioner/serviceaccount-provisioner.yaml new file mode 100644 index 0000000000000..c8132f6ab31c7 --- /dev/null +++ b/production/helm/loki/templates/provisioner/serviceaccount-provisioner.yaml @@ -0,0 +1,17 @@ +{{ if and .Values.enterprise.provisioner.enabled .Values.enterprise.enabled }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "enterprise-logs.provisionerFullname" . }} + labels: + {{- include "enterprise-logs.provisionerLabels" . | nindent 4 }} + {{- with .Values.enterprise.provisioner.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- with .Values.enterprise.provisioner.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + "helm.sh/hook": post-install +{{- end }} diff --git a/production/helm/loki/templates/tokengen/job-tokengen.yaml b/production/helm/loki/templates/tokengen/job-tokengen.yaml index 589fb0e28d9f4..cf5f8001c775b 100644 --- a/production/helm/loki/templates/tokengen/job-tokengen.yaml +++ b/production/helm/loki/templates/tokengen/job-tokengen.yaml @@ -69,12 +69,13 @@ spec: {{- end }} containers: - name: create-secret - image: {{ .Values.enterprise.tokengen.image }} - imagePullPolicy: {{ .Values.loki.image.pullPolicy }} + image: {{ include "loki.kubectlImage" . 
}} + imagePullPolicy: {{ .Values.kubectlImage.pullPolicy }} command: - /bin/bash - -euc - - kubectl create secret generic {{ .Values.enterprise.tokengen.adminTokenSecret }} --from-file=token=/shared/admin-token + - | + kubectl create secret generic "{{ include "enterprise-logs.adminTokenSecret" . }}" --from-file=token=/shared/admin-token volumeMounts: {{- if .Values.enterprise.tokengen.extraVolumeMounts }} {{ toYaml .Values.enterprise.tokengen.extraVolumeMounts | nindent 12 }} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index a15a2f1d3b86d..316ef95d5de0e 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -21,6 +21,16 @@ fullnameOverride: null # -- Image pull secrets for Docker images imagePullSecrets: [] +kubectlImage: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: bitnami/kubectl + # -- Overrides the image tag whose default is the chart's appVersion + tag: null + # -- Docker image pull policy + pullPolicy: IfNotPresent + loki: # Configures the readiness probe for all of the Loki pods readinessProbe: @@ -244,6 +254,10 @@ enterprise: # Default verion of GEL to deploy version: v1.5.0 + # -- Optional name of the GEL cluster, otherwise will use .Release.Name + # The cluster name must match what is in your GEL license + cluster_name: null + # -- Grafana Enterprise Logs license # In order to use Grafana Enterprise Logs features, you will need to provide # the contents of your Grafana Enterprise Logs license, either by providing the @@ -277,7 +291,7 @@ enterprise: auth: type: {{ .Values.enterprise.adminApi.enabled | ternary "enterprise" "trust" }} auth_enabled: {{ .Values.loki.auth_enabled }} - cluster_name: {{ .Release.Name }} + cluster_name: {{ include "loki.clusterName" . }} license: path: /etc/loki/license/license.jwt @@ -291,12 +305,16 @@ enterprise: # -- Docker image pull policy pullPolicy: IfNotPresent + # -- Alternative name for admin token secret, needed by tokengen and provisioner jobs + adminTokenSecret: null + + # -- Alternative name of the secret to store token for the canary + canarySecret: null + # -- Configuration for `tokengen` target tokengen: # -- Whether the job should be part of the deployment enabled: true - # -- Name of the secret to store the admin token in - adminTokenSecret: "gel-admin-token" # -- Additional CLI arguments for the `tokengen` target extraArgs: [] # -- Additional Kubernetes environment @@ -317,8 +335,40 @@ enterprise: runAsGroup: 10001 runAsUser: 10001 fsGroup: 10001 - # -- Job Create Secret Stage Image to Utilize - image: bitnami/kubectl + + # -- Configuration for `provisioner` target + provisioner: + # -- Whether the job should be part of the deployment + enabled: true + # -- Name of the secret to store provisioned tokens in + provisionedSecretPrefix: '{{ include "loki.name" . }}-provisioned' + # -- Tenants to be created. Each tenant will get a read and write policy + # and associated token. 
+ tenants: [] + # -- Additional Kubernetes environment + env: [] + # -- Additional labels for the `provisioner` Job + labels: {} + # -- Additional annotations for the `provisioner` Job + annotations: {} + # -- The name of the PriorityClass for provisioner Job + priorityClassName: null + # -- Run containers as user `enterprise-logs(uid=10001)` + securityContext: + runAsNonRoot: true + runAsGroup: 10001 + runAsUser: 10001 + fsGroup: 10001 + # -- Provisioner image to Utilize + image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/enterprise-logs-provisioner + # -- Overrides the image tag whose default is the chart's appVersion + tag: null + # -- Docker image pull policy + pullPolicy: IfNotPresent nginxConfig: file: | @@ -544,6 +594,9 @@ monitoring: selfMonitoring: enabled: true + # -- Tenant to use for self monitoring + tenant: "self-monitoring" + # Grafana Agent configuration grafanaAgent: # -- Controls whether to install the Grafana Agent Operator and its CRDs. @@ -580,6 +633,35 @@ monitoring: # -- Additional LogsInstance labels labels: {} + # The Loki canary pushes logs to and queries from this loki installation to test + # that it's working correctly + lokiCanary: + enabled: true + # -- Additional annotations for the `loki-canary` Daemonset + annotations: {} + # -- Additional CLI arguments for the `loki-canary' command + extraArgs: [] + # -- Environment variables to add to the canary pods + extraEnv: [] + # -- Environment variables from secrets or configmaps to add to the canary pods + extraEnvFrom: [] + # -- Resource requests and limits for the canary + resources: {} + # -- Node selector for canary pods + nodeSelector: {} + # -- Tolerations for canary pods + tolerations: [] + # -- Image to use for loki canary + image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/loki-canary + # -- Overrides the image tag whose default is the chart's appVersion + tag: null + # -- Docker image pull policy + pullPolicy: IfNotPresent + # Configuration for the write write: # -- Number of replicas for the write diff --git a/tools/dev/k3d/Makefile b/tools/dev/k3d/Makefile index bc2878b602544..167c1822c9a38 100644 --- a/tools/dev/k3d/Makefile +++ b/tools/dev/k3d/Makefile @@ -2,7 +2,6 @@ IMAGE_TAG := $(shell ../../../tools/image-tag) REGISTRY_PORT ?= $(shell k3d registry list -o json | jq -r '.[] | select(.name == "k3d-grafana") | .portMappings."5000/tcp" | .[0].HostPort') -ENTERPRISE_LOGS_CHECKOUT_DIR ?= $(shell pwd)/../../../../enterprise-logs loki-distributed: prepare build-latest-image $(CURDIR)/scripts/create_cluster.sh loki-distributed $(REGISTRY_PORT) @@ -10,10 +9,13 @@ loki-distributed: prepare build-latest-image sleep 5 tk apply --ext-str registry="k3d-grafana:$(REGISTRY_PORT)" environments/loki-distributed -enterprise-logs: prepare build-provisioner-image +enterprise-logs: prepare $(CURDIR)/scripts/create_cluster.sh enterprise-logs $(REGISTRY_PORT) # wait 5s for the cluster to be ready sleep 5 + $(MAKE) -C $(CURDIR) apply-enterprise-logs + +apply-enterprise-logs: tk apply --ext-str registry="k3d-grafana:$(REGISTRY_PORT)" environments/enterprise-logs down: @@ -63,8 +65,3 @@ build-latest-image: make -C $(CURDIR)/../../.. 
loki-image docker tag grafana/loki:$(IMAGE_TAG) k4d-grafana:$(REGISTRY_PORT)/loki:latest docker push k3d-grafana:$(REGISTRY_PORT)/loki:latest - -build-provisioner-image: - make -C $(ENTERPRISE_LOGS_CHECKOUT_DIR) enterprise-logs-provisioner-image - docker tag us.gcr.io/kubernetes-dev/enterprise-logs-provisioner:$(shell $(MAKE) -s -C $(ENTERPRISE_LOGS_CHECKOUT_DIR) image-tag) k3d-grafana:$(REGISTRY_PORT)/enterprise-logs-provisioner:latest - docker push k3d-grafana:$(REGISTRY_PORT)/enterprise-logs-provisioner:latest diff --git a/tools/dev/k3d/environments/enterprise-logs/main.jsonnet b/tools/dev/k3d/environments/enterprise-logs/main.jsonnet index 39b75e4052e63..7dbcb05d980c1 100644 --- a/tools/dev/k3d/environments/enterprise-logs/main.jsonnet +++ b/tools/dev/k3d/environments/enterprise-logs/main.jsonnet @@ -1,15 +1,13 @@ local k = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libsonnet'; local tanka = import 'github.com/grafana/jsonnet-libs/tanka-util/main.libsonnet'; -local provisioner = import 'provisioner/provisioner.libsonnet'; - local grafana = import 'grafana/grafana.libsonnet'; local envVar = if std.objectHasAll(k.core.v1, 'envVar') then k.core.v1.envVar else k.core.v1.container.envType; local helm = tanka.helm.new(std.thisFile); local spec = (import './spec.json').spec; -provisioner { +{ local prometheusServerName = self.prometheus.service_prometheus_kube_prometheus_prometheus.metadata.name, local prometheusUrl = 'http://%s:9090' % prometheusServerName, @@ -17,45 +15,16 @@ provisioner { local lokiGatewayUrl = 'http://%s' % lokiGatewayHost, local licenseClusterName = 'enterprise-logs-test-fixture', - local provisionerSecret = 'gel-provisioning-tokens', + local provisionedSecretPrefix = 'provisioned-secret', local adminTokenSecret = 'gel-admin-token', - local tenant = 'team-l', - - _images+:: { - provisioner: '%s/enterprise-logs-provisioner' % std.extVar('registry'), - }, + local tenant = 'loki', _config+:: { clusterName: licenseClusterName, namespace: spec.namespace, adminTokenSecret: adminTokenSecret, adminApiUrl: lokiGatewayUrl, - provisioner+: { - initCommand: [ - '/usr/bin/enterprise-logs-provisioner', - - '-bootstrap-path=/bootstrap', - '-cluster-name=' + licenseClusterName, - '-gel-url=' + lokiGatewayUrl, - - '-instance=%s' % tenant, - - '-access-policy=promtail-l:team-l:logs:write', - '-access-policy=grafana-l:team-l:logs:read', - - '-token=promtail-l', - '-token=grafana-l', - ], - containerCommand: [ - 'bash', - '-c', - 'kubectl create secret generic ' - + provisionerSecret - + ' --from-literal=token-promtail-l="$(cat /bootstrap/token-promtail-l)"' - + ' --from-literal=token-grafana-l="$(cat /bootstrap/token-grafana-l)" ', - ], - }, }, loki: helm.template($._config.clusterName, '../../../../../production/helm/loki', { @@ -67,9 +36,22 @@ provisioner { license: { contents: importstr '../../secrets/gel.jwt', }, - tokengen: { - enable: true, - adminTokenSecret: adminTokenSecret, + adminTokenSecret: adminTokenSecret, + provisioner: { + provisionedSecretPrefix: provisionedSecretPrefix, + tenants: [ + tenant, + ], + }, + }, + monitoring+: { + selfMonitoring+: { + tenant: tenant, + }, + serviceMonitor: { + //TODO: this is required because of the service monitor selector match labels + // from kube-prometheus-stack. 
+ labels: { release: 'prometheus' }, }, }, minio+: { @@ -84,6 +66,15 @@ provisioner { grafana+: { enabled: false, }, + prometheus: { + prometheusSpec: { + serviceMonitorSelector: { + matchLabels: { + release: 'prometheus', + }, + }, + }, + }, }, kubeVersion: 'v1.18.0', noHooks: false, @@ -92,7 +83,7 @@ provisioner { local datasource = grafana.datasource, prometheus_datasource:: datasource.new('prometheus', prometheusUrl, type='prometheus', default=false), loki_datasource:: datasource.new('loki', lokiGatewayUrl, type='loki', default=true) + - datasource.withBasicAuth('team-l', '${PROVISONING_TOKEN_GRAFANA_L}'), + datasource.withBasicAuth(tenant, '${PROVISIONED_TENANT_TOKEN}'), grafana: grafana + grafana.withAnonymous() @@ -161,7 +152,7 @@ provisioner { function(c) c { env+: [ envVar.new('GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS', 'grafana-enterprise-logs-app'), - envVar.fromSecretRef('PROVISONING_TOKEN_GRAFANA_L', provisionerSecret, 'token-grafana-l'), + envVar.fromSecretRef('PROVISIONED_TENANT_TOKEN', '%s-%s' % [provisionedSecretPrefix, tenant], 'token-read'), ], } ), diff --git a/tools/dev/k3d/environments/enterprise-logs/spec.json b/tools/dev/k3d/environments/enterprise-logs/spec.json index d5537aceb1df8..278c6152a8458 100644 --- a/tools/dev/k3d/environments/enterprise-logs/spec.json +++ b/tools/dev/k3d/environments/enterprise-logs/spec.json @@ -6,7 +6,7 @@ "namespace": "environments/enterprise-logs/main.jsonnet" }, "spec": { - "apiServer": "https://0.0.0.0:34517", + "apiServer": "https://0.0.0.0:44365", "namespace": "k3d-enterprise-logs", "resourceDefaults": {}, "expectVersions": {}
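
**Example usage** (a sketch based on the values introduced in this PR; the cluster name and tenant names below are placeholders, not chart defaults):

```yaml
# Sketch of a values override exercising the canary and provisioner added here.
# The cluster name and tenant names are examples only.
enterprise:
  enabled: true
  # Must match the cluster name in the GEL license; defaults to .Release.Name.
  cluster_name: my-gel-cluster
  provisioner:
    enabled: true
    tenants:
      - team-a
      - team-b

monitoring:
  selfMonitoring:
    enabled: true
    # Tenant provisioned for self-monitoring; the canary authenticates as this tenant.
    tenant: self-monitoring
    lokiCanary:
      enabled: true
```

With `enterprise.enabled: true`, the provisioner Job creates a read and a write access policy plus matching tokens for each listed tenant via the GEL admin API and stores them in Secrets named `<provisionedSecretPrefix>-<tenant>` (keys `token-read` and `token-write`). It also provisions the self-monitoring tenant and writes its credentials to the canary Secret (default `<name>-canary-secret`, keys `username` and `password`), which the canary DaemonSet and the self-monitoring LogsInstance use for basic auth when pushing to and querying Loki.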