diff --git a/.env-example b/.env-example index 4d12da16..31efb27b 100644 --- a/.env-example +++ b/.env-example @@ -1 +1,16 @@ -CONFIG_PASSPHRASE= \ No newline at end of file +BUNDLER_CONFIG_PASSPHRASE= +BUNDLER_NODE_PATH_INDEX=0 +BUNDLER_CHAIN_ID=80001 +BUNDLER_MIN_RELAYER_COUNT=5 +BUNDLER_MAX_RELAYER_COUNT=10 +BUNDLER_FUNDING_BALANCE_THRESHOLD=0.1 +BUNDLER_FUNDING_RELAYER_AMOUNT=0.2 +BUNDLER_SIMULATION_DATA_JSON='{"tenderlyData": {}}' +BUNDLER_TOKEN_PRICE_JSON='{"coinMarketCapApi": ""}' +BUNDLER_SLACK_JSON='{"token": "","channel": ""}' +BUNDLER_PROVIDER_JSON='{"137": "", "42161": "", "56": "", "10": "", "43114": "", "8453": "", "80001": ""}' +BUNDLER_DATASOURCES_JSON= '{"mongoUrl": "", "redisUrl": ""}' +BUNDLER_SOCKET_SERVICE_JSON='{"wssUrl": "","httpUrl": "","token": "","apiKey": ""}' +BUNDLER_QUEUE_URL="" +BUNDLER_IS_TRUSTWALLET_SETUP=true +BUNDLER_FALLBACK_PROVIDER_JSON='{"137": ["", ""], "42161": ["", ""], "56": ["", ""], "10": ["", ""], "43114": ["", ""], "8453": ["", ""], "80001": ["", ""]}' \ No newline at end of file diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index c1773ebb..233e4bf3 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout Repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Check if Master is Ahead of Staging run: | diff --git a/.github/workflows/merge_on_master_tw.yaml b/.github/workflows/merge_on_master_tw.yaml new file mode 100644 index 00000000..4c2ea8c8 --- /dev/null +++ b/.github/workflows/merge_on_master_tw.yaml @@ -0,0 +1,66 @@ +# This workflow will do a clean installation of node dependencies, build the +# source code and run tests across different versions of node +# For more information see: +# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs + +name: Main_or_Master +on: +# The trigger event is for every pull request. If we would trigger when we push +# to any branch and on pull request we would have double workflow runs and will +# consume more minutes. I chose to trigger the workflow run on pull requests only. + push: + branches: + - 'dedicated-bundler-setup_new' + +jobs: + js_build_test: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [21.x] + # node-version: [14.x, 16.x, 18.x, 20.x] + # See supported Node.js release schedule at + # https://nodejs.org/en/about/releases/ + steps: + - name: checkout repo + uses: actions/checkout@v4 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + - name: Yarn install, build and test + run: echo "In the steps below we will build and run first set of tests for all components" + - run: echo yarn install + - run: echo yarn lint + - run: echo yarn build + - run: echo yarn test + + container_img_build_push_gar: + needs: [js_build_test] + # Allow the job to fetch a GitHub ID token + permissions: + id-token: write + contents: read + # The plan is to build and push each docker image in parallel. 
+ strategy: + matrix: + image: + - us-docker.pkg.dev/biconomy-prod/bundler/trustwallet + - us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + # {owner}/{repo}/.github/workflows/{filename}@{ref} + uses: bcnmy/devops/.github/workflows/container_img_build_push_gar.yaml@master + with: + image: ${{ matrix.image }} + dockerfile: Dockerfile.helm + # GCP project where the identity provider is + # gcloud projects describe prj-workload-identity-001 + gcp_project_number: '766873424314' + gcp_pool_id: 'pool-id-github-actions' + # gcp_provider_id: 'ga-GITHUB_REPO_NAME' + gcp_provider_id: 'ga-bundler' + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + gcp_registry: 'us-docker.pkg.dev/biconomy-prod/bundler/trustwallet' + gcp_service_account: 'sa-bundler@prj-workload-identity-001.iam.gserviceaccount.com' + + # TODO: Add integrations tests here diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000..0b42d307 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,64 @@ +--- +# This workflow will do a clean installation of node dependencies, build the +# source code and run tests across different versions of node +# For more information see: +# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-nodejs + +# yamllint disable rule:line-length +name: PR +on: + pull_request: + branches: + - '*' + +jobs: + js_build_test: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [21.x] + # node-version: [14.x, 16.x, 18.x, 20.x] + # See supported Node.js release schedule at + # https://nodejs.org/en/about/releases/ + steps: + - name: checkout repo + uses: actions/checkout@v4 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + - name: Yarn install, build and test + run: echo "In the steps below we will build and run first set of tests for all components" + - run: echo yarn install + - run: echo yarn lint + - run: echo yarn build + - run: echo yarn test + + container_img_build_push_gar: + needs: [js_build_test] + # Allow the job to fetch a GitHub ID token + permissions: + id-token: write + contents: read + # The plan is to build and push each docker image in parallel. 
+ strategy: + matrix: + image: + - us-docker.pkg.dev/biconomy-prod/bundler/trustwallet + - us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + # {owner}/{repo}/.github/workflows/{filename}@{ref} + uses: bcnmy/devops/.github/workflows/container_img_build_push_gar.yaml@master + with: + image: ${{ matrix.image }} + dockerfile: Dockerfile.helm + # GCP project where the identity provider is + # gcloud projects describe prj-workload-identity-001 + gcp_project_number: '766873424314' + gcp_pool_id: 'pool-id-github-actions' + # gcp_provider_id: 'ga-GITHUB_REPO_NAME' + gcp_provider_id: 'ga-bundler' + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + gcp_registry: 'us-docker.pkg.dev/biconomy-prod/bundler/trustwallet' + gcp_service_account: 'sa-bundler@prj-workload-identity-001.iam.gserviceaccount.com' + # diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..a5e22216 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,125 @@ +--- +# This workflow will build a docker image and deploy it to trustwallet stating \ +# and production environments + +# yamllint disable rule:line-length +name: Version_Release +on: + push: + tags: + - v0.** +jobs: + js_build_test: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [21.x] + # node-version: [14.x, 16.x, 18.x, 20.x] + # See supported Node.js release schedule at + # https://nodejs.org/en/about/releases/ + steps: + - name: checkout repo + uses: actions/checkout@v4 + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + - name: Yarn install, build and test + run: echo "In the steps below we will build and run first set of tests for all components" + - run: echo yarn install + - run: echo yarn lint + - run: echo yarn build + - run: echo yarn test + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + run: echo "$GITHUB_CONTEXT" + - name: Dump GITHUB_REF + run: echo "${GITHUB_REF}" + + container_img_build_push_gar: + needs: [js_build_test] + # Allow the job to fetch a GitHub ID token + permissions: + id-token: write + contents: read + # The plan is to build and push each docker image in parallel. 
+ strategy: + matrix: + image: + - us-docker.pkg.dev/biconomy-prod/bundler/trustwallet + - us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + # {owner}/{repo}/.github/workflows/{filename}@{ref} + uses: bcnmy/devops/.github/workflows/container_img_build_push_gar.yaml@master + with: + image: ${{ matrix.image }} + dockerfile: Dockerfile.helm + # GCP project where the identity provider is + # gcloud projects describe prj-workload-identity-001 + gcp_project_number: '766873424314' + gcp_pool_id: 'pool-id-github-actions' + # gcp_provider_id: 'ga-GITHUB_REPO_NAME' + gcp_provider_id: 'ga-bundler' + # LOCATION-docker.pkg.dev/PROJECT-ID/REPOSITORY/IMAGE + gcp_registry: 'us-docker.pkg.dev/biconomy-prod/bundler/trustwallet' + gcp_service_account: 'sa-bundler@prj-workload-identity-001.iam.gserviceaccount.com' + + deploy_tw_staging: + needs: [container_img_build_push_gar] + # Allow the job to fetch a GitHub ID token + # runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + uses: bcnmy/devops/.github/workflows/deploy_to_gke.yaml@master + with: + # GCP project where the identity provider is + # gcloud projects describe prj-workload-identity-001 + gcp_project_number: '766873424314' + gcp_project_id: 'biconomy-prod' + gcp_bastion: 'bastion02' + gcp_bastion_zone: 'us-east1-b' + gcp_pool_id: 'pool-id-github-actions' + # created by devops/gcp/github-actions/configure_workload_identity_federation_with_github_actions_pipelines.sh + # gcp_provider_id: 'ga-GITHUB_REPO_NAME' + gcp_provider_id: 'ga-bundler' + # SERVICE_ACCOUNT_EMAIL="${SERVICE_ACCOUNT}@${PROJECT}.iam.gserviceaccount.com" + gcp_service_account: 'sa-bundler@prj-workload-identity-001.iam.gserviceaccount.com' + gcp_cluster_name: 'dedicated-bundler' + gcp_cluster_location: 'us-east1' + use_internal_ip: true + # https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables + # deploy_command: 'helm ls --all-namespaces' + # deploy_command: 'echo IMG_VERSION is ${GITHUB_REF:10}' # for example extracts v0.0.5 from "refs/tags/v0.0.5" + deploy_command: './install-bundler/bundler-update-release.sh bundler-tw-staging.cfg ${GITHUB_REF:10}' + + deploy_tw_prod: + needs: [deploy_tw_staging] + # environment: tw-prod + # Allow the job to fetch a GitHub ID token + # runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + uses: bcnmy/devops/.github/workflows/deploy_to_gke.yaml@master + with: + environment: 'tw-prod' + # GCP project where the identity provider is + # gcloud projects describe prj-workload-identity-001 + gcp_project_number: '766873424314' + # GCP project ID where the workload will be deployed + gcp_project_id: 'prj-biconomy-prod-001' + gcp_bastion: 'bastion02' + gcp_bastion_zone: 'us-central1-a' + gcp_pool_id: 'pool-id-github-actions' + # created by devops/gcp/github-actions/configure_workload_identity_federation_with_github_actions_pipelines.sh + # gcp_provider_id: 'ga-GITHUB_REPO_NAME' + gcp_provider_id: 'ga-bundler' + # SERVICE_ACCOUNT_EMAIL="${SERVICE_ACCOUNT}@${PROJECT}.iam.gserviceaccount.com" + gcp_service_account: 'sa-bundler@prj-workload-identity-001.iam.gserviceaccount.com' + gcp_cluster_name: 'trustwallet' + gcp_cluster_location: 'us-central1' + use_internal_ip: true + deploy_command: './install-bundler/bundler-update-release.sh bundler-tw-production.cfg ${GITHUB_REF:10}' + # deploy_command: 'helm ls --all-namespaces' diff --git a/.gitignore b/.gitignore index bd1b9f75..626f99c5 100644 --- a/.gitignore +++ 
b/.gitignore @@ -1,14 +1,27 @@ node_modules .env yarn-error.log -centrifugo_config.json relayer/dist server/dist coverage -config.json config.json.enc *heapsnapshot -.vscode +refundAddresses.ts +*.enc +# Ignore all JSON files +*.json +*json.enc +# But not these specific files +!static-config.json +!config-example.json +!centrifugo_config.json + +# If index.ts is not a .json file, you don't need to explicitly exclude it. +# However, if it's actually index.json, include it like so: +#!index.json + +!example.cfg dist/ *.code-workspace config/development.json +config/production.json diff --git a/Dockerfile b/Dockerfile index 07b49f54..5d6820a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,25 +1,36 @@ -FROM node:18.17.1-bookworm +FROM node:21.6-bookworm as builder -# install dependencies -RUN apt update - -# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. -# NOTE: See https://github.com/hexops/dockerfile#is-tini-still-required-in-2020-i-thought-docker-added-it-natively -RUN apt-get install tini +# Install dependencies # arguments ARG PORT=3000 -RUN mkdir -p /relayer-node -WORKDIR /relayer-node +# Set up the directory +WORKDIR /bundler +# Copy package files COPY package.json yarn.lock ./ # install packages RUN yarn install -COPY . /relayer-node +# Copy the rest of the files +COPY . /bundler + +# Build the application RUN yarn run build + +# Second stage +FROM node:21.6-bookworm + +# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. +# NOTE: See https://github.com/hexops/dockerfile#is-tini-still-required-in-2020-i-thought-docker-added-it-natively + +RUN apt-get update && apt-get install -y \ + tini \ + && rm -rf /var/lib/apt/lists/* + +# Expose the port EXPOSE 3000 # Non-root user for security purposes. @@ -32,11 +43,15 @@ EXPOSE 3000 # such a user does not exist. RUN addgroup --gid 10001 --system nonroot \ && adduser --uid 10000 --system --ingroup nonroot --home /home/nonroot nonroot -EXPOSE 3000 + +WORKDIR /home/nonroot/bundler +COPY --from=builder --chown=10000:10001 /bundler /home/nonroot/bundler + # Use the non-root user to run our application USER nonroot ENTRYPOINT ["/usr/bin/tini", "--", "yarn"] +# Default arguments for your app (remove if you have none): CMD ["run", "start"] diff --git a/Dockerfile.dev b/Dockerfile.dev index 92b88796..13c07968 100644 --- a/Dockerfile.dev +++ b/Dockerfile.dev @@ -1,14 +1,35 @@ -FROM node:18.17.1-bookworm +FROM node:21.6-bookworm as builder -# update apt packages -RUN apt-get update +# Install dependencies # arguments ARG PORT=3000 +# Set up the directory +WORKDIR /bundler + +# Copy package files +COPY package.json yarn.lock ./ + +# install packages +RUN yarn install + +# Copy the rest of the files +COPY . . + +# Second stage +FROM node:21.6-bookworm + # Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. # NOTE: See https://github.com/hexops/dockerfile#is-tini-still-required-in-2020-i-thought-docker-added-it-natively -RUN apt-get install tini + +RUN apt-get update && apt-get install -y \ + tini \ + && rm -rf /var/lib/apt/lists/* + + +# Expose the port +EXPOSE 3000 # Non-root user for security purposes. # @@ -20,17 +41,14 @@ RUN apt-get install tini # such a user does not exist. RUN addgroup --gid 10001 --system nonroot \ && adduser --uid 10000 --system --ingroup nonroot --home /home/nonroot nonroot -EXPOSE 3000 WORKDIR /home/nonroot/bundler -COPY package.json yarn.lock ./ - -# install packages -RUN yarn install -COPY . . 
-ENTRYPOINT ["/usr/bin/tini", "--", "yarn"] +COPY --from=builder --chown=10000:10001 /bundler /home/nonroot/bundler # Use the non-root user to run our application USER nonroot + +ENTRYPOINT ["/usr/bin/tini", "--", "yarn"] + # Default arguments for your app (remove if you have none): CMD ["run", "dev"] diff --git a/Dockerfile.helm b/Dockerfile.helm new file mode 100644 index 00000000..bab11c59 --- /dev/null +++ b/Dockerfile.helm @@ -0,0 +1,54 @@ +FROM node:21.6-bookworm as builder + +# Install dependencies + +# Arguments +ARG PORT=3000 + +# Set up the directory +WORKDIR /bundler + +# Copy package files +COPY package.json yarn.lock ./ + +# Install packages +RUN yarn install + +# Copy the rest of the files +COPY . /bundler + +# Build the application +RUN yarn run build + +# Second stage +FROM node:21.6-bookworm + +# Tini allows us to avoid several Docker edge cases, see https://github.com/krallin/tini. +# NOTE: See https://github.com/hexops/dockerfile#is-tini-still-required-in-2020-i-thought-docker-added-it-natively + +RUN apt-get update && apt-get install -y \ + tini \ + && rm -rf /var/lib/apt/lists/* + +# Expose the port +EXPOSE 3000 + +# Non-root user for security purposes. +# +# UIDs below 10,000 are a security risk, as a container breakout could result +# in the container being ran as a more privileged user on the host kernel with +# the same UID. +# +# Static GID/UID is also useful for chown'ing files outside the container where +# such a user does not exist. +RUN addgroup --gid 10001 --system nonroot \ + && adduser --uid 10000 --system --ingroup nonroot --home /home/nonroot nonroot + +WORKDIR /home/nonroot/bundler +COPY --from=builder --chown=10000:10001 /bundler /home/nonroot/bundler + +# Use the non-root user to run our application +USER nonroot + +# Default arguments for your app (remove if you have none): +ENTRYPOINT ["/usr/bin/tini", "--","./entrypoint.sh" ] diff --git a/README.md b/README.md index f28ab75b..4e7a2df3 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,11 @@ # Bundler + Biconomy ERC-4337 bundler-as-a-service implementation. ## Workflow best practices -This project has two *important* branches: +This project has two _important_ branches: + - `master`: Production branch. This is code that's currently in production. - `staging`: Staging branch. This represents what will be deployed to staging and included in the next release after it's been tested. @@ -12,6 +14,7 @@ To write quality commit messages we (try to) follow the [Conventional Commits St ## Local Development Environment There are 2 ways to run the service and it's dependencies locally: + 1. **Manually**: follow the instructions in the [Bundler Local Setup](https://www.notion.so/biconomy/Local-setup-858695240f3a4c19b6c96cbb3f235b0a?pvs=4) Notion page. 2. **Docker (recommended)**: follow the instructions below. @@ -21,6 +24,7 @@ There are 2 ways to run the service and it's dependencies locally: 3. Run `docker compose up` and the server and all of it's dependencies should run in the current terminal session without throwing any errors. Other useful commands: + - `docker compose down`: stop the containers without deleting their data. - `docker compose down -v` tears down the whole environment, killing the containers and deleting any data volumes permanently. ⚠️ This will delete the local DB, do it only if you don't care about the data. - `docker compose up -d`: runs the containers in the background without blocking the current terminal sessions. 
@@ -32,4 +36,4 @@ Other useful commands: When adding a new chain integration make sure to add the new charin id to all relevant config deployment files, in the `supportedNetworks` e.g. `config/staging.json` or the bunlder will not start that network -even thouh is supported. \ No newline at end of file +even thouh is supported. diff --git a/centrifugo_config.json b/centrifugo_config.json new file mode 100644 index 00000000..37982cc7 --- /dev/null +++ b/centrifugo_config.json @@ -0,0 +1,30 @@ +{ + "token_hmac_secret_key": "9edb7c38-0f55-4627-9bda-4cc050b5f6cb", + "admin_password": "admin_password", + "admin_secret": "admin_secret", + "api_key": "a4c3c3df-4294-4719-a6a6-0c3416d68466", + "allowed_origins": ["*"], + "debug": true, + "admin": true, + "log_level": "debug", + "client_anonymous": true, + "client_channel_limit": 512, + "namespaces": [ + { + "name": "relayer", + "publish": true, + "history_size": 10, + "history_ttl": "300s", + "recover": true, + "anonymous": false + }, + { + "name": "transaction", + "publish": true, + "history_size": 10, + "history_ttl": "300s", + "recover": true, + "anonymous": true + } + ] +} \ No newline at end of file diff --git a/config/default.json b/config/default.json index 93a11d14..5fd70f02 100644 --- a/config/default.json +++ b/config/default.json @@ -1014,6 +1014,7 @@ "networksNotSupportingEthCallStateOverrides": [ 1101, 1442, 81, 592, 169, 3441005, 91715, 7116, 9980, 88882, 88888, 81457 ], + "trustWalletSupportedNetworks": [137, 80001, 56, 204, 42161, 10, 8453], "networksNotSupportingEthCallBytecodeStateOverrides": [ 1, 59140, 59144, 84532, 421614, 168587773, 80001, 43113, 11155111, 1101, 88882, 88888, 1442, 420, 81457 @@ -1067,5 +1068,6 @@ }, "aaDashboardBackend": { "url": "https://paymaster-dashboard-backend-v2.staging.biconomy.io/api/v2/bundlers/auth" - } + }, + "isTWSetup": false } diff --git a/doc/bundler-tw-staging.md b/doc/bundler-tw-staging.md new file mode 100644 index 00000000..c3f32b80 --- /dev/null +++ b/doc/bundler-tw-staging.md @@ -0,0 +1,267 @@ + +This file contains the output of running the initial setup script for +bundler tw staging. +```bash +./bundler-initial-setup.sh configs/bundler-tw-staging.cfg +``` + +
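Before running the script, the local gcloud project and kubectl context need to point at the target cluster. A minimal sketch, assuming the project, cluster name and region that appear in the output below:

```bash
# select the GCP project used for the staging bundler
gcloud config set project biconomy-prod

# fetch kubeconfig credentials for the target cluster
gcloud container clusters get-credentials dedicated-bundler --region us-east1

# the setup script exits early if this does not match the CONTEXT value in the cfg file
kubectl config current-context
```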
+ ./bundler-initial-setup.sh configs/bundler-tw-staging.cfg + +``` +Current cluster name is: dedicated-bundler +Current trimmed cluster name is: dedicate +This script must be run only once for a new bundler deployment in the namespace +Context matches. Continuing... +Global IP with name bundler-tw-staging already exists. + The IP address for bundler-tw-staging is: 34.110.199.133 + Make sure you have added an A record against bundler-tw-staging.biconomy.io with 34.110.199.133 +NAME STATUS AGE +bundler-tw-staging Active 32h + SUCCESS: bundler-tw-staging exists GCP Secret dedicate-bundler-tw-config doesn't exist. +Creating it now from /Users/radupopa/p/bico/bundler/config.json.enc +Created version [1] of the secret [dedicate-bundler-tw-config]. +Secret dedicate-bundler-tw-config has been created. +GCP_ENCRYPTED_CONFIG_SECRET dedicate-bundler-tw-config process completed. +Secret dedicate-bundler-tw-passphrase does not exist. +Enter the password: +Created version [1] of the secret [dedicate-bundler-tw-passphrase]. +Secret dedicate-bundler-tw-passphrase created successfully. +Creting GCP IAM role/service [dedicate-bundler-tw-bund] account +Created service account [dedicate-bundler-tw-bund]. +Creation of GCP IAM role/service [dedicate-bundler-tw-bund] account completed +Adding role binding of GCP IAM role/service [dedicate-bundler-tw-bund] to GCP_ENCRYPTED_CONFIG_SECRET=[dedicate-bundler-tw-config] +Updated IAM policy for secret [dedicate-bundler-tw-config]. +bindings: +- members: + - serviceAccount:dedicate-bundler-tw-bund@biconomy-prod.iam.gserviceaccount.com + role: roles/secretmanager.secretAccessor +etag: BwYPY_iWdus= +version: 1 +Role binding of GCP IAM role/service [dedicate-bundler-tw-bund] to GCP_ENCRYPTED_CONFIG_SECRET=[dedicate-bundler-tw-config] completed +Adding role binding of GCP IAM role/service [dedicate-bundler-tw-bund] to SECRET_NAME=[dedicate-bundler-tw-passphrase] +Updated IAM policy for secret [dedicate-bundler-tw-passphrase]. +bindings: +- members: + - serviceAccount:dedicate-bundler-tw-bund@biconomy-prod.iam.gserviceaccount.com + role: roles/secretmanager.secretAccessor +etag: BwYPY_i1I7k= +version: 1 +Role binding of GCP IAM role/service [dedicate-bundler-tw-bund] to SECRET_NAME=[dedicate-bundler-tw-passphrase] completed +Creating dedicate-bundler-tw--sa in bundler-tw-staging +serviceaccount/dedicate-bundler-tw--sa created +dedicate-bundler-tw--sa in bundler-tw-staging created +Binding dedicate-bundler-tw-bund with kubernetes dedicate-bundler-tw--sa for workloadIdentityUser in bundler-tw-staging +Updated IAM policy for serviceAccount [dedicate-bundler-tw-bund@biconomy-prod.iam.gserviceaccount.com]. +bindings: +- members: + - serviceAccount:biconomy-prod.svc.id.goog[bundler-tw-staging/dedicate-bundler-tw--sa] + role: roles/iam.workloadIdentityUser +etag: BwYPY_kR-wE= +version: 1 +Binding Complete +Annotating dedicate-bundler-tw-bund with kubernetes dedicate-bundler-tw--sa for workloadIdentityUser in bundler-tw-staging +serviceaccount/dedicate-bundler-tw--sa annotate +Annotation Complete + +------ Deploying redis to bundler-tw-staging ----- +Release "redis" does not exist. Installing it now. 
+W0120 19:32:11.665549 31221 warnings.go:70] autopilot-default-resources-mutator:Autopilot updated StatefulSet bundler-tw-staging/redis-replicas: adjusted resources to meet requirements for containers [redis] (see http://g.co/gke/autopilot-resources) +W0120 19:32:11.665557 31221 warnings.go:70] autopilot-default-resources-mutator:Autopilot updated StatefulSet bundler-tw-staging/redis-master: adjusted resources to meet requirements for containers [redis] (see http://g.co/gke/autopilot-resources) +NAME: redis +LAST DEPLOYED: Sat Jan 20 19:32:01 2024 +NAMESPACE: bundler-tw-staging +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +CHART NAME: redis +CHART VERSION: 18.1.2 +APP VERSION: 7.2.1 + +** Please be patient while the chart is being deployed ** + +Redis® can be accessed on the following DNS names from within your cluster: + + redis-master.bundler-tw-staging.svc.cluster.local for read/write operations (port 6379) + redis-replicas.bundler-tw-staging.svc.cluster.local for read-only operations (port 6379) + + + +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace bundler-tw-staging redis -o jsonpath="{.data.redis-password}" | base64 -d) + +To connect to your Redis® server: + +1. Run a Redis® pod that you can use as a client: + + kubectl run --namespace bundler-tw-staging redis-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image docker.io/bitnami/redis:7.2.1-debian-11-r0 --command -- sleep infinity + + Use the following command to attach to the pod: + + kubectl exec --tty -i redis-client \ + --namespace bundler-tw-staging -- bash + +2. Connect using the Redis® CLI: + REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h redis-master + REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h redis-replicas + +To connect to your database from outside the cluster execute the following commands: + + kubectl port-forward --namespace bundler-tw-staging svc/redis-master 6379:6379 & + REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli -h 127.0.0.1 -p 6379 +Mongo deployment completed +Redis url \033[0;32m<>\033[0m +To debug Redis, you can use the following commands: +#kubectl run redis-debug --rm -i --tty --image redis:latest -- bash +If you want to benchmark your redis installation +redis-benchmark -h 127.0.0.1 -p -c 100 -n 100000 -a +#redis-cli -h redis-master.bundler-tw-staging.svc.cluster.local -a IAMredis985834 + + ####### Deploying rabbitmq to bundler-tw-staging ####### +Release "rabbitmq" does not exist. Installing it now. +W0120 19:34:02.986382 31666 warnings.go:70] autopilot-default-resources-mutator:Autopilot updated StatefulSet bundler-tw-staging/rabbitmq: adjusted resources to meet requirements for containers [rabbitmq] (see http://g.co/gke/autopilot-resources) +NAME: rabbitmq +LAST DEPLOYED: Sat Jan 20 19:33:50 2024 +NAMESPACE: bundler-tw-staging +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +CHART NAME: rabbitmq +CHART VERSION: 12.3.0 +APP VERSION: 3.12.7** Please be patient while the chart is being deployed ** + +Credentials: + echo "Username : RMQUsername" + echo "Password : $(kubectl get secret --namespace bundler-tw-staging rabbitmq -o jsonpath="{.data.rabbitmq-password}" | base64 -d)" + echo "ErLang Cookie : $(kubectl get secret --namespace bundler-tw-staging rabbitmq -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d)" + +Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. 
If this is not the first installation of this chart, the credentials may not be valid. +This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading. +More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases. + +RabbitMQ can be accessed within the cluster on port 5672 at rabbitmq.bundler-tw-staging.svc.cluster.local + +To access for outside the cluster, perform the following steps: + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://127.0.0.1:5672/" + kubectl port-forward --namespace bundler-tw-staging svc/rabbitmq 5672:5672 + +To Access the RabbitMQ Management interface: + + echo "URL : http://127.0.0.1:15672/" + kubectl port-forward --namespace bundler-tw-staging svc/rabbitmq 15672:15672 + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace bundler-tw-staging svc/rabbitmq 9419:9419 & + echo "Prometheus Metrics URL: http://127.0.0.1:9419/metrics" + +Then, open the obtained URL in a browser. +[RABBITMQ] deployement completedTo connect to the rabitMQ +To test connections +kubectl run curl-test --namespace bundler-tw-staging --image=busybox --rm -it -- /bin/sh +curl -i -u RMQUsername:RMQpassword http://rabbitmq.testing.svc.cluster.local:15672/api/overview + +------ Deploying mongo to bundler-tw-staging ----- +Release "mongo" does not exist. Installing it now. +W0120 19:39:08.696250 31721 warnings.go:70] autopilot-default-resources-mutator:Autopilot updated Deployment bundler-tw-staging/mongo: defaulted unspecified resources for containers [metrics] (see http://g.co/gke/autopilot-defaults), and adjusted resources to meet requirements for containers [mongodb] (see http://g.co/gke/autopilot-resources) +NAME: mongo +LAST DEPLOYED: Sat Jan 20 19:38:58 2024 +NAMESPACE: bundler-tw-staging +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +CHART NAME: mongodb +CHART VERSION: 13.18.4 +APP VERSION: 6.0.10 + +** Please be patient while the chart is being deployed ** + +MongoDB® can be accessed on the following DNS name(s) and ports from within your cluster: + + mongo.bundler-tw-staging.svc.cluster.local + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace bundler-tw-staging mongo -o jsonpath="{.data.mongodb-root-password}" | base64 -d) + +To get the password for "userone" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace bundler-tw-staging mongo -o jsonpath="{.data.mongodb-passwords}" | base64 -d | awk -F',' '{print $1}') + +To get the password for "usertwo" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace bundler-tw-staging mongo -o jsonpath="{.data.mongodb-passwords}" | base64 -d | awk -F',' '{print $2}') + +To connect to your database, create a MongoDB® client container: + + kubectl run --namespace bundler-tw-staging mongo-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image docker.io/zcube/bitnami-compat-mongodb:latest --command -- bash + +Then, run the following command: + mongosh admin --host "mongo" --authenticationDatabase admin -u $MONGODB_ROOT_USER -p $MONGODB_ROOT_PASSWORD + +To connect to your database from outside the cluster execute the following commands: + + kubectl port-forward --namespace bundler-tw-staging svc/mongo 27017:27017 & + mongosh --host 
127.0.0.1 --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD + +To access the MongoDB® Prometheus metrics, get the MongoDB® Prometheus URL by running: + + kubectl port-forward --namespace bundler-tw-staging svc/mongo-metrics 9216:9216 & + echo "Prometheus Metrics URL: http://127.0.0.1:9216/metrics" + +Then, open the obtained URL in a browser. +Mongo deployment completed +To connect to the databases, use the following URLs: +Paymaster dashboard DB URL: +mongodb://usertwo:usertwopassword@mongo.bundler-tw-staging.svc.cluster.local:27017/paymaster-dashboard + +RelayerNode DB URL: +mongodb://userone:useronepassword@mongo.bundler-tw-staging.svc.cluster.local:27017/relayer-node-service + +To test connections +kubectl run -i --tty --rm debugmongo --image=mongo --restart=Never --namespace=bundler-tw-staging -- bash +mongosh mongodb://mongo.bundler-tw-staging.svc.cluster.local:27017/relayer-node-service -u userone -p useronepassword + +####### Deploying centrifugo to bundler-tw-staging ####### +Release "centrifugo" does not exist. Installing it now. +W0120 19:41:09.094580 31734 warnings.go:70] autopilot-default-resources-mutator:Autopilot updated Deployment bundler-tw-staging/centrifugo: adjusted resources to meet requirements for containers [centrifugo] (see http://g.co/gke/autopilot-resources) +NAME: centrifugo +LAST DEPLOYED: Sat Jan 20 19:41:02 2024 +NAMESPACE: bundler-tw-staging +STATUS: deployed +REVISION: 1 +TEST SUITE: None +NOTES: +1. Get the application URL by running these commands: + export POD_NAME=$(kubectl get pods --namespace bundler-tw-staging -l "app.kubernetes.io/name=centrifugo,app.kubernetes.io/instance=centrifugo" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace bundler-tw-staging port-forward $POD_NAME 8000:8000 + + + ####### Deployed centrifugo to bundler-tw-staging ####### + +staging-trust-wallet-chains.sh +bundler-tw-staging.biconomy.io +staging-trust-wallet-chains.sh +/Users/radupopa/p/bico/bundler/install-bundler +/Users/radupopa/p/bico/bundler/install-bundler/configs/chains/staging-trust-wallet-chains.sh +Chains that are being added to Ingress 80001 +{80001} +IP_NAME that will be attached to Ingress is bundler-tw-staging +Release "network" does not exist. Installing it now. +W0120 19:41:30.901571 31746 warnings.go:70] annotation "kubernetes.io/ingress.class" is deprecated, please use 'spec.ingressClassName' instead +NAME: network +LAST DEPLOYED: Sat Jan 20 19:41:25 2024 +NAMESPACE: bundler-tw-staging +STATUS: deployed +REVISION: 1 +TEST SUITE: None +####### Deployed network to bundler-tw-staging ####### +``` +
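Once the script finishes, a quick sanity check of the namespace can confirm that the components above came up. A sketch, assuming kubectl and helm still point at the same cluster and context used for the setup:

```bash
# helm releases installed by the setup script (redis, rabbitmq, mongo, centrifugo, network)
helm ls --namespace bundler-tw-staging

# all pods in the namespace should reach Running/Ready
kubectl get pods --namespace bundler-tw-staging

# the ingress should be bound to the reserved global IP (bundler-tw-staging)
kubectl get ingress --namespace bundler-tw-staging
```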
\ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 00000000..053cc7a6 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# Set ORDINAL_INDEX from file + +## this is being mounted from the kubernetes statefulset +## the nodepathindex will be extracetd from the statefulset +#ordinal index and will be written to this path. +export NODE_CONFIG=$(cat /etc/podinfo/ordinal_index) + + +## this is being mounted from the kubernetes statefulset +## the secret will be read from the relevant secret +# and will be written at this path +export BUNDLER_CONFIG_PASSPHRASE=$(cat /gcpsecrets/config-passphrase) + +# Start Node.js application + +# Command to run the application +# sleep 3600 +yarn run start \ No newline at end of file diff --git a/install-bundler/.DS_Store b/install-bundler/.DS_Store new file mode 100644 index 00000000..c56d942e Binary files /dev/null and b/install-bundler/.DS_Store differ diff --git a/install-bundler/ParseLogs.md b/install-bundler/ParseLogs.md new file mode 100644 index 00000000..acc4042a --- /dev/null +++ b/install-bundler/ParseLogs.md @@ -0,0 +1,301 @@ + +# Progression of logs + +## Phase 1: When a useOps is submitted, this is the log entry you will see in the the console. +``` +{ + "insertId": "wipvkdza8k5odu2i", + "jsonPayload": { + "level": "info", + "message": "userOp received: {\"sender\":\"0xf275fa2560ceb8badc313a2a29fb166f7ca5de5e\",\"nonce\":\"0x0e\",\"initCode\":\"0x\",\"callData\":\"0x912ccaa3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000020000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000\",\"maxFeePerGas\":\"0x861c4698\",\"maxPriorityFeePerGas\":\"0x59682f10\",\"verificationGasLimit\":\"0xead4\",\"callGasLimit\":\"0x01b5f7\",\"preVerificationGas\":\"0x0122a4\",\"paymasterAndData\":\"0x000031dd6d9d3a133e663660b959162870d755d4000000000000000000000000d39222801871b185ca8db59a62153ee6f07c038800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000041c3f1dc30d135ec799c9a691883d1e3cbb5db41dc7ff55ee6c935ac395a6d12e21d2b04c296e1deeda3d6ca728a5b48c6206e5955ae5e16c04afb25526ab496841b00000000000000000000000000000000000000000000000000000000000000\",\"signature\":\"0x661c06494b1d0d0844130b59db399b763b5d9df0a36cbe80a38b41d4dbbe27b7016821f14349e96307533a72a95550dd83909853e3b9a6bfa8a274849b8423451b\"} on chainId: 80001", + "path": "dist/common/simulation/BundlerSimulationService.js", + "hostname": 
"bundlers-statefulset-1", + "request-id": "3e063180-61cc-11ee-b6a7-ab6d82b174d2", + "timestamp": "2023-10-03 09:07:13 690" + }, + "resource": { + "type": "k8s_container", + "labels": { + "location": "us-east1-b", + "container_name": "bundlers-pod", + "namespace_name": "qabundler", + "project_id": "biconomy-test-397415", + "pod_name": "bundlers-statefulset-1", + "cluster_name": "with-gcplb" + } + }, + "timestamp": "2023-10-03T09:07:13.690197056Z", + "severity": "INFO", + "labels": { + "k8s-pod/app_kubernetes_io/name": "bundlers", + "compute.googleapis.com/resource_name": "gke-with-gcplb-default-pool-71afda7e-fbdt", + "k8s-pod/app": "bundlers", + "k8s-pod/statefulset_kubernetes_io/pod-name": "bundlers-statefulset-1", + "k8s-pod/namespace": "qabundler", + "k8s-pod/controller-revision-hash": "bundlers-statefulset-566644c5b4", + "k8s-pod/app_kubernetes_io/instance": "qabundler-test-bundler" + }, + "logName": "projects/biconomy-test-397415/logs/stdout", + "receiveTimestamp": "2023-10-03T09:07:15.612152409Z" +} +``` + +#### Python code to get UserOps submitted with the timestamp and nonce + +``` +def userop_submitted_noce(data): + userop_nonces = [] + for e in data: + if e.get("jsonPayload") and e["jsonPayload"].get("message"): + if "userOp received" in e["jsonPayload"]["message"]: + content = e["jsonPayload"]["message"] + match = re.search(r'"nonce":"(.*?)"', content) + nonce = match.group(1) if match else None + userop_nonces.append({ + "timestamp": e["jsonPayload"]["timestamp"], + "pod_name":e["resource"]["labels"]["pod_name"], + "nonce": nonce, + "message": "UserOp received"}) + return userop_nonces +``` + +``` + userop_nonces = userop_submitted_noce(data) +``` + + +## Phase 2: When active relayer is selected to submit the tx +``` +{ + "insertId": "vd548aq0m3y9a94e", + "jsonPayload": { + "path": "src/services/consumer/BundlerConsumer.js", + "message": "Setting active relayer: 0x75c033055cd4f144d51b59b35dae02d91be03e74 as beneficiary for userOp: 
{\"sender\":\"0x23da80269487c6033827360fac4e283dbb150e58\",\"nonce\":\"0x0a\",\"initCode\":\"0x\",\"callData\":\"0x912ccaa3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000020000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000\",\"maxFeePerGas\":\"0x861c4698\",\"maxPriorityFeePerGas\":\"0x59682f10\",\"verificationGasLimit\":\"0xead4\",\"callGasLimit\":\"0x01b5f7\",\"preVerificationGas\":\"0x0122a4\",\"paymasterAndData\":\"0x000031dd6d9d3a133e663660b959162870d755d4000000000000000000000000d39222801871b185ca8db59a62153ee6f07c038800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000041c9d8457ec4e8ff42e5881686170def54e01c9bc67a75b69ee1dd394f467669884ff438a03790d30ec184184129f71873d9c59ee63c770d26689319b53d4d7fd11c00000000000000000000000000000000000000000000000000000000000000\",\"signature\":\"0x2ef30e3df238b635742e27e3da89f987005907f1babd6f80cb21437646513c0557b4df6b234b3fce62cf84dc5cb001ce55de3460db78822e98395dd8fef3caa91c\"} for transactionId: 0x18a9c030ba977a4e3360be492b1867e34728397a1ef121cf686844288150cfb4 on chainId: 80001", + "hostname": "bundlers-statefulset-1", + "level": "info", + "timestamp": "2023-10-03 09:07:13 509" + }, + "resource": { + "type": "k8s_container", + "labels": { + "location": "us-east1-b", + "container_name": "bundlers-pod", + "cluster_name": "with-gcplb", + "pod_name": "bundlers-statefulset-1", + "project_id": "biconomy-test-397415", + "namespace_name": "qabundler" + } + }, + "timestamp": "2023-10-03T09:07:13.509152500Z", + "severity": "INFO", + "labels": { + "k8s-pod/namespace": "qabundler", + "k8s-pod/controller-revision-hash": "bundlers-statefulset-566644c5b4", + "compute.googleapis.com/resource_name": "gke-with-gcplb-default-pool-71afda7e-fbdt", + "k8s-pod/app_kubernetes_io/instance": "qabundler-test-bundler", + "k8s-pod/statefulset_kubernetes_io/pod-name": "bundlers-statefulset-1", + "k8s-pod/app_kubernetes_io/name": "bundlers", + "k8s-pod/app": "bundlers" + }, + "logName": "projects/biconomy-test-397415/logs/stdout", + "receiveTimestamp": "2023-10-03T09:07:15.612152409Z" +} +``` +#### python code +``` + +def nonce_to_txids(data): + array = [] + txid_nonce = {} + for e in data: + if e.get("jsonPayload") and e["jsonPayload"].get("message"): + if "Setting active relayer" in e["jsonPayload"]["message"]: + s = e["jsonPayload"]["message"] + active_relayer_match = re.search(r'Setting active relayer: (\w+)', s) + activeRelayer = active_relayer_match.group(1) if 
active_relayer_match else None + transaction_id_match = re.search(r'transactionId: (\w+)', s) + transactionId = transaction_id_match.group(1) if transaction_id_match else None + # Extract nonce from the JSON + nonce_match = re.search(r'"nonce":"(\w+)"', s) + nonce = nonce_match.group(1) if nonce_match else None + array.append({ + "timestamp": e["jsonPayload"]["timestamp"], + "nonce": nonce, + "transactionId": transactionId, + "pod_name": e["resource"]["labels"]["pod_name"], + "relayer": activeRelayer, + "message": "Active relayer assigned"}) + txid_nonce[transactionId] = nonce + return array, txid_nonce + +``` + +``` +nonceTxIds, txid_nonce = nonce_to_txids(data) +``` + + +## 3rd phase: Submitted to the blockchain + +``` +{ + "insertId": "sz6qqeiwmg490c1r", + "jsonPayload": { + "level": "info", + "timestamp": "2023-10-03 09:07:13 529", + "hostname": "bundlers-statefulset-1", + "message": "Sending transaction to network: {\"from\":\"0x75c033055cd4f144d51b59b35dae02d91be03e74\",\"to\":\"0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789\",\"value\":\"0x0\",\"gasLimit\":\"0x22a6ae\",\"data\":\"0x1fad948c000000000000000000000000000000000000000000000000000000000000004000000000000000000000000075c033055cd4f144d51b59b35dae02d91be03e740000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000023da80269487c6033827360fac4e283dbb150e58000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000001b5f7000000000000000000000000000000000000000000000000000000000000ead400000000000000000000000000000000000000000000000000000000000122a400000000000000000000000000000000000000000000000000000000861c46980000000000000000000000000000000000000000000000000000000059682f100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000244912ccaa3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000020000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e497953000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d4000031dd6d9d3a133e663660b959162870d755d4000000000000000000000000d39222801871b
185ca8db59a62153ee6f07c038800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000041c9d8457ec4e8ff42e5881686170def54e01c9bc67a75b69ee1dd394f467669884ff438a03790d30ec184184129f71873d9c59ee63c770d26689319b53d4d7fd11c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000412ef30e3df238b635742e27e3da89f987005907f1babd6f80cb21437646513c0557b4df6b234b3fce62cf84dc5cb001ce55de3460db78822e98395dd8fef3caa91c00000000000000000000000000000000000000000000000000000000000000\",\"chainId\":80001,\"nonce\":16,\"type\":2,\"maxFeePerGas\":\"0x861c4698\",\"maxPriorityFeePerGas\":\"0x59682f10\"} for bundler address: 0x75c033055cd4f144d51b59b35dae02d91be03e74 for transactionId: 0x18a9c030ba977a4e3360be492b1867e34728397a1ef121cf686844288150cfb4 on chainId: 80001", + "path": "src/services/transaction-service/EVMTransactionService.js" + }, + "resource": { + "type": "k8s_container", + "labels": { + "project_id": "biconomy-test-397415", + "namespace_name": "qabundler", + "cluster_name": "with-gcplb", + "container_name": "bundlers-pod", + "location": "us-east1-b", + "pod_name": "bundlers-statefulset-1" + } + }, + "timestamp": "2023-10-03T09:07:13.529237399Z", + "severity": "INFO", + "labels": { + "k8s-pod/namespace": "qabundler", + "k8s-pod/statefulset_kubernetes_io/pod-name": "bundlers-statefulset-1", + "k8s-pod/app_kubernetes_io/name": "bundlers", + "k8s-pod/app": "bundlers", + "compute.googleapis.com/resource_name": "gke-with-gcplb-default-pool-71afda7e-fbdt", + "k8s-pod/controller-revision-hash": "bundlers-statefulset-566644c5b4", + "k8s-pod/app_kubernetes_io/instance": "qabundler-test-bundler" + }, + "logName": "projects/biconomy-test-397415/logs/stdout", + "receiveTimestamp": "2023-10-03T09:07:15.612152409Z" +} +``` + +#### python code +``` + + +def submitted_to_blockchain(data, txid_nonce): + result = [] + for e in data: + if e.get("jsonPayload") and e["jsonPayload"].get("message"): + if "Sending transaction to network" in e["jsonPayload"]["message"]: + s = e["jsonPayload"]["message"] + transaction_id_match = re.search(r'transactionId: (\w+)', s) + transactionId = transaction_id_match.group(1) if transaction_id_match else None + + # Extract nonce from the JSON + _result = { + "timestamp": e["jsonPayload"]["timestamp"], + "transactionId": transactionId, + "pod_name": e["resource"]["labels"]["pod_name"], + "message": "Tx submitted on blockchain" + } + try: + _result.update({"nonce": txid_nonce[transactionId]}) + except Exception: + _result.update({"nonce": None}) + + result.append(_result) + + return result +``` + +``` +submitted = submitted_to_blockchain(data, txid_nonce) +``` + +## Phase 4: Transaction Execution + +``` +{ + "insertId": "akmjpt3hh4udkj42", + "jsonPayload": { + "message": "Transaction execution response for transactionId 0x1e80dc26bc1307e1119d657dd5ca7147d7cdeac8b0560e035228021204da62a7: 
{\"success\":true,\"transactionResponse\":{\"type\":2,\"chainId\":80001,\"nonce\":19,\"maxPriorityFeePerGas\":{\"type\":\"BigNumber\",\"hex\":\"0x59682f10\"},\"maxFeePerGas\":{\"type\":\"BigNumber\",\"hex\":\"0x861c4698\"},\"gasPrice\":null,\"gasLimit\":{\"type\":\"BigNumber\",\"hex\":\"0x22a6ae\"},\"to\":\"0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789\",\"value\":{\"type\":\"BigNumber\",\"hex\":\"0x00\"},\"data\":\"0x1fad948c0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000896aacdb5e0e280c39f61da7004f5f30993678c400000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000d94de4af1ffef0a257e2895318c01e8de49ceab1000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000001b5f7000000000000000000000000000000000000000000000000000000000000ead400000000000000000000000000000000000000000000000000000000000122a400000000000000000000000000000000000000000000000000000000861c46980000000000000000000000000000000000000000000000000000000059682f100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000244912ccaa3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000020000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000001758f42af7026fbbb559dc60ece0de3ef81f665e0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e49795300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002440d097c3000000000000000000000000f73501e5f730d1236a7921ff5a3e965b1e497953000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d4000031dd6d9d3a133e663660b959162870d755d4000000000000000000000000d39222801871b185ca8db59a62153ee6f07c03880000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004136ac64424a4b3a75ce12e9057f9d5e43c881ba364e2ad7a9e16612ff10a1eed02c656478c1304ccd54742074b44641e7281ff46dd22fc36c88f041fbab5c62211c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000041cd2e2a88d0ff63b8690bc6bcadd4e9802500fce7d294eb9d42becad975ec6b1f13235db7e058aaafcca1585085d7aeba46fcafff3c7ce1059ac960b4767c793f1b00000000000000000000000000000000000000000000000000000000000000\",\"accessList\":[],\"hash\":\"0x87440f4a4b7938a3628920
89fbd51fa8a95aea238d77b9f3b4f518d13748199e\",\"v\":1,\"r\":\"0x1e66db39a7c6181b8e04872ce4099d4c41231cf42fb5d0049e833a0438e46a1b\",\"s\":\"0x062174f47f2dbd7f830f0c581574f563761fa1fc4395709a789bb67d6943df02\",\"from\":\"0x896Aacdb5E0E280c39f61DA7004f5F30993678c4\",\"confirmations\":0}} on chainId 80001", + "path": "src/services/transaction-service/EVMTransactionService.js", + "hostname": "bundlers-statefulset-0", + "level": "info", + "timestamp": "2023-10-03 09:07:13 882" + }, + "resource": { + "type": "k8s_container", + "labels": { + "pod_name": "bundlers-statefulset-0", + "container_name": "bundlers-pod", + "namespace_name": "qabundler", + "cluster_name": "with-gcplb", + "project_id": "biconomy-test-397415", + "location": "us-east1-b" + } + }, + "timestamp": "2023-10-03T09:07:13.882490751Z", + "severity": "INFO", + "labels": { + "k8s-pod/app": "bundlers", + "k8s-pod/app_kubernetes_io/instance": "qabundler-test-bundler", + "compute.googleapis.com/resource_name": "gke-with-gcplb-default-pool-71afda7e-fbdt", + "k8s-pod/statefulset_kubernetes_io/pod-name": "bundlers-statefulset-0", + "k8s-pod/app_kubernetes_io/name": "bundlers", + "k8s-pod/namespace": "qabundler", + "k8s-pod/controller-revision-hash": "bundlers-statefulset-566644c5b4" + }, + "logName": "projects/biconomy-test-397415/logs/stdout", + "receiveTimestamp": "2023-10-03T09:07:15.520648899Z" +} +``` + +#### python code +``` +def execution_txids(data, txid_nonce): + result = [] + for e in data: + if e.get("jsonPayload") and e["jsonPayload"].get("message"): + if "Transaction execution response for transactionId" in e["jsonPayload"]["message"]: + json_string = e["jsonPayload"]["message"] + + + transaction_id_match = re.search(r'transactionId (0x[a-fA-F0-9]{64})', json_string) + transactionId = transaction_id_match.group(1) if transaction_id_match else None + + + transaction_hash_match = re.search(r'\"hash\":\"(0x[a-fA-F0-9]{64})\"', json_string) + transactionHash = transaction_hash_match.group(1) if transaction_id_match else None + _result = { + "timestamp": e["jsonPayload"]["timestamp"], + "transactionId": transactionId, + "pod_name": e["resource"]["labels"]["pod_name"], + "transactionHash": transactionHash, + "message": "Execution completed" + } + + try: + _result.update({"nonce": txid_nonce[transactionId]}) + except: + _result.update({"nonce": None}) + result.append(_result) + return result + +``` + +``` +execution = execution_txids(data, txid_nonce) +``` + +## Result: +Concatanate all the arrays and print, It will show the the execution of all the userop submitted + +``` +userop_nonces = userop_submitted_noce(data) +nonceTxIds, txid_nonce = nonce_to_txids(data) +submitted = submitted_to_blockchain(data, txid_nonce) +execution = execution_txids(data, txid_nonce) + +result = userop_nonces + nonceTxIds + submitted + execution +sorted_data = sorted(result, key=lambda x: x['timestamp']) +for entry in sorted_data: + print(f"{entry['timestamp']}\t{entry['pod_name']}\t{entry['nonce']}\t{entry['message']}") + +``` \ No newline at end of file diff --git a/install-bundler/TODO.md b/install-bundler/TODO.md new file mode 100644 index 00000000..6611f171 --- /dev/null +++ b/install-bundler/TODO.md @@ -0,0 +1,7 @@ + +- [ ] If new namespaces are working correctly and arent polluting other namespaces . +- [ ] Get rid of exeternalComponents directory and install redis cluster . +- [ ] Replicas of RabbitMQ . +- [ ] Replicas of MongoDB . +- [ ] Integrate datadog for production. +- [ ] Integrate secrets for production. 
\ No newline at end of file diff --git a/install-bundler/bundler-initial-setup.sh b/install-bundler/bundler-initial-setup.sh new file mode 100755 index 00000000..be434a48 --- /dev/null +++ b/install-bundler/bundler-initial-setup.sh @@ -0,0 +1,163 @@ +#!/usr/bin/env bash + +set -e +RED='\033[0;31m' +NC='\033[0m' +GREEN='\033[0;32m' +CONFIG_FILE=$1 +CURRENT_CONTEXT=$(kubectl config current-context) +CURRENT_PROJECT_ID=$(gcloud config get-value project 2>/dev/null) +CLUSTER_NAME=$(echo "$CURRENT_CONTEXT" | awk -F '_' '{print $NF}') +TRIMMED_CLUSTER_NAME=$(echo "$CLUSTER_NAME" | cut -c 1-8) + + +echo "Current cluster name is: $CLUSTER_NAME" +echo "Current trimmed cluster name is: $TRIMMED_CLUSTER_NAME" + +echo "This script must be run only once for a new bundler deployment in the namespace" + +# Check if the required arguments are passed +# Example: sh install-release-cloud.sh configs/testing.cfg +if [ "$#" -ne 1 ]; then + echo " ${RED} Usage: $0 for deployment ${NC}" + exit 1 +fi + + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +if [ ! -f "$DIR/$CONFIG_FILE" ]; then + echo "Error: Configuration file $DIR/$CONFIG_FILE not found. Choose from configs present in configs" + exit 1 +fi + +if [ ! -r "$DIR/$CONFIG_FILE" ]; then + echo "${RED} Error: Configuration file ${DIR}/${CONFIG_FILE} not readable. ${NC}" + exit 1 +fi + +# Load the configuration variables +# shellcheck disable=SC1090 +source "$DIR/$CONFIG_FILE" +# Sourced varaibles +# NAMESPACE +# PROJECT_ID +# IMAGE +# IMAGE_TAG +# DNS_NAME +# IP_NAME +# CHAINS_CFG_FILENAME +# CONTEXT +# SIMULATION_DATA_JSON +# TOKEN_PRICE_JSON +# SLACK_JSON +# PROVIDER_JSON +# DATASOURCES_JSON +# SOCKET_SERVICE_JSON +# QUEUE_URL + +# Check if the current context matches the one you're looking for +if [[ "$CURRENT_CONTEXT" == "$CONTEXT" ]]; then + echo "Context matches. Continuing..." + # Add your script logic here that should run when contexts match +else + echo "Context does not match. Exiting..." + exit 1 +fi + +# Fetch the current project ID from gcloud + +# Check if the project IDs match +if [[ "$CURRENT_PROJECT_ID" != "$PROJECT_ID" ]]; then + echo "${RED} Setting GCP PROJECT_ID to ${PROJECT_ID} ${NC}" + gcloud config set project "${PROJECT_ID}" +fi + +# Check if IP exists +IP_EXISTS=$(gcloud compute addresses list --filter="name=$IP_NAME" --global --format="get(name)") + +# If IP doesn't exist, create a new one +if [ -z "$IP_EXISTS" ]; then + echo "Global IP with name $IP_NAME doesn't exist creating it now" + gcloud compute addresses create "$IP_NAME" \ + --network-tier=PREMIUM \ + --ip-version=IPV4 \ + --global +else + echo "Global IP with name $IP_NAME already exists." +fi + +# Fetch and print the actual IP address +ACTUAL_IP=$(gcloud compute addresses describe "$IP_NAME" --global --format="get(address)") +# shellcheck disable=SC2059 +printf "${GREEN} The IP address for $IP_NAME is: $ACTUAL_IP ${NC}\n" + +# shellcheck disable=SC2059 +printf "${GREEN} Make sure you have added an A record against ${DNS_NAME} with ${ACTUAL_IP} ${NC}\n" + + +if ! kubectl get namespace "$NAMESPACE"; then + # shellcheck disable=SC2059 + printf "NAMESPACE ${NAMESPACE} doesnt exists, creating it now." 
+ kubectl create ns "${NAMESPACE}" +else + # shellcheck disable=SC2059 + printf "${GREEN} SUCCESS: NAMESPACE $NAMESPACE exists ${NC}" +fi + + +# shellcheck disable=SC1091 +source "setup-scripts/create-variables.sh" "$DIR/$CONFIG_FILE" +echo "" +echo "Created variables:" +echo "CURRENT_CONTEXT=${CURRENT_CONTEXT}" +echo "CLUSTER_NAME=${CLUSTER_NAME}" +echo "TRIMMED_CLUSTER_NAME=${TRIMMED_CLUSTER_NAME}" +echo "TRIMMED_NAMESPACE=${TRIMMED_NAMESPACE}" +echo "GCP_IAM_ROLE=${GCP_IAM_ROLE}" +echo "GCP_ENCRYPTED_CONFIG_SECRET=${GCP_ENCRYPTED_CONFIG_SECRET}" +echo "GCP_PLAINTEXT_CONFIG_SECRET=${GCP_PLAINTEXT_CONFIG_SECRET}" +echo "SECRET_NAME=${SECRET_NAME}" +echo "KUBE_SERVICE_ACCOUNT=${KUBE_SERVICE_ACCOUNT}" + + +if gcloud secrets describe "${GCP_PLAINTEXT_CONFIG_SECRET}" &> /dev/null; then + echo "Secret ${GCP_PLAINTEXT_CONFIG_SECRET} already exists." +else + echo "GCP Secret ${GCP_PLAINTEXT_CONFIG_SECRET} does not exist." + if gcloud secrets create "${GCP_PLAINTEXT_CONFIG_SECRET}" \ + --replication-policy="automatic" ; then + echo "Secret ${GCP_PLAINTEXT_CONFIG_SECRET} created successfully." + else + echo "Failed to create secret ${GCP_PLAINTEXT_CONFIG_SECRET}." + fi +fi + +##This will create a configMap from the config.json.enc and that will be mounted on +## all the pods in a namespace. The assumption is that a single namespace will only +## have two deployments paymaster and bundler thats why +# for bundler the configMap name will be bundler-common-configMap + +bash setup-scripts/create-encrypted-config-secret.sh "$GCP_ENCRYPTED_CONFIG_SECRET" +bash setup-scripts/create-secret.sh "$SECRET_NAME" +bash setup-scripts/create-gcp-role.sh "$PROJECT_ID" "$GCP_IAM_ROLE" "$GCP_ENCRYPTED_CONFIG_SECRET" "$SECRET_NAME" +bash setup-scripts/gcp-role-binding.sh "$NAMESPACE" "$PROJECT_ID" "$GCP_IAM_ROLE" "$KUBE_SERVICE_ACCOUNT" +exit +REDIS_MASTER_REPLICA=1 +REDIS_READ_REPLICA=2 +sh install-redis/install.sh "${NAMESPACE}" "${REDIS_MASTER_REPLICA}" "${REDIS_READ_REPLICA}" + +RABBITMQ_REPLICA_COUNT=3 +sh install-rabbitmq/install.sh "${NAMESPACE}" "${RABBITMQ_REPLICA_COUNT}" + +sh install-mongo/install.sh "${NAMESPACE}" + +CENTRIFUGO_REPLICA_COUNT=3 +sh install-centrifugo/install.sh "${NAMESPACE}" "${CENTRIFUGO_REPLICA_COUNT}" + +# sh install-prometheus/install.sh +# bash install-prometheus-adapter/install.sh $ENV $NAMESPACE "$CHAINS_CFG_FILENAME" +echo "$CHAINS_CFG_FILENAME" +bash network/install.sh "${NAMESPACE}" "${DNS_NAME}" "${IP_NAME}" "${CHAINS_CFG_FILENAME}" + +# sh install-grafana/install.sh $ENV $NAMESPACE diff --git a/install-bundler/bundler-update-release.sh b/install-bundler/bundler-update-release.sh new file mode 100755 index 00000000..008c8988 --- /dev/null +++ b/install-bundler/bundler-update-release.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +set -e + +RED='\033[0;31m' +NC='\033[0m' + +CONFIG_FILE=$1 +CONTAINER_IMAGE_TAG=$2 + +echo "Running script $0" + +GIT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )" +echo "GIT_ROOT is ${GIT_ROOT}" +CONFIG_FILE_PATH="${GIT_ROOT}/install-bundler/configs/${CONFIG_FILE}" +echo "CONFIG_FILE_PATH is ${CONFIG_FILE_PATH}" +echo ls -l "${GIT_ROOT}"/install-bundler/configs/ +ls -l "${GIT_ROOT}"/install-bundler/configs/ +git branch + +# Check if the required arguments are passed +# Example: sh install-release-cloud.sh configs/testing.cfg +if [ "$#" -gt 2 ]; then + # shellcheck disable=SC2059 + printf "${RED} Usage: $0 for deployment ${NC}\n" + echo "$#" + exit 1 +fi + + +if [ ! 
-f "${CONFIG_FILE_PATH}" ]; then + echo "Error: Configuration file ${CONFIG_FILE_PATH} not found. Choose from configs present in configs" + exit 1 +fi + +if [ ! -r "${CONFIG_FILE_PATH}" ]; then + echo "Error: Configuration file ${CONFIG_FILE_PATH} not readable." + exit 1 +fi + +if [[ -n "${CONTAINER_IMAGE_TAG}" ]] ; then + echo "Setting IMAGE_TAG to ${CONTAINER_IMAGE_TAG}" + sed -in -E "s/(^IMAGE_TAG=)(.*)/\1${CONTAINER_IMAGE_TAG}/g" "${CONFIG_FILE_PATH}" +fi + +echo "$0 calling ${GIT_ROOT}/install-bundler/bundler/install.sh ${CONFIG_FILE}" +bash "${GIT_ROOT}"/install-bundler/bundler/install.sh "${CONFIG_FILE}" diff --git a/install-bundler/bundler/Chart.yaml b/install-bundler/bundler/Chart.yaml new file mode 100644 index 00000000..6e72f6cc --- /dev/null +++ b/install-bundler/bundler/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: bundler +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v0.0.1" diff --git a/install-bundler/bundler/install.sh b/install-bundler/bundler/install.sh new file mode 100755 index 00000000..e09cadaf --- /dev/null +++ b/install-bundler/bundler/install.sh @@ -0,0 +1,197 @@ +#!/usr/bin/env bash + +CONFIG_FILE=$1 +GIT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )" +CONFIG_FILE_PATH="${GIT_ROOT}/install-bundler/configs/${CONFIG_FILE}" + +echo "$0 Working with CONFIG_FILE_PATH ${CONFIG_FILE_PATH}" +# Function to print in green +print_green() { + echo -e "\033[0;32m$1\033[0m" +} + +if [ ! -f "${CONFIG_FILE_PATH}" ]; then + echo "Error: Configuration file ${CONFIG_FILE_PATH} not found. 
Choose from configs present in configs" + exit 1 +fi + +# shellcheck disable=SC1091 +print_green "Loading ${CONFIG_FILE_PATH}" +# shellcheck disable=SC1090 +source "${CONFIG_FILE_PATH}" +echo "Sourced variables from config file:" +echo "NAMESPACE=${NAMESPACE}" +echo "PROJECT_ID=${PROJECT_ID}" +echo "IMAGE=${IMAGE}" +echo "IMAGE_TAG=${IMAGE_TAG}" +echo "DNS_NAME=${DNS_NAME}" +echo "IP_NAME=${IP_NAME}" +echo "CHAINS_CFG_FILENAME=${CHAINS_CFG_FILENAME}" +echo "CONTEXT=${CONTEXT}" +echo "" + +if [[ -z "${NAMESPACE}" ]] ; then + echo "Required configuration variable NAMESPACE was not specified" + exit 1 +elif [[ -z "${PROJECT_ID}" ]] ; then + echo "Required configuration variable PROJECT_ID was not specified" + exit 1 +elif [[ -z "${IMAGE}" ]] ; then + echo "Required configuration variable IMAGE was not specified" + exit 1 +elif [[ -z "${IMAGE_TAG}" ]] ; then + echo "Required configuration variable IMAGE_TAG was not specified" + exit 1 +elif [[ -z "${DNS_NAME}" ]] ; then + echo "Required configuration variable DNS_NAME was not specified" + exit 1 +elif [[ -z "${IP_NAME}" ]] ; then + echo "Required configuration variable IP_NAME was not specified" + exit 1 +elif [[ -z "${CHAINS_CFG_FILENAME}" ]] ; then + echo "Required configuration variable CHAINS_CFG_FILENAME was not specified" + exit 1 +elif [[ -z "${CONTEXT}" ]] ; then + echo "Required configuration variable CONTEXT was not specified" + exit 1 +else + echo "All required config variables were specified" + echo "" +fi + +echo "" +echo "Setting default project to ${PROJECT_ID}" +gcloud config set project "${PROJECT_ID}" + +CURRENT_CONTEXT=$(kubectl config current-context) +if [[ "${CURRENT_CONTEXT}" != "${CONTEXT}" ]] ; then + echo "Setting kubectl context to ${CONTEXT}" + kubectl config use-context "${CONTEXT}" +else + echo "Kubectl context is ${CONTEXT}" +fi + +print_green "Loading ${GIT_ROOT}/install-bundler/setup-scripts/create-variables.sh" +# shellcheck disable=SC1091 +source "${GIT_ROOT}/install-bundler/setup-scripts/create-variables.sh" "${CONFIG_FILE_PATH}" +echo "Created variables:" +echo "CURRENT_CONTEXT=${CURRENT_CONTEXT}" +echo "CLUSTER_NAME=${CLUSTER_NAME}" +echo "TRIMMED_CLUSTER_NAME=${TRIMMED_CLUSTER_NAME}" +echo "TRIMMED_NAMESPACE=${TRIMMED_NAMESPACE}" +echo "GCP_IAM_ROLE=${GCP_IAM_ROLE}" +echo "GCP_ENCRYPTED_CONFIG_SECRET=${GCP_ENCRYPTED_CONFIG_SECRET}" +echo "GCP_PLAINTEXT_CONFIG_SECRET=${GCP_PLAINTEXT_CONFIG_SECRET}" +echo "SECRET_NAME=${SECRET_NAME}" +echo "KUBE_SERVICE_ACCOUNT=${KUBE_SERVICE_ACCOUNT}" +echo "" + + +echo "" +echo "Getting config values from GCP secret ${GCP_PLAINTEXT_CONFIG_SECRET}" +# when gcloud commands are run in githubaction always specify --project because +# it has the highest level of precedence +GCP_SECRET_CONFIG_VALUE=$(gcloud secrets versions access latest \ + --secret="${GCP_PLAINTEXT_CONFIG_SECRET}"\ + --project="${PROJECT_ID}") + +if [[ -z "${GCP_SECRET_CONFIG_VALUE}" ]] ; then + msj="" + msj="GCP_SECRET_CONFIG_VALUE can't be empty string." + msj="${msj} Please make sure that secret named ${GCP_PLAINTEXT_CONFIG_SECRET} has data" + echo "${msj}" + exit 1 +fi + +if ! 
kubectl get namespace "${NAMESPACE}"; then + echo "Error: ${NAMESPACE} doesnt exists, creating it now" + kubectl create ns "${NAMESPACE}" +else + # shellcheck disable=SC2059 + printf "${GREEN} SUCCESS: ${NAMESPACE} exists ${NC}\n" +fi + +# TODO: Add 2 more variables in chains.sh: memory and CPU, and pass those to helm +# TODO: rm printing of passwords to stdout + +HELM_RELEASE="bundler"; +SECONDS=0 # reset the SECONDS counter + +echo "Starting helm deployment" + +##Reading the relevant*-chains.sh under thechains folder in which all the chians are defined. +print_green "Loading ${GIT_ROOT}/install-bundler/configs/chains/$CHAINS_CFG_FILENAME" +# old source "./configs/chains/$CHAINS_CFG_FILENAME" +# shellcheck disable=SC1090 +source "${GIT_ROOT}/install-bundler/configs/chains/$CHAINS_CFG_FILENAME" +# example of sourced variables +# declare -A chain_mumbai=( +# [name]='chain-80001' +# [chainId]="80001" +# [autoScalingThreshholdHTTPRequestsPerMinute]=10000 +# [autoScalingThreshholdCPU]=2500m +# [minReplica]=8 +# [maxReplica]=8 +# ) + +array_names=$(declare -p | grep -Eo 'chain_[a-zA-Z0-9_]+') +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +for array_name in $array_names; do + # shellcheck disable=SC1087 + eval "NAME=\${$array_name[name]}" + # Using indirect referencing to get the "network" value of the current array + # shellcheck disable=SC1087 + eval "CHAIN_ID=\${$array_name[chainId]}" + # shellcheck disable=SC1087 + eval "AUTOSCALING_THRESHHOLD_HTTP_REQUESTS_PER_MINUTE=\${$array_name[autoScalingThreshholdHTTPRequestsPerMinute]}" + # shellcheck disable=SC1087 + eval "AUTOSCALING_THRESHHOLD_CPU=\${$array_name[autoScalingThreshholdCPU]}" + # shellcheck disable=SC1087 + eval "MIN_REPLICA=\${$array_name[minReplica]}" + # shellcheck disable=SC1087 + eval "MAX_REPLICA=\${$array_name[maxReplica]}" + + ADJ_AUTOSCALING_THRESHHOLD_HTTP_REQUESTS=$(bc -l <<< "$AUTOSCALING_THRESHHOLD_HTTP_REQUESTS_PER_MINUTE/60*1000" | cut -d'.' -f1)m + + echo "" + print_green "Name=$NAME" + print_green "CHAIN_ID=$CHAIN_ID" + print_green "AUTOSCALING_THRESHHOLD_CPU=$AUTOSCALING_THRESHHOLD_CPU" + print_green "MIN_REPLICA=$MIN_REPLICA" + print_green "MAX_REPLICA=$MAX_REPLICA" + print_green "ADJ_AUTOSCALING_THRESHHOLD_HTTP_REQUESTS=${ADJ_AUTOSCALING_THRESHHOLD_HTTP_REQUESTS}" + + echo "" + echo "Deploying HELM chart for $NAME $CHAIN_ID" + + # helm template "${HELM_RELEASE}-${CHAIN_ID}" "$DIR/." \ + helm upgrade --install "${HELM_RELEASE}-${CHAIN_ID}" "$DIR/." 
\ + --wait \ + --timeout 600s \ + --values "$DIR/values.yaml" \ + --namespace "$NAMESPACE" \ + --set nameOverride="$NAME" \ + --set env="$ENV" \ + --set environment="$environment" \ + --set-string namespace="$NAMESPACE" \ + --set secret.passphrase.value="$CONFIG_PASSPHRASE" \ + --set CHAIN_ID="$CHAIN_ID" \ + --set provider="$PROVIDER" \ + --set prometheus.enabled=true \ + --set hpa.average_http_requests_hpa="${ADJ_AUTOSCALING_THRESHHOLD_HTTP_REQUESTS}" \ + --set hpa.average_cpu_hpa="$AUTOSCALING_THRESHHOLD_CPU" \ + --set-string image.name="$IMAGE" \ + --set-string image.tag="$IMAGE_TAG" \ + --set-string gcpSecretManagerName="$GCP_SECRETS_MANAGER_NAME" \ + --set-string hpa.minReplicas="$MIN_REPLICA" \ + --set-string hpa.maxReplicas="$MAX_REPLICA" \ + --set-string projectId="$PROJECT_ID" \ + --set-string secretName="$SECRET_NAME" \ + --set-string configSecretName="$GCP_ENCRYPTED_CONFIG_SECRET" \ + --set-string plainConfigSecretName="$GCP_PLAINTEXT_CONFIG_SECRET" \ + --set-string serviceAccount="$KUBE_SERVICE_ACCOUNT" \ + --set datadog.enable=true \ + --set-string datadog.env="bundler-$NAMESPACE" \ + --set-string datadog.service="bundler-$NAMESPACE-$CHAIN_ID" +done diff --git a/install-bundler/bundler/pre-install.sh b/install-bundler/bundler/pre-install.sh new file mode 100644 index 00000000..3cb82d71 --- /dev/null +++ b/install-bundler/bundler/pre-install.sh @@ -0,0 +1,15 @@ + + +NAMESPACE=$1 +PROJECT_ID=$2 +SECRET_NAME=$3 +GCP_SERVICE_ACCOUNT=$4 +PASSPHRASE=$5 + + +gcloud iam service-accounts create $GCP_SERVICE_ACCOUNT --display-name="Read secrets service account in $NAMESPACE for bundler" + +echo "CONFIG_PASSPHRASE=$PASSPHRASE" | gcloud secrets create $SECRET_NAME --data-file=- +gcloud secrets add-iam-policy-binding $SECRET_NAME \ + --member=serviceAccount:$GCP_SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com \ + --role='roles/secretmanager.secretAccessor' \ No newline at end of file diff --git a/install-bundler/bundler/templates/_helpers.tpl b/install-bundler/bundler/templates/_helpers.tpl new file mode 100644 index 00000000..0f703d98 --- /dev/null +++ b/install-bundler/bundler/templates/_helpers.tpl @@ -0,0 +1,271 @@ +{{/* +Expand the name of the chart. +*/}} +{{ define "chart.name" }} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{- define "selectServer" -}} +{{- if eq .Values.env "production" -}} +https://acme-v02.api.letsencrypt.org/directory +{{- else -}} +https://acme-staging-v02.api.letsencrypt.org/directory +{{- end -}} +{{- end -}} + + + + +{{/* +DD-Trace lables and annotations +*/}} +{{- define "datadog.datatrace" }} +tags.datadoghq.com/env: {{ .Values.datadog.env }} +tags.datadoghq.com/service: {{ .Values.datadog.service }} +tags.datadoghq.com/version: {{ .Values.datadog.version }} +{{- end }} + +{{- define "datadog.datatrace-admission" }} +# Enable Admission Controller to mutate new pods part of this deployment +admission.datadoghq.com/enabled: "true" +{{ if and .Values.datadog.enable (eq .Values.datadog.gke_cluster_type "standard") }} +admission.datadoghq.com/config.mode: socket +# https://docs.datadoghq.com/containers/cluster_agent/admission_controller/?tab=helm#configure-apm-and-dogstatsd-communication-mode +{{- else if and .Values.datadog.enable (eq .Values.datadog.gke_cluster_type "autopilot") -}} +# gke autopilot cluster +{{- end }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + + +{{- define "chart.namespace" -}} + {{- if .Values.namespace -}} + {{- .Values.namespace -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "chart.labels" }} +helm.sh/chart: {{ include "chart.name" . }} +{{ include "chart.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ end }} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "chart.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" }} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: {{ include "chart.name" . }} +{{- end }} + +{{/* +Define metadata for configmap. +*/}} +{{- define "chart.staticconfigmap.metadata" -}} +name: {{ include "chart.name" . }}-env-configmap +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + +{{- define "chart.serviceaccount.name" -}} + + {{ include "chart.namespace" .}}-service-account +{{- end }} + +{{- define "chart.serviceaccount.metadata" -}} +name: {{ include "chart.serviceaccount.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} + +{{- end }} + +{{- define "chart.secret.name" -}} +{{ include "chart.name" . }}-passphrase +{{- end }} + +{{/* +Define metadata for chart secret. +*/}} +{{- define "chart.secret.metadata" -}} +name: {{ include "chart.secret.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + + + + +{{/* +Define metadata for chart pod. +*/}} +{{- define "chart.pod.metadata" -}} +name: {{ include "chart.name" . }}-r +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + +{{/*****************************************************************************/}} + + +{{/************************* Stateful set *******************************/}} +{{/* +Define metadata for statefulset. +*/}} + +{{- define "chart.statefulset.name" -}} +{{ include "chart.name" . }}-statefulset +{{- end }} + + +{{- define "chart.statefulset.metadata" -}} +name: {{ include "chart.statefulset.name" . }} +namespace: {{ include "chart.namespace" . }} + +labels: +{{- include "chart.labels" . | nindent 2 -}} +{{- if .Values.datadog.enable }} +{{ include "datadog.datatrace" . }} +{{- end }} +{{- end }} + +{{- define "chart.template.metadata.labels" -}} +app.kubernetes.io/name: {{ include "chart.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app: {{ include "chart.name" . }} +namespace: {{ include "chart.namespace" . 
}} +{{- if .Values.datadog.enable -}} +{{ include "datadog.datatrace" . }} +{{ include "datadog.datatrace-admission" . }} +{{- end }} +{{- end }} + + + +{{/* +Define annotations for statefulset's template.metadata +*/}} +{{- define "chart.template.metadata.annotations" -}} +{{- if .Values.datadog.enable }} +# https://github.com/DataDog/dd-trace-js/releases +admission.datadoghq.com/js-lib.version: {{ .Values.datadog.dd_js_lib_version }} +ad.datadoghq.com/nginx.logs: '[{"source": {{ include "chart.name" . }}-pod, "service":"webapp"}]' + +{{- end }} +releaseTime: {{ dateInZone "2006-01-02 15:04:05Z" (now) "UTC"| quote }} +{{- if .Values.prometheus.enabled }} +prometheus.io/scrape: "true" +prometheus.io/path: /metrics +prometheus.io/port: "3000" +{{- end }} +{{- end }} + + + +{{/*********************************************************************/}} +{{/************************ Ingress *************************/}} + + + +{{/* +Define metadata for datadog configmap. +*/}} +{{- define "chart.datadogconfigmap.metadata" -}} +name: {{ include "chart.name" . }}-datadogconfigmap +namespace: {{ include "chart.namespace" . }} +{{- end }} + +{{- define "chart.ingress.metadata" -}} +name: {{ include "chart.name" . }}-ingress +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} +{{/************************* Done *******************************/}} + +{{/************************* Prometheus *******************************/}} + +{{- define "chart.adapter.metadata" -}} +name: {{ include "chart.name" . }}-adapter-configmap +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + +{{/************************* Done *******************************/}} + +{{/************************* HPA *******************************/}} +{{- define "chart.hpa.name" -}} +{{ include "chart.name" . }}-hpa +{{- end }} + +{{- define "chart.hpa.metadata" -}} +name: {{ include "chart.hpa.name" . }} +namespace: {{ include "chart.namespace" . }} +chainId: {{.Values.CHAIN_ID}} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + + +{{/************************* Service *******************************/}} + +{{/* +Define metadata for service. +*/}} +{{- define "chart.service.name" -}} +{{ include "chart.name" . }}-service +{{- end }} + + +{{- define "chart.service.metadata" -}} +name: {{ include "chart.service.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +annotations: + cloud.google.com/neg: '{"ingress": true}' +{{- end }} diff --git a/install-bundler/bundler/templates/configmap-dd.yaml b/install-bundler/bundler/templates/configmap-dd.yaml new file mode 100644 index 00000000..e92d4c67 --- /dev/null +++ b/install-bundler/bundler/templates/configmap-dd.yaml @@ -0,0 +1,18 @@ +{{- if .Values.datadog.enable }} +apiVersion: v1 +kind: ConfigMap +metadata: + {{ include "chart.datadogconfigmap.metadata" . 
| nindent 2 }} + +data: + DD_TRACE_AGENT_URL: "unix:///var/run/datadog/apm.socket" + DD_ENV: {{ .Values.datadog.env }} + DD_SERVICE: {{ .Values.datadog.service }} + DD_VERSION: {{ .Values.datadog.version }} + DD_TRACE_DEBUG: {{ .Values.datadog.configs.DD_TRACE_DEBUG | quote }} + DD_TRACE_STARTUP_LOGS: {{ .Values.datadog.configs.DD_TRACE_STARTUP_LOGS | quote }} + DD_PROFILING_ENABLED: {{ .Values.datadog.configs.DD_PROFILING_ENABLED | quote }} + DD_LOGS_INJECTION: {{ .Values.datadog.configs.DD_LOGS_INJECTION | quote }} + DD_RUNTIME_METRICS_ENABLED: {{ .Values.datadog.configs.DD_RUNTIME_METRICS_ENABLED | quote }} + +{{- end }} \ No newline at end of file diff --git a/install-bundler/bundler/templates/envConfigMap.yaml b/install-bundler/bundler/templates/envConfigMap.yaml new file mode 100644 index 00000000..a284ca64 --- /dev/null +++ b/install-bundler/bundler/templates/envConfigMap.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: {{ include "chart.staticconfigmap.metadata" . | nindent 2 }} +data: + BUNDLER_CHAIN_ID: "{{ .Values.CHAIN_ID }}" + DD_PROFILING_ENABLED: "false" + DD_ENV: "bundler-tw-staging" + DD_SERVICE: {{ .Values.datadog.service }} + DD_VERSION: "1.0.3" diff --git a/install-bundler/bundler/templates/hpa.yaml b/install-bundler/bundler/templates/hpa.yaml new file mode 100644 index 00000000..0032a760 --- /dev/null +++ b/install-bundler/bundler/templates/hpa.yaml @@ -0,0 +1,42 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + # name: sample-app +{{ include "chart.hpa.metadata" . | nindent 2 }} + +spec: + scaleTargetRef: + # point the HPA at the sample application + # you created above + apiVersion: apps/v1 + kind: StatefulSet + name: {{ include "chart.statefulset.name" .}} + + # autoscale between 1 and 10 replicas + minReplicas: {{.Values.hpa.minReplicas}} + maxReplicas: {{.Values.hpa.maxReplicas}} + metrics: + # use a "Pods" metric, which takes the average of the + # given metric across all pods controlled by the autoscaling target + - type: Pods + pods: + # use the metric that you used above: pods/http_requests + metric: + name: {{.Values.CHAIN_ID}}-http-requests + # target 500 milli-requests per second, + # which is 1 request every two seconds + # http_requests_per_two_minute metric goes beyond 100, + # then you should set the averageValue for that metric + # to 100 (or 100000m, as 1 = 1000m). + target: + type: AverageValue + averageValue: {{.Values.hpa.average_http_requests_hpa}} + + + - type: Pods + pods: + metric: + name: {{.Values.CHAIN_ID}}-cpu-usage + target: + type: AverageValue + averageValue: {{.Values.hpa.average_cpu_hpa}} # Adjust the averageValue as per your requirements. \ No newline at end of file diff --git a/install-bundler/bundler/templates/secret.yaml b/install-bundler/bundler/templates/secret.yaml new file mode 100644 index 00000000..06251924 --- /dev/null +++ b/install-bundler/bundler/templates/secret.yaml @@ -0,0 +1,13 @@ +{{- if eq .Values.env "test" }} + +apiVersion: v1 +kind: Secret +metadata: + {{ include "chart.secret.metadata" . 
| nindent 2 }} + +type: Opaque +data: + # Assuming you want to use a base64 encoded secret for local test + {{ .Values.secret.passphrase.name }}: {{ .Values.secret.passphrase.value | b64enc | quote }} + +{{- end }} \ No newline at end of file diff --git a/install-bundler/bundler/templates/service.yaml b/install-bundler/bundler/templates/service.yaml new file mode 100644 index 00000000..14c0fafd --- /dev/null +++ b/install-bundler/bundler/templates/service.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: +{{ include "chart.service.metadata" . | nindent 2 }} +spec: + selector: + {{ include "chart.selectorLabels" . | nindent 4 }} + ports: + - protocol: TCP + port: {{ .Values.port }} + targetPort: {{ .Values.targetPort }} \ No newline at end of file diff --git a/install-bundler/bundler/templates/statefulset.yaml b/install-bundler/bundler/templates/statefulset.yaml new file mode 100644 index 00000000..f10eb891 --- /dev/null +++ b/install-bundler/bundler/templates/statefulset.yaml @@ -0,0 +1,186 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + {{- include "chart.statefulset.metadata" . | nindent 2 }} +spec: + serviceName: {{ include "chart.name" . }}-service + # updateStrategy: + # type: RollingUpdate + # rollingUpdate: + # partition: 0 + + replicas: {{.Values.hpa.minReplicas}} + selector: + matchLabels: + {{ include "chart.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{ include "chart.template.metadata.labels" . | nindent 8 }} + annotations: + {{ include "chart.template.metadata.annotations" . | nindent 8 }} + name: {{ include "chart.name" . }}-pod + spec: + serviceAccountName: {{ .Values.serviceAccount}} + initContainers: + - name: fetch-gcp-secrets + image: google/cloud-sdk:latest + command: + - /bin/sh + - -c + - | + gcloud secrets versions access latest --secret="{{.Values.secretName}}" > /gcpsecrets/config-passphrase + gcloud secrets versions access latest --secret="{{.Values.configSecretName}}" > /gcpsecrets/config.json.enc + gcloud secrets versions access latest --secret="{{.Values.plainConfigSecretName}}" > /gcpsecrets/{{.Values.environment}}.json + volumeMounts: + - name: gcpsecrets + mountPath: /gcpsecrets + + - name: init-ordinal + image: busybox:1.28 + command: + - "sh" + - "-c" + - > + echo $(echo $POD_NAME | awk -F '-' '{print $NF}') > /etc/podinfo/ordinal_index; + export POD_INDEX=$(cat /etc/podinfo/ordinal_index); + export NODE_CONFIG='{"relayer": {"nodePathIndex": '$POD_INDEX'}}'; + echo $NODE_CONFIG > /etc/podinfo/ordinal_index ; + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: podinfo + mountPath: /etc/podinfo + + {{- if .Values.affinity_tolerations.enable }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .Values.affinity_tolerations.key }} + operator: In + values: + - {{ .Values.affinity_tolerations.values }} + tolerations: + - effect: NoSchedule + key: {{ .Values.affinity_tolerations.key }} + operator: Equal + value: {{ .Values.affinity_tolerations.values }} + {{- end }} + containers: + - name: {{ include "chart.name" . 
}}-pod + imagePullPolicy: {{ if eq .Values.provider "local" }} "Never" {{ else }} "IfNotPresent" {{ end }} + image: {{ .Values.image.name }}:{{.Values.image.tag}} + + ports: + - containerPort: {{ .Values.targetPort }} + + env: + - name: CHAIN_ID + value: {{ .Values.CHAIN_ID | quote }} + + - name: NODE_ENV + value: {{ .Values.environment }} + + envFrom: + {{- if and (.Values.datadog.enable) ( eq .Values.datadog.gke_cluste_type "standard") -}} + - configMapRef: + name: {{ include "chart.name" . }}-datadogconfigmap + {{- end }} + + - configMapRef: + name: {{ include "chart.name" . }}-env-configmap + + {{- if eq .Values.env "test" }} + - secretRef: + name: {{ include "chart.name" . }}-passphrase + {{- end }} + + volumeMounts: + - mountPath: "/home/nonroot/bundler/config.json.enc" + name: gcpsecrets + subPath: "config.json.enc" + + - name: gcpsecrets + mountPath: "/home/nonroot/bundler/config/{{.Values.environment}}.json" + subPath: "{{.Values.environment}}.json" + + - name: gcpsecrets + mountPath: /gcpsecrets + + - name: podinfo + mountPath: /etc/podinfo + + {{- if and (.Values.datadog.enable) ( eq .Values.datadog.gke_cluste_type "standard") }} + - mountPath: /var/run/datadog + name: apmsocketpath + {{- end }} + resources: + requests: + memory: {{ .Values.resources.requests.memory }} + cpu: {{ .Values.resources.requests.cpu }} + limits: + memory: {{ .Values.resources.limits.memory }} + cpu: {{ .Values.resources.limits.cpu }} + + startupProbe: + httpGet: + path: /health + port: {{ .Values.port }} + failureThreshold: 40 + periodSeconds: 10 + + livenessProbe: + httpGet: + path: /health + port: {{ .Values.targetPort }} + initialDelaySeconds: 30 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 30 + failureThreshold: 5 + + readinessProbe: + httpGet: + path: /health + port: {{ .Values.targetPort }} + initialDelaySeconds: 10 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 10 + failureThreshold: 3 + + lifecycle: + # https://learnk8s.io/graceful-shutdown + # still server traffic for 15 seconds after k8s issued the SIG term + preStop: + exec: + command: ["sleep", "15"] + + volumes: + {{- if eq .Values.env "test" }} + + - name: config-volume + configMap: + name: bundler-common-configmap + {{- end}} + + - name: env-config-volume + configMap: + name: {{ include "chart.name" . 
}}-env-configmap + + - name: podinfo + emptyDir: {} + + - name: gcpsecrets + emptyDir: { } + + {{- if and (.Values.datadog.enable) ( eq .Values.datadog.gke_cluste_type "standard") }} + - hostPath: + path: /var/run/datadog/ + name: apmsocketpath + {{- end }} \ No newline at end of file diff --git a/install-bundler/bundler/test-secret-access-on-gcp.yaml b/install-bundler/bundler/test-secret-access-on-gcp.yaml new file mode 100644 index 00000000..3e81b629 --- /dev/null +++ b/install-bundler/bundler/test-secret-access-on-gcp.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-gcp-access + namespace: trustwallet +spec: + serviceAccountName: trustwal-trustwalle-prod-sa + containers: + - name: test-container + image: google/cloud-sdk:latest + command: ["/bin/sleep", "3600"] \ No newline at end of file diff --git a/install-bundler/bundler/values.yaml b/install-bundler/bundler/values.yaml new file mode 100644 index 00000000..974afd4e --- /dev/null +++ b/install-bundler/bundler/values.yaml @@ -0,0 +1,53 @@ +namespace: "" +env: "test" +hpa: + minReplicas: 2 + maxReplicas: 3 + average_http_requests_hpa: 20 + average_cpu_hpa: 80 + +port: "3000" +targetPort: "3000" +replicaCount: 2 +resources: + requests: + memory: "2500Mi" + cpu: "2500m" + limits: + memory: "2500Mi" + cpu: "2500m" + +# secret: +# projectID: biconomy-test +# key: bundlers +# config: +# name: config.json.enc + +# passphrase: +# key: passphrase +# name: CONFIG_PASSPHRASE +# version: latest + +# datadog configs +affinity_tolerations: + enable: false + +datadog: + enable: true + # gke_cluster_type: standard OR autopilot + gke_cluster_type: autopilot + # https://github.com/DataDog/dd-trace-js/releases + dd_js_lib_version: v4.18.0 + env: "testing2" + service: "sdk-relayer-service" + version: "v3.15.0" + configs: + DD_TRACE_DEBUG: "true" + DD_TRACE_STARTUP_LOGS: "false" + DD_PROFILING_ENABLED: "true" + DD_LOGS_INJECTION: "false" + DD_RUNTIME_METRICS_ENABLED: "true" + + logs: + enabled: false + containerCollectAll: false diff --git a/install-bundler/configs/bundler-tw-production.cfg b/install-bundler/configs/bundler-tw-production.cfg new file mode 100644 index 00000000..3a583dec --- /dev/null +++ b/install-bundler/configs/bundler-tw-production.cfg @@ -0,0 +1,10 @@ +ENV=prod +environment=production +NAMESPACE="trustwallet" +PROJECT_ID="prj-biconomy-prod-001" +IMAGE="us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler" +IMAGE_TAG=1.5 +DNS_NAME="tw-bundler.biconomy.io" +IP_NAME="trustwallet" +CHAINS_CFG_FILENAME="prod-trust-wallet-chains.sh" +CONTEXT="gke_prj-biconomy-prod-001_us-central1_trustwallet" \ No newline at end of file diff --git a/install-bundler/configs/bundler-tw-staging.cfg b/install-bundler/configs/bundler-tw-staging.cfg new file mode 100644 index 00000000..459b5d10 --- /dev/null +++ b/install-bundler/configs/bundler-tw-staging.cfg @@ -0,0 +1,10 @@ +ENV=staging +environment=staging +NAMESPACE="bundler-tw-staging" +PROJECT_ID="biconomy-prod" +IMAGE="us-docker.pkg.dev/biconomy-prod/bundler/trustwallet" +IMAGE_TAG=8d544d0 +DNS_NAME="bundler-tw-staging.biconomy.io" +IP_NAME="bundler-tw-staging" +CHAINS_CFG_FILENAME="staging-trust-wallet-chains.sh" +CONTEXT="gke_biconomy-prod_us-east1_dedicated-bundler" \ No newline at end of file diff --git a/install-bundler/configs/chains/prod-trust-wallet-chains.sh b/install-bundler/configs/chains/prod-trust-wallet-chains.sh new file mode 100644 index 00000000..9f2c7c77 --- /dev/null +++ b/install-bundler/configs/chains/prod-trust-wallet-chains.sh @@ -0,0 +1,73 @@ + +# 
declare -A chain_mumbai=( +# [name]='chain-80001' +# [chainId]="80001" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=1 +# [maxReplica]=20 +# ) + +# declare -A chain_bnb=( +# [name]='chain-56' +# [chainId]="56" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=6 +# [maxReplica]=6 +# ) + +declare -A chain_polygon=( + [name]='chain-137' + [chainId]="137" + [autoScalingThreshholdHTTPRequestsPerMingute]=1000 + [autoScalingThreshholdCPU]=800m + [minReplica]=6 + [maxReplica]=6) + + +# declare -A chain_arbitrum=( +# [name]='chain-42161' +# [chainId]="42161" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=3 +# [maxReplica]=3 +# ) + + +# declare -A chain_avalanche=( +# [name]='chain-43114' +# [chainId]="43114" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=3 +# [maxReplica]=3 +# ) + +# declare -A chain_opBNB=( +# [name]='chain-204' +# [chainId]="204" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=900m +# [minReplica]=3 +# [maxReplica]=3 +# ) + +# declare -A chain_base=( +# [name]='chain-8453' +# [chainId]="8453" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=3 +# [maxReplica]=3 +# ) + +# declare -A chain_optimism=( +# [name]='chain-10' +# [chainId]="10" +# [autoScalingThreshholdHTTPRequestsPerMinute]=1000 +# [autoScalingThreshholdCPU]=800m +# [minReplica]=3 +# [maxReplica]=3 +# ) \ No newline at end of file diff --git a/install-bundler/configs/chains/staging-trust-wallet-chains.sh b/install-bundler/configs/chains/staging-trust-wallet-chains.sh new file mode 100644 index 00000000..a7087db4 --- /dev/null +++ b/install-bundler/configs/chains/staging-trust-wallet-chains.sh @@ -0,0 +1,9 @@ + +# declare -A chain_mumbai=( +# [name]='chain-80001' +# [chainId]="80001" +# [autoScalingThreshholdHTTPRequestsPerMinute]=10000 +# [autoScalingThreshholdCPU]=2500m +# [minReplica]=2 +# [maxReplica]=2 +# ) \ No newline at end of file diff --git a/install-bundler/configs/chains/test-chains.sh b/install-bundler/configs/chains/test-chains.sh new file mode 100644 index 00000000..aeae34de --- /dev/null +++ b/install-bundler/configs/chains/test-chains.sh @@ -0,0 +1,13 @@ + +declare -A chain_mumbai=( + [name]='chain-80001' + [chainId]="80001" + [autoScalingThreshholdHTTPRequestsPerMinute]=10000 + [autoScalingThreshholdCPU]=2500m + [minRelayerCount]="200" + [maxRelayerCount]="220" + [fundingBalanceThreshold]="1" + [fundingRelayerAmount]="2" + [minReplica]=6 + [maxReplica]=9 +) \ No newline at end of file diff --git a/install-bundler/configs/example.cfg b/install-bundler/configs/example.cfg new file mode 100644 index 00000000..2ae56bb7 --- /dev/null +++ b/install-bundler/configs/example.cfg @@ -0,0 +1,17 @@ +ENV="prod" +NAMESPACE="bundlerprod" +PROJECT_ID="prj-biconomy-prod-001" +IMAGE="us-docker.pkg.dev/$PROJECT_ID/bundler/bundler" +IMAGE_TAG="0.1-testOnly" +DNS_NAME="bundler.bico-test.uk" +IP_NAME="bundlerprod" +CHAINS_CFG_FILENAME="test-chains.sh" +CONTEXT="gke_prj-biconomy-prod-001_us-central1_bundlerprod" +SIMULATION_DATA_JSON='{"tenderlyData": {}}' +TOKEN_PRICE_JSON='{"coinMarketCapApi": "a305bb95-7c48-4fb6-bc65-4de8c9193f2f"}' +SLACK_JSON='{"token": "xoxb-8708217855M7XEOBb4HD","channel": "4A9077TC5"}' +PROVIDER_JSON='{"137": "", "42161": "https://arb-ma", "56": "", "10": "", "43114": "", "8453": "", "80001": 
"", "204": ""}' +DATASOURCES_JSON='{"mongoUrl": "mongodb+srv://:@/relayer-node-service", "redisUrl": "redis://:@redis-master.bundlerprod.svc.cluster.local:6379"}' +SOCKET_SERVICE_JSON='{"wssUrl": "ws://centrifugo.bundlerprod.svc.cluster.local:9000/connection/websocket","httpUrl": "http://centrifugo.bundlerprod.svc.cluster.local:9000/api","token": "","apiKey": ""}' +QUEUE_URL="amqp://:@rabbitmq.bundlerprod.svc.cluster.local:5672" + diff --git a/install-bundler/grafana.md b/install-bundler/grafana.md new file mode 100644 index 00000000..0bca1da5 --- /dev/null +++ b/install-bundler/grafana.md @@ -0,0 +1,32 @@ + +# +Let’s assume you have two pods with names "statefulset-pod-1" and "statefulset-pod-2". + +If "statefulset-pod-1" has received 10 requests in the last 5 minutes, and "statefulset-pod-2" +has received 20 requests in the last 5 minutes, then the rate function will calculate the per-second +average rate for these two pods separately. + +After that, the sum by(pod) part will sum up these rates separately for each "pod" label, so you will + get separate sum results for "statefulset-pod-1" and "statefulset-pod-2". + +``` +sum by(pod) (rate(http_request_duration_seconds_bucket{pod=~".*statefulset.*"}[2m])) +``` + +# cpu +``` +sum by(pod) (rate(container_cpu_usage_seconds_total{pod=~".*statefulset.*"}[2m])) +``` + +# http requests per pod + +``` +sum(rate(http_requests_total{pod=~".*statefulset.*"}[2m])) by (pod) +``` + +## average http requests over all pods + http_requests_per_two_minute metric goes beyond 100, then you should set the averageValue for that metric to 100 (or 100000m, as 1 = 1000m). +``` +avg(rate(http_requests_total{pod=~".*statefulset.*"}[2m])) + +``` \ No newline at end of file diff --git a/install-bundler/install-bundler-common/bundler-common/.helmignore b/install-bundler/install-bundler-common/bundler-common/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/install-bundler/install-bundler-common/bundler-common/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/install-bundler-common/bundler-common/Chart.yaml b/install-bundler/install-bundler-common/bundler-common/Chart.yaml new file mode 100644 index 00000000..0e9fd00a --- /dev/null +++ b/install-bundler/install-bundler-common/bundler-common/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: bundler-common +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/install-bundler/install-bundler-common/bundler-common/templates/_helpers.tpl b/install-bundler/install-bundler-common/bundler-common/templates/_helpers.tpl new file mode 100644 index 00000000..3f411025 --- /dev/null +++ b/install-bundler/install-bundler-common/bundler-common/templates/_helpers.tpl @@ -0,0 +1,56 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "chart.namespace" -}} + {{- if .Values.namespace -}} + {{- .Values.namespace -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.name" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{/* +Define metadata for configmap. +*/}} +{{- define "chart.configmap.metadata" -}} +name: {{ include "chart.name" . }}-configmap +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + diff --git a/install-bundler/install-bundler-common/bundler-common/templates/encrptedConfigConfigmap.yaml b/install-bundler/install-bundler-common/bundler-common/templates/encrptedConfigConfigmap.yaml new file mode 100644 index 00000000..b43fadfc --- /dev/null +++ b/install-bundler/install-bundler-common/bundler-common/templates/encrptedConfigConfigmap.yaml @@ -0,0 +1,8 @@ + +apiVersion: v1 +kind: ConfigMap +metadata: +{{- include "chart.configmap.metadata" . 
| nindent 2 }} + +data: + config.json.enc : {{ .Files.Get "files/config.json.enc" }} diff --git a/install-bundler/install-bundler-common/install.sh b/install-bundler/install-bundler-common/install.sh new file mode 100644 index 00000000..6aa9b152 --- /dev/null +++ b/install-bundler/install-bundler-common/install.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e +NC='\033[0m' +YELLOW='\033[0;33m' + +ENV=$1 +NAMESPACE=$2 +HELM_RELEASE="bundler-common" + +# Check if the required arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 for Bundler common" + exit 1 +fi + + +printf "\n${YELLOW}------ Deploying %s to %s -----${NC}\n" "$HELM_RELEASE" "$NAMESPACE" +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/bundler-common/" \ + -n "$NAMESPACE" \ + --set nameOverride=bundler-common + +printf "bundler-common deployment completed\n"; \ No newline at end of file diff --git a/install-bundler/install-centrifugo/centrifugo/.helmignore b/install-bundler/install-centrifugo/centrifugo/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/install-centrifugo/centrifugo/Chart.yaml b/install-bundler/install-centrifugo/centrifugo/Chart.yaml new file mode 100644 index 00000000..d8f9a6bb --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +appVersion: 5.0.4 +description: Centrifugo is a scalable real-time messaging server in language-agnostic + way +home: https://centrifugal.dev +icon: https://centrifugal.dev/img/favicon.png +maintainers: +- email: frvzmb@gmail.com + name: FZambia +name: centrifugo +version: 11.0.7 diff --git a/install-bundler/install-centrifugo/centrifugo/README.md b/install-bundler/install-centrifugo/centrifugo/README.md new file mode 100644 index 00000000..bb0c990a --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/README.md @@ -0,0 +1,305 @@ +# Centrifugo + +This chart bootstraps a [Centrifugo](https://centrifugal.github.io/centrifugo/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.9+ + +## Get Repo Info + +```console +helm repo add centrifugal https://centrifugal.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] centrifugal/centrifugo + +# Helm 2 +$ helm install --name [RELEASE_NAME] centrifugal/centrifugo +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the Centrifugo chart and their default values. + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `nameOverride` | String to partially override centrifugo.fullname | `nil` | +| `fullnameOverride` | String to fully override centrifugo.fullname | `nil` | + +### Centrifugo common parameters + +| Parameter | Description | Default | +|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `image.registry` | Centrifugo image registry | `docker.io` | +| `image.repository` | Centrifugo image name | `centrifugo/centrifugo` | +| `image.tag` | Centrifugo image tag | Taken from chart `appVersion` | +| `image.pullPolicy` | Centrifugo image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `service.type` | service type | `ClusterIP` | +| `service.clusterIP` | service clusterIP IP | `nil` | +| `service.port` | service port | `8000` | +| `service.nodePort` | K8s service node port | `nil` | +| `service.appProtocol` | Set appProtocol field for port - it could be useful for manually setting protocols for Istio | `nil` | +| `service.useSeparateInternalService` | Use separate service for internal endpoints. It could be useful for configuring same port number for all services. | `false` | +| `service.useSeparateGrpcService` | Use separate service for GRPC endpoints. It could be useful for configuring same port number for all services. | `false` | +| `service.useSeparateUniGrpcService` | Use separate service for GRPC uni stream. It could be useful for configuring same port number for all services. 
| `false` | +| `internalService.type` | internal (for API, Prometheus metrics, admin web interface, health checks) port service type | `ClusterIP` | +| `internalService.clusterIP` | internal (for API, Prometheus metrics, admin web interface, health checks) service clusterIP IP | `nil` | +| `internalService.port` | internal (for API, Prometheus metrics, admin web interface, health checks) service port | `9000` | +| `internalService.nodePort` | internal (for API, Prometheus metrics, admin web interface, health checks) K8s service node port | `nil` | +| `internalService.appProtocol` | Set appProtocol field for port | `nil` | +| `grpcService.type` | GRPC API port service type | `ClusterIP` | +| `grpcService.clusterIP` | GRPC API service clusterIP IP | `nil` | +| `grpcService.port` | GRPC API service port | `10000` | +| `grpcService.nodePort` | GRPC API K8s service node port | `nil` | +| `grpcService.appProtocol` | Set appProtocol field for port | `nil` | +| `env` | Additional environment variables to be passed to Centrifugo container. | `nil` | +| `envSecret` | Additional secret environment variables to be passed to Centrifugo container. | `nil` | +| `config` | Centrifugo configuration, will be transformed into config.json file | `{"admin":true,"engine":"memory","namespaces":[],"v3_use_offset":true}` | +| `existingSecret` | Name of existing secret to use for secret's parameters. The secret has to contain the keys below | `nil` | +| `initContainers` | Set initContainers, e.g. wait for other resources | `nil` | +| `secrets.tokenHmacSecretKey` | Secret key for HMAC tokens. | `nil` | +| `secrets.adminPassword` | Admin password used to protect access to web interface. | `nil` | +| `secrets.adminSecret` | Admin secret used to create auth tokens on user login into admin web interface. | `nil` | +| `secrets.apiKey` | Centrifugo api_key for Centrifugo API endpoint authorization. | `nil` | +| `secrets.grpcApiKey` | Centrifugo grpc_api_key for Centrifugo GRPC API authorization. | `nil` | +| `secrets.redisAddress` | Connection string to Redis. | `nil` | +| `secrets.redisPassword` | Password for Redis. | `nil` | +| `secrets.redisSentinelPassword` | Password for Redis Sentinel. | `nil` | +| `secrets.natsUrl` | Connection string to Nats. | `nil` | +| `secrets.license` | Centrifugo PRO license | `nil` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------|----------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `nil` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `nil` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. 
| `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +## Concepts + +This chart by default starts Centrifugo with Memory engine. This means that you can only run one Centrifugo instance pod in default setup. If you need to run more pods to scale and load-balance connections between them – run Centrifugo with Redis engine or with Nats broker (for at most once PUB/SUB only). See examples below. + +Centrifugo service exposes 3 ports: + +* for client connections from the outside of your cluster. This is called external port: 8000 by default. +* internal port for API, Prometheus metrics, admin web interface, health checks. So these endpoints not available from the outside when enabling ingress. This is called internal port: 9000 by default. +* GRPC API port: 10000 by default. + +Ingress proxies on external port. + +## Configuration + +Chart follows usual practices when working with Helm. All Centrifugo configuration options can be set. You can set them using custom `values.yaml`: + +```yaml +config: + admin: false + namespaces: + - name: "chat" + - presence: true +``` + +And deploy with: + +```console +helm install [RELEASE_NAME] -f values.yaml centrifugal/centrifugo +``` + +Or you can override options using `--set` flag, for example: + +```console +helm install [RELEASE_NAME] centrifugal/centrifugo --set config.namespaces[0].name=chat --set config.namespaces[0].presence=true +``` + +This chart also defines several secrets. For example here is an example that configures HTTP API key and token HMAC secret key. + +```console +helm install [RELEASE_NAME] centrifugal/centrifugo --set secrets.apiKey= --set secrets.tokenHmacSecretKey= +``` + +See full list of supported secrets inside chart [values.yaml](https://github.com/centrifugal/helm-charts/blob/master/charts/centrifugo/values.yaml). + +## Scale with Redis engine + +Run Redis (here we are using Redis chart from bitnami, but you can use any other Redis deployment): + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install redis bitnami/redis --set auth.enabled=false +``` + +Then start Centrifugo with `redis` engine and pointing it to Redis: + +```console +helm install centrifugo -f values.yaml ./centrifugo --set config.engine=redis --set config.redis_address=redis://redis-master:6379 --set replicaCount=3 +``` + +Now example with Redis Sentinel (again using chart from bitnami): + +```console +helm install redis bitnami/redis --set auth.enabled=false --set cluster.enabled=true --set sentinel.enabled=true +``` + +Then point Centrifugo to Sentinel: + +```console +helm install centrifugo -f values.yaml ./centrifugo --set config.engine=redis --set config.redis_sentinel_master_name=mymaster --set config.redis_sentinel_address=redis:26379 --set replicaCount=3 +``` + +Example with Redis Cluster (using `bitnami/redis-cluster` chart, but again the way you run Redis is up to you actually): + +```console +helm install redis bitnami/redis-cluster --set usePassword=false +``` + +Then point Centrifugo to Redis Cluster: + +```console +helm install centrifugo -f values.yaml ./centrifugo --set config.engine=redis --set config.redis_cluster_address=redis-redis-cluster-0:6379 --set replicaCount=3 +``` + +Note: it's possible to set Redis URL and Redis/Sentinel passwords over secrets if needed. 
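+
+For example, a minimal sketch using the chart's `secrets.*` parameters from the table above (the address and password values here are placeholders, not real endpoints):
+
+```console
+helm install centrifugo centrifugal/centrifugo --set config.engine=redis --set secrets.redisAddress=redis://redis-master:6379 --set secrets.redisPassword=changeme --set replicaCount=3
+```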
+ +## With Nats broker + +```console +helm repo add nats https://nats-io.github.io/k8s/helm/charts/ +helm install nats nats/nats --set cluster.enabled=true +``` + +Then start Centrifugo pointing to Nats broker: + +```console +helm install centrifugo -f values.yaml ./centrifugo --set config.broker=nats --set config.nats_url=nats://nats:4222 --set replicaCount=3 +``` + +Note: it's possible to set Nats URL over secrets if needed. + +## Using initContainers + +You can define `initContainers` in your values.yaml to wait for other resources or to do some init jobs. `initContainers` might be useful to wait for your engine to be ready before starting centrifugo. + +### Redis + +```yaml +initContainers: + - name: wait-for-redis + image: ghcr.io/patrickdappollonio/wait-for:latest + env: + - name: REDIS_ADDRESS + value: "redis:6379" + command: + - /wait-for + args: + - --host="$(REDIS_ADDRESS)" + - --timeout=240s + - --verbose +``` + +### Example Nats + +```yaml +initContainers: + - name: wait-for-nats + image: ghcr.io/patrickdappollonio/wait-for:latest + env: + - name: NATS_ADDRESS + value: "nats:4222" + command: + - /wait-for + args: + - --host="$(NATS_ADDRESS)" + - --timeout=240s + - --verbose +``` + +## Upgrading + +### v10 -> v11 + +Major bump to 11.0.0 caused by breaking change in horizontal pod autoscaling configuration. See changes in https://github.com/centrifugal/helm-charts/pull/64 for more details. TLDR: cpu scaling should be explicitly enabled now, cpu and memory configuration moved to nested object, both now have `enabled` flags for granular configuraion. + +### v9 -> v10 + +In v10 we are using Centrifugo v5 as base appVersion. See [Centrifugo v5.0.0 release notes](https://github.com/centrifugal/centrifugo/releases/tag/v5.0.0). + +### v8 -> v9 + +In v9 we are using Centrifugo v4 as base appVersion. See [Centrifugo v4.0.0 release notes](https://github.com/centrifugal/centrifugo/releases/tag/v4.0.0). + +### v7 -> v8 + +In v8 version we are fixing an inconsistency in `existingSecret` option names reported in [#33](https://github.com/centrifugal/helm-charts/issues/33). + +So, in `existingSecret`: + +* admin_password -> adminPassword +* admin_secret -> adminSecret +* token_hmac_secret_key -> tokenHmacSecretKey +* api_key -> apiKey +* grpc_api_key -> grpcApiKey +* redis_address -> redisAddress +* redis_password -> redisPassword +* redis_sentinel_password -> redisSentinelPassword +* nats_url -> natsUrl + +### v5 -> v6 + +v6 aims to simplify chart configuration and make it a bit more idiomatic. See pull request [#6](https://github.com/centrifugal/helm-charts/pull/6) for all the changes. + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - Three type of services were moved to their own block. + - To enable separate services use `useSeparateInternalService` and `useSeparateGrpcService` and `useSeparateUniGrpcService` flags. + - `ServiceMonitor` move to block `metrics` with additional parameters, `labels` renamed to `additionalLabels` - removed configuration block `centrifugo`, all configuration under that block moved to top level. + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. 
diff --git a/install-bundler/install-centrifugo/centrifugo/templates/NOTES.txt b/install-bundler/install-centrifugo/centrifugo/templates/NOTES.txt new file mode 100644 index 00000000..69902f60 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/NOTES.txt @@ -0,0 +1,20 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "centrifugo.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "centrifugo.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "centrifugo.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "centrifugo.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8000:8000 +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/_helpers.tpl b/install-bundler/install-centrifugo/centrifugo/templates/_helpers.tpl new file mode 100644 index 00000000..702df465 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/_helpers.tpl @@ -0,0 +1,121 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "centrifugo.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "centrifugo.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "centrifugo.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "centrifugo.labels" -}} +helm.sh/chart: {{ include "centrifugo.chart" . }} +{{ include "centrifugo.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "centrifugo.selectorLabels" -}} +app.kubernetes.io/name: {{ include "centrifugo.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "centrifugo.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "centrifugo.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "centrifugo.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "centrifugo.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := coalesce .Values.image.tag (printf "v%s" .Chart.AppVersion ) | toString -}} +{{- if .Values.global -}} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "centrifugo.imagePullSecrets" -}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if .Values.image.pullSecrets}} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{- define "centrifugo.secretName" -}} +{{- if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "centrifugo.fullname" .) -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/configmap.yaml b/install-bundler/install-centrifugo/centrifugo/templates/configmap.yaml new file mode 100644 index 00000000..4942c023 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/configmap.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "centrifugo.fullname" . }}-config + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} +data: + config.json: |- +{{ toJson .Values.config| indent 4 }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/deployment.yaml b/install-bundler/install-centrifugo/centrifugo/templates/deployment.yaml new file mode 100644 index 00000000..b4c7a64c --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/deployment.yaml @@ -0,0 +1,191 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "centrifugo.fullname" . }} + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . 
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "centrifugo.selectorLabels" . | nindent 6 }} + {{- if .Values.deploymentStrategy }} + strategy: + {{- toYaml .Values.deploymentStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "centrifugo.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- with .Values.topologySpreadConstraints }} + {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- include "centrifugo.imagePullSecrets" . | indent 6 }} + serviceAccountName: {{ include "centrifugo.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + volumes: + - name: {{ include "centrifugo.fullname" . }}-config + configMap: + name: {{ include "centrifugo.fullname" . }}-config + {{- with .Values.volumes }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.initContainers}} + initContainers: + {{- with .Values.initContainers }} + {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: {{ include "centrifugo.image" .}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - centrifugo + args: + - --health + - --prometheus + - --port + - "{{ .Values.service.port }}" + - --internal_port + - "{{ .Values.internalService.port }}" + - --grpc_api + - --grpc_api_port + - "{{ .Values.grpcService.port }}" + - --uni_grpc_port + - "{{ .Values.uniGrpcService.port }}" + env: + - name: CENTRIFUGO_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: adminPassword + - name: CENTRIFUGO_ADMIN_SECRET + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: adminSecret + - name: CENTRIFUGO_TOKEN_HMAC_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: tokenHmacSecretKey + optional: true + - name: CENTRIFUGO_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: apiKey + optional: true + - name: CENTRIFUGO_GRPC_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: grpcApiKey + optional: true + - name: CENTRIFUGO_REDIS_ADDRESS + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: redisAddress + optional: true + - name: CENTRIFUGO_REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: redisPassword + optional: true + - name: CENTRIFUGO_REDIS_SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: redisSentinelPassword + optional: true + - name: CENTRIFUGO_NATS_URL + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . 
}} + key: natsUrl + optional: true + - name: CENTRIFUGO_LICENSE + valueFrom: + secretKeyRef: + name: {{ include "centrifugo.secretName" . }} + key: license + optional: true + {{- range .Values.envSecret }} + - name: {{ .name }} + valueFrom: + secretKeyRef: + {{- toYaml .secretKeyRef | nindent 18 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + volumeMounts: + - name: "{{ include "centrifugo.fullname" . }}-config" + mountPath: "/centrifugo" + readOnly: true + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 12 }} + {{- end }} + ports: + - name: external + containerPort: {{ .Values.service.port }} + - name: internal + containerPort: {{ .Values.internalService.port }} + - name: grpc + containerPort: {{ .Values.grpcService.port }} + - name: uni-grpc + containerPort: {{ .Values.uniGrpcService.port }} + livenessProbe: + httpGet: + path: /health + port: {{ .Values.internalService.port }} + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: {{ .Values.internalService.port }} + initialDelaySeconds: 3 + periodSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/hpa.yaml b/install-bundler/install-centrifugo/centrifugo/templates/hpa.yaml new file mode 100644 index 00000000..878d6319 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- if .Values.autoscaling.enabled }} +{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} +apiVersion: "autoscaling/v2" +{{- else }} +apiVersion: "autoscaling/v2beta1" +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "centrifugo.fullname" . }} + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "centrifugo.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- with .Values.autoscalingTemplate }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- if .Values.autoscaling.cpu.enabled }} + - type: Resource + resource: + name: cpu + {{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.cpu.targetCPUUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .Values.autoscaling.cpu.targetCPUUtilizationPercentage }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.memory.enabled }} + - type: Resource + resource: + name: memory + {{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.memory.targetMemoryUtilizationPercentage }} + {{- else }} + targetAverageUtilization: {{ .Values.autoscaling.memory.targetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + {{- with .Values.autoscaling.behavior }} + behavior: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/ingress-internal.yaml b/install-bundler/install-centrifugo/centrifugo/templates/ingress-internal.yaml new file mode 100644 index 00000000..f39749c7 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/ingress-internal.yaml @@ -0,0 +1,64 @@ +{{- if .Values.ingressInternal.enabled }} +{{- $fullName := include "centrifugo.fullname" . }} +{{- $namespace := include "centrifugo.namespace" . }} +{{- $svcPort := .Values.internalService.port }} +{{- if or ( gt .Capabilities.KubeVersion.Major "1" ) ( ge .Capabilities.KubeVersion.Minor "19" ) }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }}-internal + namespace: {{ $namespace }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.ingressInternal.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingressInternal.ingressClassName }} + ingressClassName: {{ .Values.ingressInternal.ingressClassName }} + {{- end }} + {{- if .Values.ingressInternal.tls }} + tls: + {{- range .Values.ingressInternal.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingressInternal.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if or ( gt $.Capabilities.KubeVersion.Major "1" ) ( ge $.Capabilities.KubeVersion.Minor "19" ) }} + pathType: {{ $.Values.ingressInternal.pathType }} + {{- end }} + backend: + {{- if or ( gt $.Capabilities.KubeVersion.Major "1" ) ( ge $.Capabilities.KubeVersion.Minor "19" ) }} + service: + {{- if $.Values.service.useSeparateInternalService }} + name: {{ $fullName }}-internal + {{- else }} + name: {{ $fullName }} + {{- end }} + port: + number: {{ $svcPort }} + {{- else }} + {{- if $.Values.service.useSeparateInternalService }} + serviceName: {{ $fullName }}-internal + {{- else }} + serviceName: {{ $fullName }} + {{- end }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/ingress.yaml b/install-bundler/install-centrifugo/centrifugo/templates/ingress.yaml new file mode 100644 index 00000000..64773ffd --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/ingress.yaml @@ -0,0 +1,56 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "centrifugo.fullname" . -}} +{{- $namespace := include "centrifugo.namespace" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if or ( gt .Capabilities.KubeVersion.Major "1" ) ( ge .Capabilities.KubeVersion.Minor "19" ) }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ $namespace }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . }} + {{- if or ( gt $.Capabilities.KubeVersion.Major "1" ) ( ge $.Capabilities.KubeVersion.Minor "19" ) }} + pathType: {{ $.Values.ingress.pathType }} + {{- end }} + backend: + {{- if or ( gt $.Capabilities.KubeVersion.Major "1" ) ( ge $.Capabilities.KubeVersion.Minor "19" ) }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/poddisruptionbudget.yaml b/install-bundler/install-centrifugo/centrifugo/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000..24abe26c --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "centrifugo.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + {{- include "centrifugo.selectorLabels" . | nindent 6 }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/secret.yaml b/install-bundler/install-centrifugo/centrifugo/templates/secret.yaml new file mode 100644 index 00000000..149cc47b --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/secret.yaml @@ -0,0 +1,45 @@ +{{- if not .Values.existingSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "centrifugo.fullname" . }} + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . 
| nindent 4 }} +type: Opaque +data: + {{- if .Values.secrets.tokenHmacSecretKey }} + tokenHmacSecretKey: {{ .Values.secrets.tokenHmacSecretKey | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.apiKey }} + apiKey: {{ .Values.secrets.apiKey | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.grpcApiKey }} + grpcApiKey: {{ .Values.secrets.grpcApiKey | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.adminPassword }} + adminPassword: {{ .Values.secrets.adminPassword | b64enc | quote }} + {{- else }} + adminPassword: {{ randAlphaNum 12 | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.adminSecret }} + adminSecret: {{ .Values.secrets.adminSecret | b64enc | quote }} + {{- else }} + adminSecret: {{ randAlphaNum 24 | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.redisAddress }} + redisAddress: {{ .Values.secrets.redisAddress | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.redisPassword }} + redisPassword: {{ .Values.secrets.redisPassword | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.redisSentinelPassword }} + redisSentinelPassword: {{ .Values.secrets.redisSentinelPassword | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.natsUrl }} + natsUrl: {{ .Values.secrets.natsUrl | b64enc | quote }} + {{- end }} + {{- if .Values.secrets.license }} + license: {{ .Values.secrets.license | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/service-grpc-api.yaml b/install-bundler/install-centrifugo/centrifugo/templates/service-grpc-api.yaml new file mode 100644 index 00000000..df2ccb2e --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/service-grpc-api.yaml @@ -0,0 +1,31 @@ +{{- if .Values.service.useSeparateGrpcService -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "centrifugo.fullname" . }}-grpc-api + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.grpcService.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.grpcService.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.grpcService.type }} + ports: + - port: {{ .Values.grpcService.port }} + targetPort: grpc + protocol: TCP + {{- if .Values.grpcService.appProtocol }} + appProtocol: {{ .Values.grpcService.appProtocol }} + {{- end }} + name: grpc + {{- if (and (eq .Values.grpcService.type "NodePort") (not (empty .Values.grpcService.nodePort))) }} + nodePort: {{ .Values.grpcService.nodePort }} + {{- end }} + selector: + {{- include "centrifugo.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/service-internal.yaml b/install-bundler/install-centrifugo/centrifugo/templates/service-internal.yaml new file mode 100644 index 00000000..062f085c --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/service-internal.yaml @@ -0,0 +1,31 @@ +{{- if .Values.service.useSeparateInternalService -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "centrifugo.fullname" . }}-internal + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.internalService.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.internalService.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.internalService.type }} + ports: + - port: {{ .Values.internalService.port }} + targetPort: internal + protocol: TCP + {{- if .Values.internalService.appProtocol }} + appProtocol: {{ .Values.internalService.appProtocol }} + {{- end }} + name: internal + {{- if (and (eq .Values.internalService.type "NodePort") (not (empty .Values.internalService.nodePort))) }} + nodePort: {{ .Values.internalService.nodePort }} + {{- end }} + selector: + {{- include "centrifugo.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/service-uni-grpc.yaml b/install-bundler/install-centrifugo/centrifugo/templates/service-uni-grpc.yaml new file mode 100644 index 00000000..5a1b50a0 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/service-uni-grpc.yaml @@ -0,0 +1,31 @@ +{{- if .Values.service.useSeparateUniGrpcService -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "centrifugo.fullname" . }}-uni-grpc + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.uniGrpcService.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.uniGrpcService.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.uniGrpcService.type }} + ports: + - port: {{ .Values.uniGrpcService.port }} + targetPort: uni-grpc + protocol: TCP + {{- if .Values.uniGrpcService.appProtocol }} + appProtocol: {{ .Values.uniGrpcService.appProtocol }} + {{- end }} + name: uni-grpc + {{- if (and (eq .Values.uniGrpcService.type "NodePort") (not (empty .Values.uniGrpcService.nodePort))) }} + nodePort: {{ .Values.uniGrpcService.nodePort }} + {{- end }} + selector: + {{- include "centrifugo.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/service.yaml b/install-bundler/install-centrifugo/centrifugo/templates/service.yaml new file mode 100644 index 00000000..39f5b0d1 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/service.yaml @@ -0,0 +1,65 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "centrifugo.fullname" . }} + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: external + protocol: TCP + {{- if .Values.service.appProtocol }} + appProtocol: {{ .Values.service.appProtocol }} + {{- end }} + name: external + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} +{{- if not .Values.service.useSeparateInternalService }} + - port: {{ .Values.internalService.port }} + targetPort: internal + protocol: TCP + {{- if .Values.internalService.appProtocol }} + appProtocol: {{ .Values.internalService.appProtocol }} + {{- end }} + name: internal + {{- if (and (eq .Values.internalService.type "NodePort") (not (empty .Values.internalService.nodePort))) }} + nodePort: {{ .Values.internalService.nodePort }} + {{- end }} +{{- end }} +{{- if not .Values.service.useSeparateGrpcService }} + - port: {{ .Values.grpcService.port }} + targetPort: grpc + protocol: TCP + {{- if .Values.grpcService.appProtocol }} + appProtocol: {{ .Values.grpcService.appProtocol }} + {{- end }} + name: grpc + {{- if (and (eq .Values.grpcService.type "NodePort") (not (empty .Values.grpcService.nodePort))) }} + nodePort: {{ .Values.grpcService.nodePort }} + {{- end }} +{{- end }} +{{- if not .Values.service.useSeparateUniGrpcService }} + - port: {{ .Values.uniGrpcService.port }} + targetPort: uni-grpc + protocol: TCP + {{- if .Values.uniGrpcService.appProtocol }} + appProtocol: {{ .Values.uniGrpcService.appProtocol }} + {{- end }} + name: uni-grpc + {{- if (and (eq .Values.uniGrpcService.type "NodePort") (not (empty .Values.uniGrpcService.nodePort))) }} + nodePort: {{ .Values.uniGrpcService.nodePort }} + {{- end }} +{{- end }} + selector: + {{- include "centrifugo.selectorLabels" . | nindent 4 }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/serviceaccount.yaml b/install-bundler/install-centrifugo/centrifugo/templates/serviceaccount.yaml new file mode 100644 index 00000000..4bde8aeb --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "centrifugo.serviceAccountName" . }} + namespace: {{ include "centrifugo.namespace" . }} + labels: + {{- include "centrifugo.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-centrifugo/centrifugo/templates/servicemonitor.yaml b/install-bundler/install-centrifugo/centrifugo/templates/servicemonitor.yaml new file mode 100644 index 00000000..320b0847 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/templates/servicemonitor.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "centrifugo.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ include "centrifugo.namespace" . }} + {{- end }} + labels: + {{- include "centrifugo.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.metrics.serviceMonitor.additionalLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.annotations }} + annotations: + {{- range $key, $value := .Values.metrics.serviceMonitor.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} +spec: + endpoints: + - port: internal + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + selector: + matchLabels: + {{- include "centrifugo.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/install-bundler/install-centrifugo/centrifugo/values.yaml b/install-bundler/install-centrifugo/centrifugo/values.yaml new file mode 100644 index 00000000..14701799 --- /dev/null +++ b/install-bundler/install-centrifugo/centrifugo/values.yaml @@ -0,0 +1,331 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName + +replicaCount: 1 + +image: + registry: docker.io + repository: centrifugo/centrifugo + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +namespaceOverride: "" +priorityClassName: "" + +service: + ## Service type + ## + type: ClusterIP + ## Service port + ## + port: 8000 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Provide any additional annotations which may be required + ## + annotations: {} + ## Provide any additional labels which may be required + ## + labels: {} + ## + ## Specify custom appProtocol for a service port. + appProtocol: "" + ## + ## Use separate service for internal endpoints. It could be useful for configuring same port number for all services. + useSeparateInternalService: false + ## Use separate service for GRPC API endpoints. It could be useful for configuring same port number for all services. + useSeparateGrpcService: false + ## Use separate service for GRPC unidirectional stream. It could be useful for configuring same port number for all services. + useSeparateUniGrpcService: false + +internalService: + port: 9000 + type: ClusterIP + nodePort: "" + # Static NodePort, if set. + # nodePort: 30101 + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/path: "/metrics" + # prometheus.io/port: "9000" + ## Specify custom appProtocol for a service port. + labels: {} + appProtocol: "" + +grpcService: + port: 10000 + type: ClusterIP + nodePort: "" + # Static NodePort, if set. + # nodePort: 30102 + annotations: {} + ## Specify custom appProtocol for a service port. 
+ labels: {} + appProtocol: "" + +uniGrpcService: + port: 11000 + type: ClusterIP + nodePort: "" + # Static NodePort, if set. + # nodePort: 30103 + annotations: {} + ## Specify custom appProtocol for a service port. + labels: {} + appProtocol: "" + +ingress: + enabled: false + + # Optionally set the ingressClassName. k8s >= 1.18 + ingressClassName: "" + + # pathType override - see: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types + pathType: Prefix + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # + # To run on custom path: + # nginx.ingress.kubernetes.io/rewrite-target: /$2 + hosts: [] + # - host: centrifugo.local + # paths: + # - / + # - host: centrifugo-with-prefix.local + # paths: + # - /test(/|$)(.*) + # https://kubernetes.github.io/ingress-nginx/examples/tls-termination/ + tls: [] + # - secretName: centrifugo-example-tls + # hosts: + # - centrifugo.local + +ingressInternal: + # !!! ATTENTION !!! + # Be careful in exposing internal services by ingressInternal. Make sure + # you understand which Centrifugo endpoints are exposed in this case (server API, + # admin, Prometheus metrics, healthcheck, etc.). If you really need exposing internal + # endpoints consider limiting access to the ingress from the outside by load balancer + # rules, probably per specific path. Probably `admin_external` or `api_external` + # options which expose corresponding handlers on the external ingress will work better + # for you. + enabled: false + + # Optionally set the ingressClassName. k8s >= 1.18 + ingressClassName: "" + + # pathType override - see: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types + pathType: Prefix + + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # + # To run on custom path: + # nginx.ingress.kubernetes.io/rewrite-target: /$2 + hosts: [] + # - host: centrifugo.local + # paths: + # - / + # - host: centrifugo-with-prefix.local + # paths: + # - /test(/|$)(.*) + # https://kubernetes.github.io/ingress-nginx/examples/tls-termination/ + tls: [] + # - secretName: centrifugo-example-tls + # hosts: + # - centrifugo.local + +resources: + requests: + memory: "100Mi" + cpu: "200m" + limits: + memory: "512Mi" + cpu: "512m" + +serviceAccount: + # Specifies whether a service account should be created. + create: true + # Annotations to add to the service account. + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template. + name: "" + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + cpu: + enabled: false + targetCPUUtilizationPercentage: 80 + memory: + enabled: false + targetMemoryUtilizationPercentage: 80 + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#default-behavior + behavior: {} + +autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis + # ref: https://github.com/kubernetes-sigs/prometheus-adapter/ + # - type: Pods + # pods: + # metric: + # # kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/hpa_custom_metric_centrifugo_node_num_clients" | jq . 
+ # name: hpa_custom_metric_centrifugo_node_num_clients + # target: + # type: AverageValue + # averageValue: 10000m # NOTE: # 10000m = 10 actual metric value (10 clients) + +podDisruptionBudget: + enabled: false + minAvailable: 1 + +terminationGracePeriodSeconds: 30 + +podAnnotations: {} +podLabels: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + # + # You can also tune sysctl, ex.: + # sysctls: + # - name: net.core.somaxconn + # value: "2048" + +deploymentStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + +metrics: + enabled: false + serviceMonitor: + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + ## + # namespace: "" + ## Specify the interval at which metrics should be scraped + ## + interval: 30s + ## Specify the timeout after which the scrape is ended + ## + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + ## + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## Can be used to specify the release label for ServiceMonitor. Sometimes it should be custom for prometheus operator to work. + additionalLabels: {} + ## Set custom annotations. + annotations: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Additional environment variables to be passed to Centrifugo container. +env: {} + +# Additional secret environment variables to be passed to Centrifugo container. +envSecret: [] + +# Centrifugo configuration, will be transformed into config.json file. +config: + # Engine to use. Default memory engine allows running only one Centrifugo pod. + # Scale to many pods with Redis engine or Nats broker. Refer to Centrifugo + # documentation: https://centrifugal.github.io/centrifugo/server/engines/ + engine: "memory" + + # Enable admin web interface by default. + admin: true + + # Array of namespaces. + namespaces: [] + +# Additional volumes for Centrifugo deployment. +volumes: [] + # - name: volume + # secret: + # secretName: volumeSecretName + +# Additional volume mounts for Centrifugo container. +volumeMounts: [] + # - name: volume + # mountPath: "/volume" + # readOnly: true + + +# TopologySpreadConstrains, e.g. for spreading pods across nodes +# see https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ +topologySpreadConstraints: "" + +# Init Containers, e.g. for waiting for other resources like redis (evaluated as template) +# see https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +initContainers: "" + +# existingSecret: my-secret + +# Centrifugo secrets. +secrets: + # Secret key for HMAC tokens. + tokenHmacSecretKey: "" + + # Admin password used to protect access to web interface. + adminPassword: "" + + # Admin secret used to create auth tokens on user login into admin web interface. + adminSecret: "" + + # Centrifugo api_key for Centrifugo API endpoint authorization. + apiKey: "" + + # Centrifugo grpc_api_key for Centrifugo GRPC API authorization. + grpcApiKey: "" + + # Connection string to Redis. + redisAddress: "" + + # Password for Redis. 
+ redisPassword: "" + + # Password for Redis Sentinel. + redisSentinelPassword: "" + + # Connection string to Nats. + natsUrl: "" + + # Centrifugo PRO license. + license: "" \ No newline at end of file diff --git a/install-bundler/install-centrifugo/install.sh b/install-bundler/install-centrifugo/install.sh new file mode 100644 index 00000000..3ae7e359 --- /dev/null +++ b/install-bundler/install-centrifugo/install.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -e + +NAMESPACE=$1 +REPLICA_COUNT=$2 +# Check if the required arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 for centrifugo" + exit 1 +fi + +HELM_RELEASE="centrifugo"; + +# shellcheck disable=SC2059 +printf "\n${GREEN}####### Deploying $HELM_RELEASE to $NAMESPACE ${NC} ####### \n"; +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/centrifugo/." \ + --values "${DIR}/centrifugo/values.yaml" \ + --namespace "${NAMESPACE}" \ + --set nameOverride="${HELM_RELEASE}" \ + --set replicaCount="${REPLICA_COUNT}" \ + --set secrets.tokenHmacSecretKey=averystrongsecret \ + --set secrets.adminPassword=usedIfAdminSetToTrue \ + --set secrets.adminSecret=averystrongsecretforadmin \ + --set secrets.apiKey=usedforpostapi \ + --set config.admin=true \ + --set config.debug=true \ + --set config.log_level=debug \ + --set config.client_anonymous=true \ + --set config.client_channel_limit=512 \ + --set config.namespaces[0].name=relayer \ + --set config.namespaces[0].publish=true \ + --set config.namespaces[0].history_size=10 \ + --set config.namespaces[0].history_ttl=300s \ + --set config.namespaces[0].recover=true \ + --set config.namespaces[0].anonymous=false \ + --set config.namespaces[1].name=transaction \ + --set config.namespaces[1].publish=true \ + --set config.namespaces[1].history_size=10 \ + --set config.namespaces[1].history_ttl=300s \ + --set config.namespaces[1].recover=true \ + --set config.namespaces[1].anonymous=true \ + --set metrics.enabled=true + +# shellcheck disable=SC2028 +echo "\n\n ####### Deployed $HELM_RELEASE to $NAMESPACE ####### \n"; \ No newline at end of file diff --git a/install-bundler/install-grafana/custom-values.yaml b/install-bundler/install-grafana/custom-values.yaml new file mode 100644 index 00000000..8938583c --- /dev/null +++ b/install-bundler/install-grafana/custom-values.yaml @@ -0,0 +1,21 @@ +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - access: proxy + name: Prometheus + type: prometheus + url: http://qabundler-test-monitoring-prometheus-server + +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default \ No newline at end of file diff --git a/install-bundler/install-grafana/datasources.yaml b/install-bundler/install-grafana/datasources.yaml new file mode 100644 index 00000000..88d9d9e6 --- /dev/null +++ b/install-bundler/install-grafana/datasources.yaml @@ -0,0 +1,21 @@ +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - access: proxy + name: Prometheus + type: prometheus + url: ${PROMETHEUS_URL} + +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default \ No newline at end of file diff --git a/install-bundler/install-grafana/grafana/.helmignore 
b/install-bundler/install-grafana/grafana/.helmignore new file mode 100644 index 00000000..8cade131 --- /dev/null +++ b/install-bundler/install-grafana/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/install-bundler/install-grafana/grafana/Chart.yaml b/install-bundler/install-grafana/grafana/Chart.yaml new file mode 100644 index 00000000..1f9776f5 --- /dev/null +++ b/install-bundler/install-grafana/grafana/Chart.yaml @@ -0,0 +1,33 @@ +annotations: + artifacthub.io/license: AGPL-3.0-only + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/grafana/helm-charts + - name: Upstream Project + url: https://github.com/grafana/grafana +apiVersion: v2 +appVersion: 10.1.1 +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +keywords: +- monitoring +- metric +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: grafana +sources: +- https://github.com/grafana/grafana +- https://github.com/grafana/helm-charts +type: application +version: 6.59.4 diff --git a/install-bundler/install-grafana/grafana/README.md b/install-bundler/install-grafana/grafana/README.md new file mode 100644 index 00000000..80ddcbb8 --- /dev/null +++ b/install-bundler/install-grafana/grafana/README.md @@ -0,0 +1,692 @@ +#Queries + +http_requests_total{pod=~".*statefulset.*"} + +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. 
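+ +The Configuration section below lists every chart parameter. As a concrete illustration, this repository provisions a Prometheus datasource and a default dashboard provider for Grafana through install-bundler/install-grafana/custom-values.yaml (added earlier in this diff); a trimmed sketch of such a values override is shown here, where the Prometheus URL is a placeholder that must point at your actual Prometheus service. + +```yaml +# Sketch of a Grafana values override, mirroring install-grafana/custom-values.yaml. +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus-server # placeholder, replace with your Prometheus service URL +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/default +``` + +It could be applied at install time with `helm install my-release grafana/grafana -f custom-values.yaml`. 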
+ +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | +| `image.sha` | Image sha (optional) | `` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.appProtocol` | Adds the appProtocol field to the service | `` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations (can be templated) | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `headlessService` | Create a headless service | `false` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `ingress.ingressClassName` | Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 | `""` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | +| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `createConfigmap` | Enable creating the grafana configmap | `true` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | +| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.securityContext` | Sidecar securityContext | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | +| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | +| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | +| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | +| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. 
| `both` | +| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | +| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | +| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` | +| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | +| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. 
| `both` | +| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | +| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | +| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | +| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | +| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. 
| `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.labels` | ServiceAccount labels | `{}` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `args` | Define additional args if command is used | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. | `v1.4.1` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | +| `imageRenderer.service.portName` | image-renderer service port name | `http` | +| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | +| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | +| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | +| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | +| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | +| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | +| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | +| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | +| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | + +### Example ingress with path + +With grafana 6.3 and above + +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true + hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +Volume can be type persistentVolumeClaim or hostPath but not both at same time. +If neither existingClaim or hostPath argument is given then type is emptyDir. + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... 
+ + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + loki-dashboard-quick-search: + gnetId: 12019 + revision: 2 + datasource: + - name: DS_PROMETHEUS + value: Prometheus + - name: DS_LOKI + value: Loki + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) +A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. +If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. + +### Gerrit use case + +Gerrit API for download files has the following schema: where {project-name} and +{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://..svc.cluster.local/api/admin/provisioning/datasources/reload`. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. 
+ +Example values to add a postgres datasource as a kubernetes secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: grafana-datasources + labels: + grafana_datasource: 'true' # default value for: sidecar.datasources.label +stringData: + pg-db.yaml: |- + apiVersion: 1 + datasources: + - name: My pg db datasource + type: postgres + url: my-postgresql-db:5432 + user: db-readonly-user + secureJsonData: + password: 'SUperSEcretPa$$word' + jsonData: + database: my_datase + sslmode: 'disable' # disable/require/verify-ca/verify-full + maxOpenConns: 0 # Grafana v5.4+ + maxIdleConns: 2 # Grafana v5.4+ + connMaxLifetime: 14400 # Grafana v5.4+ + postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 + timescaledb: false + # allow users to edit datasources from the UI. + editable: false +``` + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. 
+ settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## Provision alert rules, contact points, notification policies and notification templates + +There are two methods to provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +There are two possibilities: + +* Inlining the file contents as described in the example `values.yaml` and the official [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). +* Importing a file using a relative path starting from the chart root directory. + +### Important notes on file provisioning + +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other. +* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. 
+ +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance + +### High Availability for unified alerting + +If you want to run Grafana in a high availability cluster you need to enable +the headless service by setting `headlessService: true` in your `values.yaml` +file. + +As next step you have to setup the `grafana.ini` in your `values.yaml` in a way +that it will make use of the headless service to obtain all the IPs of the +cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. + +```yaml +grafana.ini: + ... + unified_alerting: + enabled: true + ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + + alerting: + enabled: false +``` diff --git a/install-bundler/install-grafana/grafana/ci/default-values.yaml b/install-bundler/install-grafana/grafana/ci/default-values.yaml new file mode 100644 index 00000000..fc2ba605 --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/install-bundler/install-grafana/grafana/ci/with-affinity-values.yaml b/install-bundler/install-grafana/grafana/ci/with-affinity-values.yaml new file mode 100644 index 00000000..f5b9b53e --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-affinity-values.yaml @@ -0,0 +1,16 @@ +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: kubernetes.io/hostname diff --git a/install-bundler/install-grafana/grafana/ci/with-dashboard-json-values.yaml b/install-bundler/install-grafana/grafana/ci/with-dashboard-json-values.yaml new file mode 100644 index 00000000..e0c4e416 --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/install-bundler/install-grafana/grafana/ci/with-dashboard-values.yaml b/install-bundler/install-grafana/grafana/ci/with-dashboard-values.yaml new file mode 100644 index 00000000..7b662c5f --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/install-bundler/install-grafana/grafana/ci/with-extraconfigmapmounts-values.yaml b/install-bundler/install-grafana/grafana/ci/with-extraconfigmapmounts-values.yaml new file mode 100644 index 00000000..5cc44a05 --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-extraconfigmapmounts-values.yaml @@ -0,0 +1,7 @@ +extraConfigmapMounts: + - name: '{{ include "grafana.fullname" . }}' + configMap: '{{ include "grafana.fullname" . 
}}' + mountPath: /var/lib/grafana/dashboards/test-dashboard.json + # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap + subPath: grafana.ini + readOnly: true diff --git a/install-bundler/install-grafana/grafana/ci/with-image-renderer-values.yaml b/install-bundler/install-grafana/grafana/ci/with-image-renderer-values.yaml new file mode 100644 index 00000000..32f30743 --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-image-renderer-values.yaml @@ -0,0 +1,19 @@ +podLabels: + customLableA: Aaaaa +imageRenderer: + enabled: true + env: + RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + RENDERING_MODE: clustered + podLabels: + customLableB: Bbbbb + networkPolicy: + limitIngress: true + limitEgress: true + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 500m + memory: 50Mi diff --git a/install-bundler/install-grafana/grafana/ci/with-persistence.yaml b/install-bundler/install-grafana/grafana/ci/with-persistence.yaml new file mode 100644 index 00000000..b92ca02c --- /dev/null +++ b/install-bundler/install-grafana/grafana/ci/with-persistence.yaml @@ -0,0 +1,3 @@ +persistence: + type: pvc + enabled: true diff --git a/install-bundler/install-grafana/grafana/templates/NOTES.txt b/install-bundler/install-grafana/grafana/templates/NOTES.txt new file mode 100644 index 00000000..d86419fe --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/NOTES.txt @@ -0,0 +1,55 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo + + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: + {{- range .Values.ingress.hosts }} + http://{{ . }} + {{- end }} +{{- else }} + Get the Grafana URL to visit by running these commands in the same shell: + {{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} + {{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 + {{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/_helpers.tpl b/install-bundler/install-grafana/grafana/templates/_helpers.tpl new file mode 100644 index 00000000..0b4e0728 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/_helpers.tpl @@ -0,0 +1,203 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create }} +{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else }} +{{- default "default" .Values.serviceAccount.nameTest }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.extraLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} +{{- if $secret }} +{{- index $secret "data" "admin-password" }} +{{- else }} +{{- (randAlphaNum 40) | b64enc | quote }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" }} +{{- else }} +{{- print "rbac.authorization.k8s.io/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "grafana.hpa.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "grafana.podDisruptionBudget.apiVersion" -}} +{{- if $.Values.podDisruptionBudget.apiVersion }} +{{- print $.Values.podDisruptionBudget.apiVersion }} +{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} +{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} +{{- or (eq (include "grafana.ingress.isStable" .) 
"true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "grafana.imagePullSecrets" -}} +{{- $root := .root }} +{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} +{{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml (dict "name" (tpl .name $root)) | trim }} +{{- else }} +- name: {{ tpl . $root }} +{{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/_pod.tpl b/install-bundler/install-grafana/grafana/templates/_pod.tpl new file mode 100644 index 00000000..23be2ca6 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/_pod.tpl @@ -0,0 +1,1162 @@ +{{- define "grafana.pod" -}} +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- $root := . -}} +{{- with .Values.schedulerName }} +schedulerName: "{{ . }}" +{{- end }} +serviceAccountName: {{ include "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- if .Values.initChownData.image.sha }} + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + {{- with .Values.initChownData.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + command: + - chown + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} + - /var/lib/grafana + {{- with .Values.initChownData.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . 
$root }} + {{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + {{- with .Values.downloadDashboards.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + env: + {{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- with .Values.downloadDashboards.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl . $root }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} + - name: {{ include "grafana.name" . }}-init-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end }} +{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} + - name: {{ include "grafana.name" . }}-init-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- with .Values.extraInitContainers }} + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} +{{- end }} +{{- if not .Values.enableKubeBackwardCompatibility }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +{{- end }} +containers: +{{- if .Values.sidecar.alerts.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-alerts + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.alerts.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.alerts.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.alerts.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.alerts.watchServerTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.alerts.watchClientTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ include "grafana.name" . }}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.dashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- with .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- if not .Values.sidecar.dashboards.skipReload }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + - name: REQ_URL + value: {{ .Values.sidecar.dashboards.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.dashboards.watchServerTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.dashboards.watchClientTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- with .Values.sidecar.dashboards.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ include "grafana.name" . }}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.datasources.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. 
| join ",") $root }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.datasources.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.datasources.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.datasources.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.datasources.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.datasources.watchServerTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.datasources.watchClientTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ include "grafana.name" . }}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.notifiers.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . 
}} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.notifiers.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.notifiers.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.notifiers.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.notifiers.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.notifiers.watchServerTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.notifiers.watchClientTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.sidecar.plugins.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-plugins + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.plugins.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.plugins.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.plugins.label }}" + {{- if .Values.sidecar.plugins.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.plugins.labelValue }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/plugins" + - name: RESOURCE + value: {{ quote .Values.sidecar.plugins.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.plugins.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.plugins.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.plugins.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.plugins.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.plugins.watchServerTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.plugins.watchClientTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + mountPath: {{ tpl .mountPath $root }} + subPath: {{ tpl (.subPath | default "") $root }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- with .Values.dashboards }} + {{- range $provider, $dashboards := . }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardsConfigMaps }} + {{- range (keys . | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . }}" + {{- end }} + {{- end }} + {{- with .Values.datasources }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.notifiers }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.alerting }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.dashboardProviders }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" + subPath: {{ . 
| quote }} + {{- end }} + {{- end }} + {{- with .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- end}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml + {{- end}} + {{- end}} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" + {{- end}} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" + {{- end}} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" + {{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.podPortName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ include "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{- end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" + {{- end }} + {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ include "grafana.fullname" . }}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycleHooks }} + lifecycle: + {{- tpl (toYaml .) $root | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.extraContainers }} + {{- tpl . $ | nindent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ include "grafana.fullname" . }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ include "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} + {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . 
}} + {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} + {{/* nothing */}} + {{- else }} + - name: storage + {{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory + {{- with .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ . }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + emptyDir: + {{- with .Values.sidecar.alerts.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: + {{- with .Values.sidecar.dashboards.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ include "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: + {{- with .Values.sidecar.datasources.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + emptyDir: + {{- with .Values.sidecar.plugins.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: + {{- with .Values.sidecar.notifiers.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- range .Values.extraSecretMounts }} + {{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else if .projected }} + - name: {{ .name }} + projected: + {{- toYaml .projected | nindent 6 }} + {{- else if .csi }} + - name: {{ .name }} + csi: + {{- toYaml .csi | nindent 6 }} + {{- end }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else if .csi }} + csi: + {{- toYaml .data | nindent 6 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} + {{- end }} + {{- with .Values.extraContainerVolumes }} + {{- tpl (toYaml .) $root | nindent 2 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/clusterrole.yaml b/install-bundler/install-grafana/grafana/templates/clusterrole.yaml new file mode 100644 index 00000000..2c9a1801 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . 
}}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} +rules: + {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end}} + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/install-bundler/install-grafana/grafana/templates/clusterrolebinding.yaml b/install-bundler/install-grafana/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..b848e8c1 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +roleRef: + kind: ClusterRole + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }}-clusterrole + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/configmap-dashboard-provider.yaml b/install-bundler/install-grafana/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 00000000..1f706a8b --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-config-dashboards + namespace: {{ include "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end }} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/configmap.yaml b/install-bundler/install-grafana/grafana/templates/configmap.yaml new file mode 100644 index 00000000..b5f21e88 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/configmap.yaml @@ -0,0 +1,138 @@ +{{- if .Values.createConfigmap }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + {{- with .Values.plugins }} + plugins: {{ join "," . }} + {{- end }} + grafana.ini: | + {{- range $elem, $elemVal := index .Values "grafana.ini" }} + {{- if not (kindIs "map" $elemVal) }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := index .Values "grafana.ini" }} + {{- if kindIs "map" $value }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.datasources }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + + {{- range $key, $value := .Values.notifiers }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + + {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.dashboardProviders }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + {{ $dashboardProviders := .Values.dashboardProviders }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + {{- if not $value.acceptHeader }} + -H "Accept: application/json" \ + {{- else }} + -H "Accept: {{ $value.acceptHeader }}" \ + {{- end }} + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + {{- if $value.bearerToken }} + -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Authorization: Basic {{ $value.basic }}" \ + {{- end }} + {{- if $value.gitlabToken }} + -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- $dpPath := "" -}} + {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} + {{- if eq $kd.name $provider }} + {{- $dpPath = $kd.options.path }} + {{- end }} + {{- end }} + 
{{- if $value.url }} + "{{ $value.url }}" \ + {{- else }} + "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ + {{- end }} + {{- if $value.datasource }} + {{- if kindIs "string" $value.datasource }} + | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ + {{- end }} + {{- if kindIs "slice" $value.datasource }} + {{- range $value.datasource }} + | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ + {{- end }} + {{- end }} + {{- end }} + {{- if $value.b64content }} + | base64 -d \ + {{- end }} + > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/dashboards-json-configmap.yaml b/install-bundler/install-grafana/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 00000000..df0ed0d8 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,35 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ include "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} + {{- print $key | nindent 2 }}.json: + {{- if hasKey $value "json" }} + |- + {{- $value.json | nindent 6 }} + {{- end }} + {{- if hasKey $value "file" }} + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/deployment.yaml b/install-bundler/install-grafana/grafana/templates/deployment.yaml new file mode 100644 index 00000000..bfa26bb4 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/deployment.yaml @@ -0,0 +1,51 @@ +{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + {{- with .Values.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/extra-manifests.yaml b/install-bundler/install-grafana/grafana/templates/extra-manifests.yaml new file mode 100644 index 00000000..a9bb3b6b --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraObjects }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/install-bundler/install-grafana/grafana/templates/headless-service.yaml b/install-bundler/install-grafana/grafana/templates/headless-service.yaml new file mode 100644 index 00000000..3028589d --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-headless + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP + ports: + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/hpa.yaml b/install-bundler/install-grafana/grafana/templates/hpa.yaml new file mode 100644 index 00000000..46bbcb49 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }} + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + {{- if has .Values.persistence.type $sts }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ include "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/image-renderer-deployment.yaml b/install-bundler/install-grafana/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 00000000..93d20e8e --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,130 @@ +{{ if .Values.imageRenderer.enabled }} +{{- $root := . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} + replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + + {{- with .Values.imageRenderer.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.imageRenderer.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imageRenderer.schedulerName }} + schedulerName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range . }} + - name: {{ tpl . 
$root }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.imageRenderer.service.portName }} + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} + {{- range $key, $value := .Values.imageRenderer.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- with .Values.imageRenderer.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/image-renderer-hpa.yaml b/install-bundler/install-grafana/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 00000000..b0f0059b --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/image-renderer-network-policy.yaml b/install-bundler/install-grafana/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 00000000..d1a0eb31 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} +{{- end }} + +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-egress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.targetPort }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 14 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/image-renderer-service.yaml b/install-bundler/install-grafana/grafana/templates/image-renderer-service.yaml new file mode 100644 index 00000000..f8da127c --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + {{- with .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + {{- with .Values.imageRenderer.appProtocol }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/image-renderer-servicemonitor.yaml b/install-bundler/install-grafana/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 00000000..5d9f09d2 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/ingress.yaml b/install-bundler/install-grafana/grafana/templates/ingress.yaml new file mode 100644 index 00000000..063cdfaa --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- with $ingressPath }} + path: {{ . }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/networkpolicy.yaml b/install-bundler/install-grafana/grafana/templates/networkpolicy.yaml new file mode 100644 index 00000000..ea4578be --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + policyTypes: + {{- if .Values.networkPolicy.ingress }} + - Ingress + {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + - Egress + {{- end }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 6 }} + + {{- if .Values.networkPolicy.egress.enabled }} + egress: + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- end }} + {{- if .Values.networkPolicy.ingress }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "grafana.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.explicitNamespacesSelector }} + - namespaceSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "grafana.labels" . | nindent 14 }} + role: read + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/poddisruptionbudget.yaml b/install-bundler/install-grafana/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 00000000..05251214 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/podsecuritypolicy.yaml b/install-bundler/install-grafana/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000..eed7af95 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/pvc.yaml b/install-bundler/install-grafana/grafana/templates/pvc.yaml new file mode 100644 index 00000000..eb8f87f0 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/pvc.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.extraPvcLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- with .Values.persistence.storageClassName }} + storageClassName: {{ . }} + {{- end }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/role.yaml b/install-bundler/install-grafana/grafana/templates/role.yaml new file mode 100644 index 00000000..4b5edd97 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} +rules: + {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}] + {{- end }} + {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- with .Values.rbac.extraRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/rolebinding.yaml b/install-bundler/install-grafana/grafana/templates/rolebinding.yaml new file mode 100644 index 00000000..58f77c6b --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/secret-env.yaml b/install-bundler/install-grafana/grafana/templates/secret-env.yaml new file mode 100644 index 00000000..eb14aac7 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }}-env + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/secret.yaml b/install-bundler/install-grafana/grafana/templates/secret.yaml new file mode 100644 index 00000000..5cbd5274 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/secret.yaml @@ -0,0 +1,26 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ include "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/service.yaml b/install-bundler/install-grafana/grafana/templates/service.yaml new file mode 100644 index 00000000..43d360b5 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/service.yaml @@ -0,0 +1,55 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . 
}} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.extraExposePorts }} + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/serviceaccount.yaml b/install-bundler/install-grafana/grafana/templates/serviceaccount.yaml new file mode 100644 index 00000000..784e71ba --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +{{- $root := . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/servicemonitor.yaml b/install-bundler/install-grafana/grafana/templates/servicemonitor.yaml new file mode 100644 index 00000000..72396828 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ tpl .Values.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . 
}} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/statefulset.yaml b/install-bundler/install-grafana/grafana/templates/statefulset.yaml new file mode 100644 index 00000000..e6c944a4 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/statefulset.yaml @@ -0,0 +1,56 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ include "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + {{- if .Values.persistence.enabled}} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test-configmap.yaml b/install-bundler/install-grafana/grafana/templates/tests/test-configmap.yaml new file mode 100644 index 00000000..01c96c92 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ include "grafana.fullname" . 
}}/api/health" + + code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test-podsecuritypolicy.yaml b/install-bundler/install-grafana/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100644 index 00000000..1821772a --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }}-test + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test-role.yaml b/install-bundler/install-grafana/grafana/templates/tests/test-role.yaml new file mode 100644 index 00000000..cb4c7820 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}-test] +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test-rolebinding.yaml b/install-bundler/install-grafana/grafana/templates/tests/test-rolebinding.yaml new file mode 100644 index 00000000..f40d791f --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "grafana.fullname" . }}-test +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . 
}} +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test-serviceaccount.yaml b/install-bundler/install-grafana/grafana/templates/tests/test-serviceaccount.yaml new file mode 100644 index 00000000..38fba359 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" +{{- end }} diff --git a/install-bundler/install-grafana/grafana/templates/tests/test.yaml b/install-bundler/install-grafana/grafana/templates/tests/test.yaml new file mode 100644 index 00000000..9fb88426 --- /dev/null +++ b/install-bundler/install-grafana/grafana/templates/tests/test.yaml @@ -0,0 +1,49 @@ +{{- if .Values.testFramework.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + namespace: {{ include "grafana.namespace" . }} +spec: + serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} + {{- with .Values.testFramework.securityContext }} + securityContext: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ include "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/install-bundler/install-grafana/grafana/values.yaml b/install-bundler/install-grafana/grafana/values.yaml new file mode 100644 index 00000000..fde5817a --- /dev/null +++ b/install-bundler/install-grafana/grafana/values.yaml @@ -0,0 +1,1254 @@ +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # Can be tempalted. 
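As an aside on the test templates above: the bats health check is wired to Helm's `test-success` hook, so it can be exercised after an install. A minimal sketch, assuming the release name that install.sh builds (`<namespace>-<env>-grafana`) and a `bundler` namespace; the pod name may differ:

```console
# Run the chart's bats health-check pod defined in templates/tests/ (names illustrative).
helm test bundler-prod-grafana -n bundler
# The test pod wgets /api/health and expects HTTP 200; its logs show the result.
kubectl logs bundler-prod-grafana-test -n bundler
```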
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-(cluster)role + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# apiVersion: "" +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: docker.io/grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. + ## + pullSecrets: [] + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: docker.io/bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + +# Enable creating the grafana configmap +createConfigmap: true + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. 
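Most of the knobs above (replicas, probes, image tag, test framework) are plain Helm values, so they can be layered on top of the bundler's install command with extra `--set` flags. A sketch only, with release name, namespace, chart path and image version all assumed:

```console
# Illustrative overrides; the actual install.sh pins its own values and flags.
helm upgrade --install bundler-prod-grafana ./grafana -n bundler \
  -f grafana/values.yaml \
  --set replicas=2 \
  --set image.tag=10.2.0 \
  --set testFramework.enabled=false
```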
+extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: docker.io/curlimages/curl + tag: 7.85.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana +gossipPortName: gossip +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + targetLabels: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # annotations: + # kubernetes.io/ingress.class: "nginx" + # nginx.ingress.kubernetes.io/rewrite-target: /$1 + # nginx.ingress.kubernetes.io/use-regex: "true" + + # path: /grafana/?(.*) + # hosts: + # - k8s.example.dev + # # pathType is only for k8s >= 1.1= + # pathType: Prefix + + # hosts: + # - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
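Since `serviceMonitor.enabled` defaults to false, scraping Grafana itself through a Prometheus operator needs an explicit override. A hedged example (release/namespace names assumed; requires the ServiceMonitor CRD to already exist in the cluster):

```console
helm upgrade --install bundler-prod-grafana ./grafana -n bundler --reuse-values \
  --set serviceMonitor.enabled=true \
  --set serviceMonitor.interval=30s
# Confirm the ServiceMonitor object was created
kubectl get servicemonitor -n bundler
```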
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Topology Spread Constraints +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. 
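Persistence is off by default, so Grafana state (users, manually created dashboards) is lost on pod restart; if that matters, the PVC defined in templates/pvc.yaml can be switched on. A sketch, assuming the same release name as above and a storage class that exists in the cluster:

```console
helm upgrade --install bundler-prod-grafana ./grafana -n bundler --reuse-values \
  --set persistence.enabled=true \
  --set persistence.size=10Gi \
  --set persistence.storageClassName=standard
# The claim is named after the Grafana release
kubectl get pvc -n bundler
```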
+ extraPvcLabels: {} + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: docker.io/library/busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... 
+## - name: +## valueFrom: +## +envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc. +## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm +## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function +envRenderSecret: {} + +## The names of secrets in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. +## Name is templated. +envFromSecrets: [] +## - name: secret-name +## optional: true + +## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. +## Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core +envFromConfigMaps: [] +## - name: configmap-name +## optional: true + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + # - name: grafana-secrets + # mountPath: /mnt/volume2 + # csi: true + # data: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. 
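The `envFromSecret`/`envRenderSecret` hooks described above are the usual way to keep sensitive Grafana settings out of values files. A sketch using a pre-created Secret; the secret name and SMTP variables are illustrative, not part of this setup:

```console
# Create a Secret holding environment overrides (GF_* variables map to grafana.ini keys)
kubectl create secret generic grafana-extra-env -n bundler \
  --from-literal=GF_SMTP_USER=alerts@example.com \
  --from-literal=GF_SMTP_PASSWORD='change-me'
# Point the chart at it via envFromSecret
helm upgrade --install bundler-prod-grafana ./grafana -n bundler --reuse-values \
  --set envFromSecret=grafana-extra-env
```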
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} + # datasources.yaml: + # apiVersion: 1 + # datasources: + # - access: proxy + # name: Prometheus + # type: prometheus + # url: http://qabundler-test-monitoring-prometheus-server +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 +# deleteDatasources: [] +# - name: Prometheus + +## Configure grafana alerting (can be templated) +## ref: http://docs.grafana.org/administration/provisioning/#alerting +## +alerting: {} + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # contactpoints.yaml: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
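The datasources block is what install.sh ultimately fills in (it envsubsts PROMETHEUS_URL into datasources.yaml and passes the result as custom-values.yaml). One way to verify the provisioned datasource afterwards, using the admin credentials that script sets; service name and local port are assumptions:

```console
kubectl port-forward svc/bundler-prod-grafana 3000:80 -n bundler &
curl -s http://localhost:3000/api/health
curl -s -u admin:password http://localhost:3000/api/datasources | jq '.[].name'
```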
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net +# server: +# domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" + +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + repository: quay.io/kiwigrid/k8s-sidecar + tag: 1.25.1 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with + label: grafana_alert + # value of label that the configmaps with alert are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to shell script to execute after a alert got reloaded + script: null + skipReload: false + # Deploy the alert sidecar as an initContainer in addition to a container. 
+ # Additional alert sidecar volume mounts + extraMounts: [] + # Sets the size limit of the alert sidecar emptyDir volume + sizeLimit: {} + dashboards: + enabled: false + # Additional environment variables for the dashboards sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces. + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. + folderAnnotation: null + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" + # Absolute path to shell script to execute after a configmap got reloaded + script: null + skipReload: false + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + # Additional dashboard sidecar volume mounts + extraMounts: [] + # Sets the size limit of the dashboard sidecar emptyDir volume + sizeLimit: {} + datasources: + enabled: false + # Additional environment variables for the datasourcessidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. 
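For reference, the dashboards sidecar above offers an alternative to the `--set-file` dashboards used by install.sh: with `sidecar.dashboards.enabled=true`, any ConfigMap carrying the `grafana_dashboard` label is picked up. A sketch only (ConfigMap name and dashboard file are illustrative):

```console
kubectl create configmap bundler-extra-dashboard -n bundler \
  --from-file=extra-dashboard.json=./my-dashboard.json
# Any label value matches because sidecar.dashboards.labelValue is ""
kubectl label configmap bundler-extra-dashboard -n bundler grafana_dashboard=1
```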
+ # ignoreAlreadyProcessed: true + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + # Absolute path to shell script to execute after a datasource got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any datasources defined at startup time. + initDatasources: false + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: {} + plugins: + enabled: false + # Additional environment variables for the plugins sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with plugins are marked with + label: grafana_plugin + # value of label that the configmaps with plugins are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) 
+ # watchClientTimeout: 60 + # + # Endpoint to send request to reload plugins + reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" + # Absolute path to shell script to execute after a plugin got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any plugins defined at startup time. + initPlugins: false + # Sets the size limit of the plugin sidecar emptyDir volume + sizeLimit: {} + notifiers: + enabled: false + # Additional environment variables for the notifierssidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # value of label that the configmaps with notifiers are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload notifiers + reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" + # Absolute path to shell script to execute after a notifier got reloaded + script: null + skipReload: false + # Deploy the notifier sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any notifiers defined at startup time. + initNotifiers: false + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: {} + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + image: + # image-renderer Image repository + repository: docker.io/grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. 
+ ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: + limits: + cpu: 500m + memory: 500Mi + requests: + cpu: 100m + memory: 100Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. 
When set to false, only pods with the correct + ## client label will have network access to grafana port defined. + ## When true, grafana will accept connections from any source + ## (with the correct destination port). + ## + ingress: true + ## @param networkPolicy.ingress When true enables the creation + ## an ingress network policy + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the grafana. + ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. + enabled: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## ports: + ## - port: 80 + ## - port: 443 + ## + ## + ## + ## + ## + ## + +# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option +enableKubeBackwardCompatibility: false +useStatefulSet: false +# Create a dynamic manifests via values: +extraObjects: [] + # - apiVersion: "kubernetes-client.io/v1" + # kind: ExternalSecret + # metadata: + # name: grafana-secrets + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword diff --git a/install-bundler/install-grafana/install.sh b/install-bundler/install-grafana/install.sh new file mode 100644 index 00000000..acae3633 --- /dev/null +++ b/install-bundler/install-grafana/install.sh @@ -0,0 +1,54 @@ +#!/bin/bash +set -e +RED='\033[0;31m' +NC='\033[0m' +GREEN='\033[0;32m' +ENV=$1 +NAMESPACE=$2 +HOST=localhost +HELM_RELEASE="$NAMESPACE-$ENV-grafana" + +# Check if the required arguments are passed +if [ "$#" -ne 2 ]; then + echo "Usage: $0 for grafana" + exit 1 +fi + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +echo "Directory of grafana $DIR" + +export PROMETHEUS_URL=http://$NAMESPACE-$ENV-monitoring-prometheus-server +envsubst < $DIR/datasources.yaml > $DIR/custom-values.yaml + +printf "\n${GREEN} ####### Deploying GRAFANA $HELM_RELEASE to $NAMESPACE ${NC} #######"; +printf "\n${RED} The grafana datasource PROMETHEUS URL=<<$PROMETHEUS_URL>> \ + according to format <>, Please change it if required ${NC}\n"; + +# Prompt user for confirmation +read -p "Is the PROMETHEUS URL correct? 
(y/n) " -n 1 -r +echo # move to a new line + +if [[ $REPLY =~ ^[Nn]$ ]] +then + read -p "Please enter the correct PROMETHEUS URL: " PROMETHEUS_URL + printf "Updated PROMETHEUS URL: $PROMETHEUS_URL" +fi + +helm upgrade --install --wait --timeout 120s $HELM_RELEASE $DIR/grafana/ \ + -n $NAMESPACE \ + --set provider=local \ + -f $DIR/grafana/values.yaml \ + -f $DIR/custom-values.yaml \ + --set adminUser=admin \ + --set host=$HOST \ + --set adminPassword=password \ + --set-file dashboards.default.cluster-monitoring.json=$DIR/kube-cluster-monitoring.json \ + --set-file dashboards.default.node-exporter.json=$DIR/node-exporter-full.json \ + --set-file dashboards.default.node-exporter.json=$DIR/main.json + +printf "GRAFANA deployement completed"; + +#grafana.ini: +# server: +# root_url: http://{{.Values.host}}/grafana \ No newline at end of file diff --git a/install-bundler/install-mongo/install.sh b/install-bundler/install-mongo/install.sh new file mode 100644 index 00000000..9d99e10e --- /dev/null +++ b/install-bundler/install-mongo/install.sh @@ -0,0 +1,46 @@ +#!/bin/bash +set -e +NC='\033[0m' +YELLOW='\033[0;33m' + +NAMESPACE=$1 +HELM_RELEASE="mongo" +PAYMASTER_DBNAME=paymaster-dashboard +PAYMASTER_DBUSER=usertwo +PAYMASTER_DBPASWORD=usertwopassword +RELAYERNODE_DBNAME=relayer-node-service +RELAYERNODE_DBUSER=userone +RELAYERNODE_DBPASWORD=useronepassword + +# Check if the required arguments are passed +if [ "$#" -ne 1 ]; then + echo "Usage: $0 for Mongo" + exit 1 +fi + + +printf "\n${YELLOW}------ Deploying %s to %s -----${NC}\n" "$HELM_RELEASE" "$NAMESPACE" +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/mongodb/" \ + -n "$NAMESPACE" \ + -f "$DIR/mongodb/values.yaml" \ + --set auth.usernames[0]=$RELAYERNODE_DBUSER \ + --set auth.passwords[0]=$RELAYERNODE_DBPASWORD \ + --set auth.databases[0]=$RELAYERNODE_DBNAME \ + --set auth.usernames[1]=$PAYMASTER_DBUSER \ + --set auth.passwords[1]=$PAYMASTER_DBPASWORD \ + --set auth.databases[1]=$PAYMASTER_DBNAME \ + --set metrics.enabled=true \ + --set nameOverride=mongo + +printf "Mongo deployment completed\n"; +echo "To connect to the databases, use the following URLs:" +echo "Paymaster dashboard DB URL:" +echo "mongodb://$PAYMASTER_DBUSER:$PAYMASTER_DBPASWORD@$HELM_RELEASE.$NAMESPACE.svc.cluster.local:27017/$PAYMASTER_DBNAME" +echo "" +echo "RelayerNode DB URL:" +echo "mongodb://$RELAYERNODE_DBUSER:$RELAYERNODE_DBPASWORD@$HELM_RELEASE.$NAMESPACE.svc.cluster.local:27017/$RELAYERNODE_DBNAME" +echo "" +echo "To test connections" +echo "kubectl run -i --tty --rm debugmongo --image=mongo --restart=Never --namespace=$NAMESPACE -- bash" +echo "mongosh mongodb://mongo.$NAMESPACE.svc.cluster.local:27017/relayer-node-service -u userone -p useronepassword" \ No newline at end of file diff --git a/install-bundler/install-mongo/mongodb/.helmignore b/install-bundler/install-mongo/mongodb/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/install-bundler/install-mongo/mongodb/Chart.lock b/install-bundler/install-mongo/mongodb/Chart.lock new file mode 100644 index 00000000..03e13592 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.11.0 +digest: sha256:250b173b89f097db6e85c9c203d88df6076bf4f7930c9db7fcc072f428922276 +generated: "2023-09-14T04:30:25.185303552Z" diff --git a/install-bundler/install-mongo/mongodb/Chart.yaml b/install-bundler/install-mongo/mongodb/Chart.yaml new file mode 100644 index 00000000..0d4bd94a --- /dev/null +++ b/install-bundler/install-mongo/mongodb/Chart.yaml @@ -0,0 +1,41 @@ +annotations: + category: Database + images: | + - name: kubectl + image: docker.io/bitnami/kubectl:1.25.14-debian-11-r0 + - name: mongodb-exporter + image: docker.io/bitnami/mongodb-exporter:0.39.0-debian-11-r100 + - name: mongodb + image: docker.io/bitnami/mongodb:6.0.10-debian-11-r0 + - name: nginx + image: docker.io/bitnami/nginx:1.25.2-debian-11-r25 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r66 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 6.0.10 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: MongoDB(R) is a relational open source NoSQL database. Easy to use, it + stores data in JSON-like documents. Automated scalability and high-performance. + Ideal for developing cloud native applications. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png +keywords: +- mongodb +- database +- nosql +- cluster +- replicaset +- replication +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: mongodb +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/mongodb +version: 13.18.4 diff --git a/install-bundler/install-mongo/mongodb/README.md b/install-bundler/install-mongo/mongodb/README.md new file mode 100644 index 00000000..c392992f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/README.md @@ -0,0 +1,864 @@ + + +# MongoDB(R) packaged by Bitnami + +MongoDB(R) is a relational open source NoSQL database. Easy to use, it stores data in JSON-like documents. Automated scalability and high-performance. Ideal for developing cloud native applications. + +[Overview of MongoDB®](http://www.mongodb.org) + +Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB(R) is run and maintained by MongoDB, which is a completely separate project from Bitnami. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/mongodb +``` + +## Introduction + +This chart bootstraps a [MongoDB(®)](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use MongoDBreg; in production? 
Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/mongodb +``` + +The command deploys MongoDB(®) on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Architecture + +This chart allows installing MongoDB(®) using two different architecture setups: `standalone` or `replicaset`. Use the `architecture` parameter to choose the one to use: + +```console +architecture="standalone" +architecture="replicaset" +``` + +Refer to the [chart documentation for more information on each of these architectures](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/get-started/understand-architecture/). + +## Parameters + +### Global parameters + +| Name | Description | Value | +| -------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.namespaceOverride` | Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------- | --------------------------------------------------------------------------------------------------------- | --------------- | +| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override mongodb.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonLabels` | Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `commonAnnotations` | Common annotations to add to all Mongo resources (sub-charts are not considered). Evaluated as a template | `{}` | +| `topologyKey` | Override common lib default topology key. 
If empty - "kubernetes.io/hostname" is used | `""` | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### MongoDB(®) parameters + +| Name | Description | Value | +| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `image.registry` | MongoDB(®) image registry | `docker.io` | +| `image.repository` | MongoDB(®) image registry | `bitnami/mongodb` | +| `image.tag` | MongoDB(®) image tag (immutable tags are recommended) | `6.0.10-debian-11-r0` | +| `image.digest` | MongoDB(®) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | MongoDB(®) image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `architecture` | MongoDB(®) architecture (`standalone` or `replicaset`) | `standalone` | +| `useStatefulSet` | Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) | `false` | +| `auth.enabled` | Enable authentication | `true` | +| `auth.rootUser` | MongoDB(®) root user | `root` | +| `auth.rootPassword` | MongoDB(®) root password | `""` | +| `auth.usernames` | List of custom users to be created during the initialization | `[]` | +| `auth.passwords` | List of passwords for the custom users set at `auth.usernames` | `[]` | +| `auth.databases` | List of custom databases to be created during the initialization | `[]` | +| `auth.username` | DEPRECATED: use `auth.usernames` instead | `""` | +| `auth.password` | DEPRECATED: use `auth.passwords` instead | `""` | +| `auth.database` | DEPRECATED: use `auth.databases` instead | `""` | +| `auth.replicaSetKey` | Key used for authentication in the replicaset (only when `architecture=replicaset`) | `""` | +| `auth.existingSecret` | Existing secret with MongoDB(®) credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, `mongodb-replica-set-key`) | `""` | +| `tls.enabled` | Enable MongoDB(®) TLS support between nodes in the cluster as well as between mongo clients and nodes | `false` | +| `tls.autoGenerated` | Generate a custom CA and self-signed certificates | `true` | +| `tls.existingSecret` | Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`) | `""` | +| `tls.caCert` | Custom CA certificated (base64 encoded) | `""` | +| `tls.caKey` | CA certificate private key (base64 encoded) | `""` | +| `tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `tls.standalone.existingSecret` | Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. 
| `""` | +| `tls.replicaset.existingSecrets` | Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `[]` | +| `tls.hidden.existingSecrets` | Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `[]` | +| `tls.arbiter.existingSecret` | Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `""` | +| `tls.image.registry` | Init container TLS certs setup image registry | `docker.io` | +| `tls.image.repository` | Init container TLS certs setup image repository | `bitnami/nginx` | +| `tls.image.tag` | Init container TLS certs setup image tag (immutable tags are recommended) | `1.25.2-debian-11-r25` | +| `tls.image.digest` | Init container TLS certs setup image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `tls.image.pullPolicy` | Init container TLS certs setup image pull policy | `IfNotPresent` | +| `tls.image.pullSecrets` | Init container TLS certs specify docker-registry secret names as an array | `[]` | +| `tls.extraDnsNames` | Add extra dns names to the CA, can solve x509 auth issue for pod clients | `[]` | +| `tls.mode` | Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) | `requireTLS` | +| `tls.resources.limits` | Init container generate-tls-certs resource limits | `{}` | +| `tls.resources.requests` | Init container generate-tls-certs resource requests | `{}` | +| `hostAliases` | Add deployment host aliases | `[]` | +| `replicaSetName` | Name of the replica set (only when `architecture=replicaset`) | `rs0` | +| `replicaSetHostnames` | Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) | `true` | +| `enableIPv6` | Switch to enable/disable IPv6 on MongoDB(®) | `false` | +| `directoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB(®) | `false` | +| `systemLogVerbosity` | MongoDB(®) system log verbosity level | `0` | +| `disableSystemLog` | Switch to enable/disable MongoDB(®) system log | `false` | +| `disableJavascript` | Switch to enable/disable MongoDB(®) server-side JavaScript execution | `false` | +| `enableJournal` | Switch to enable/disable MongoDB(®) Journaling | `true` | +| `configuration` | MongoDB(®) configuration file to be used for Primary and Secondary nodes | `""` | + +### replicaSetConfigurationSettings settings applied during runtime (not via configuration file) + +| Name | Description | Value | +| ----------------------------------------------- | --------------------------------------------------------------------------------------------------- | ------- | +| `replicaSetConfigurationSettings.enabled` | Enable MongoDB(®) Switch to enable/disable configuring MongoDB(®) run time rs.conf settings | `false` | +| `replicaSetConfigurationSettings.configuration` | run-time rs.conf settings | `{}` | +| `existingConfigmap` | Name of existing ConfigMap with MongoDB(®) configuration for Primary and Secondary nodes | `""` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsConfigMap` | Existing ConfigMap with custom initdb scripts | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| 
`extraFlags` | MongoDB(®) additional command line flags | `[]` | +| `extraEnvVars` | Extra environment variables to add to MongoDB(®) pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | + +### MongoDB(®) statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ---------------- | +| `annotations` | Additional labels to be added to the MongoDB(®) statefulset. Evaluated as a template | `{}` | +| `labels` | Annotations to be added to the MongoDB(®) statefulset. Evaluated as a template | `{}` | +| `replicaCount` | Number of MongoDB(®) nodes (only when `architecture=replicaset`) | `2` | +| `updateStrategy.type` | Strategy to use to replace existing MongoDB(®) pods. When architecture=standalone and useStatefulSet=false, | `RollingUpdate` | +| `podManagementPolicy` | Pod management policy for MongoDB(®) | `OrderedReady` | +| `podAffinityPreset` | MongoDB(®) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | MongoDB(®) Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | MongoDB(®) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | MongoDB(®) Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | MongoDB(®) Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | MongoDB(®) Affinity for pod assignment | `{}` | +| `nodeSelector` | MongoDB(®) Node labels for pod assignment | `{}` | +| `tolerations` | MongoDB(®) Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | MongoDB(®) Spread Constraints for Pods | `[]` | +| `lifecycleHooks` | LifecycleHook for the MongoDB(®) container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | MongoDB(®) Termination Grace Period | `""` | +| `podLabels` | MongoDB(®) pod labels | `{}` | +| `podAnnotations` | MongoDB(®) Pod annotations | `{}` | +| `priorityClassName` | Name of the existing priority class to be used by MongoDB(®) pod(s) | `""` | +| `runtimeClassName` | Name of the runtime class to be used by MongoDB(®) pod(s) | `""` | +| `podSecurityContext.enabled` | Enable MongoDB(®) pod(s)' Security Context | `true` | +| `podSecurityContext.fsGroup` | Group ID for the volumes of the MongoDB(®) pod(s) | `1001` | +| `podSecurityContext.sysctls` | sysctl settings of the MongoDB(®) pod(s)' | `[]` | +| `containerSecurityContext.enabled` | Enable MongoDB(®) container(s)' Security Context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the MongoDB(®) container | `1001` | +| `containerSecurityContext.runAsGroup` | Group ID for the MongoDB(®) container | `0` | +| `containerSecurityContext.runAsNonRoot` | Set MongoDB(®) container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate MongoDB(®) pod(s) privileges | `false` | +| `containerSecurityContext.seccompProfile.type` | Set MongoDB(®) container's Security Context seccompProfile type | `RuntimeDefault` | +| `containerSecurityContext.capabilities.drop` | Set 
MongoDB(®) container's Security Context capabilities to drop | `["ALL"]` | +| `resources.limits` | The resources limits for MongoDB(®) containers | `{}` | +| `resources.requests` | The requested resources for MongoDB(®) containers | `{}` | +| `containerPorts.mongodb` | MongoDB(®) container port | `27017` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `20` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `10` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe for MongoDB(®) containers | `{}` | +| `customReadinessProbe` | Override default readiness probe for MongoDB(®) containers | `{}` | +| `customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `initContainers` | Add additional init containers for the hidden node pod(s) | `[]` | +| `sidecars` | Add additional sidecar containers for the MongoDB(®) pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MongoDB(®) container(s) | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes to the MongoDB(®) statefulset | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation for MongoDB(®) pod(s) | `false` | +| `pdb.minAvailable` | Minimum number/percentage of MongoDB(®) pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Maximum number/percentage of MongoDB(®) pods that may be made unavailable after the eviction | `""` | + +### Traffic exposure parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | +| `service.nameOverride` | MongoDB(®) service name | `""` | +| `service.type` | Kubernetes Service type (only for standalone architecture) | `ClusterIP` | +| `service.portName` | MongoDB(®) service port name (only for standalone architecture) | `mongodb` | +| `service.ports.mongodb` | MongoDB(®) service port. 
| `27017` | +| `service.nodePorts.mongodb` | Port to bind to for NodePort and LoadBalancer service types (only for standalone architecture) | `""` | +| `service.clusterIP` | MongoDB(®) service cluster IP (only for standalone architecture) | `""` | +| `service.externalIPs` | Specify the externalIP value ClusterIP service type (only for standalone architecture) | `[]` | +| `service.loadBalancerIP` | loadBalancerIP for MongoDB(®) Service (only for standalone architecture) | `""` | +| `service.loadBalancerClass` | loadBalancerClass for MongoDB(®) Service (only for standalone architecture) | `""` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer (only for standalone architecture) | `[]` | +| `service.allocateLoadBalancerNodePorts` | Wheter to allocate node ports when service type is LoadBalancer | `true` | +| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `service.annotations` | Provide any additional annotations that may be required | `{}` | +| `service.externalTrafficPolicy` | service external traffic policy (only for standalone architecture) | `Local` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to MongoDB(®) nodes (only for replicaset architecture) | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.14-debian-11-r0` | +| `externalAccess.autoDiscovery.image.digest` | Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `externalAccess.externalMaster.host` | External master host to bootstrap from | `""` | +| `externalAccess.externalMaster.port` | Port for MongoDB(®) service external master host | `27017` | +| `externalAccess.service.type` | Kubernetes Service type for external access. 
Allowed values: NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.portName` | MongoDB(®) port name used for external access when service type is LoadBalancer | `mongodb` | +| `externalAccess.service.ports.mongodb` | MongoDB(®) port used for external access when service type is LoadBalancer | `27017` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(®) nodes | `[]` | +| `externalAccess.service.loadBalancerClass` | loadBalancerClass when service type is LoadBalancer | `""` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.allocateLoadBalancerNodePorts` | Wheter to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.service.externalTrafficPolicy` | MongoDB(®) service external traffic policy | `Local` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort | `[]` | +| `externalAccess.service.domain` | Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort | `""` | +| `externalAccess.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `externalAccess.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `externalAccess.hidden.enabled` | Enable Kubernetes external cluster access to MongoDB(®) hidden nodes | `false` | +| `externalAccess.hidden.service.type` | Kubernetes Service type for external access. Allowed values: NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.hidden.service.portName` | MongoDB(®) port name used for external access when service type is LoadBalancer | `mongodb` | +| `externalAccess.hidden.service.ports.mongodb` | MongoDB(®) port used for external access when service type is LoadBalancer | `27017` | +| `externalAccess.hidden.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(®) nodes | `[]` | +| `externalAccess.hidden.service.loadBalancerClass` | loadBalancerClass when service type is LoadBalancer | `""` | +| `externalAccess.hidden.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.hidden.service.allocateLoadBalancerNodePorts` | Wheter to allocate node ports when service type is LoadBalancer | `true` | +| `externalAccess.hidden.service.externalTrafficPolicy` | MongoDB(®) service external traffic policy | `Local` | +| `externalAccess.hidden.service.nodePorts` | Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort. 
Length must be the same as replicaCount | `[]` | +| `externalAccess.hidden.service.domain` | Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort | `""` | +| `externalAccess.hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `externalAccess.hidden.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.hidden.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `externalAccess.hidden.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | + +### Persistence parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | Enable MongoDB(®) data persistence using PVC | `true` | +| `persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` | +| `persistence.resourcePolicy` | Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted | `""` | +| `persistence.storageClass` | PVC Storage Class for MongoDB(®) data volume | `""` | +| `persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for MongoDB(®) data volume | `8Gi` | +| `persistence.annotations` | PVC annotations | `{}` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. 
when using local volumes) | `{}` | +| `persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` | +| `persistence.volumeClaimTemplates.dataSource` | Add dataSource to the VolumeClaimTemplate | `{}` | + +### Backup parameters + +| Name | Description | Value | +| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `backup.enabled` | Enable the logical dump of the database "regularly" | `false` | +| `backup.cronjob.schedule` | Set the cronjob parameter schedule | `@daily` | +| `backup.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Allow` | +| `backup.cronjob.failedJobsHistoryLimit` | Set the cronjob parameter failedJobsHistoryLimit | `1` | +| `backup.cronjob.successfulJobsHistoryLimit` | Set the cronjob parameter successfulJobsHistoryLimit | `3` | +| `backup.cronjob.startingDeadlineSeconds` | Set the cronjob parameter startingDeadlineSeconds | `""` | +| `backup.cronjob.ttlSecondsAfterFinished` | Set the cronjob parameter ttlSecondsAfterFinished | `""` | +| `backup.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` | +| `backup.cronjob.containerSecurityContext.runAsUser` | User ID for the backup container | `1001` | +| `backup.cronjob.containerSecurityContext.runAsGroup` | Group ID for the backup container | `0` | +| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set backup container's Security Context runAsNonRoot | `true` | +| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Is the container itself readonly | `true` | +| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate backup pod(s) privileges | `false` | +| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set backup container's Security Context seccompProfile type | `RuntimeDefault` | +| `backup.cronjob.containerSecurityContext.capabilities.drop` | Set backup container's Security Context capabilities to drop | `["ALL"]` | +| `backup.cronjob.command` | Set backup container's command to run | `[]` | +| `backup.cronjob.labels` | Set the cronjob labels | `{}` | +| `backup.cronjob.annotations` | Set the cronjob annotations | `{}` | +| `backup.cronjob.storage.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` | +| `backup.cronjob.storage.resourcePolicy` | Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted | `""` | +| `backup.cronjob.storage.storageClass` | PVC Storage Class for the backup data volume | `""` | +| `backup.cronjob.storage.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `backup.cronjob.storage.size` | PVC Storage Request for the backup data volume | `8Gi` | +| `backup.cronjob.storage.annotations` | PVC annotations | `{}` | +| `backup.cronjob.storage.mountPath` | Path to mount the volume at | `/backup/mongodb` | +| `backup.cronjob.storage.subPath` | Subdirectory of the volume to mount at | `""` | +| `backup.cronjob.storage.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. 
when using local volumes) | `{}` | + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for MongoDB(®) pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `serviceAccount.annotations` | Additional Service Account annotations | `{}` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | +| `rbac.rules` | Custom rules to create following the role specification | `[]` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.allowPrivilegeEscalation` | Enable privilege escalation | `false` | +| `podSecurityPolicy.privileged` | Allow privileged | `false` | +| `podSecurityPolicy.spec` | Specify the full spec to use for Pod Security Policy | `{}` | + +### Volume Permissions parameters + +| Name | Description | Value | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r66` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.securityContext.runAsUser` | User ID for the volumePermissions container | `0` | + +### Arbiter parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ---------------- | +| `arbiter.enabled` | Enable deploying the arbiter | `true` | +| `arbiter.hostAliases` | Add deployment host aliases | `[]` | +| `arbiter.configuration` | Arbiter configuration file to be used | `""` | +| `arbiter.existingConfigmap` | Name of existing ConfigMap with Arbiter configuration | `""` | +| `arbiter.command` | Override default container command (useful when using custom images) | `[]` | +| `arbiter.args` | Override default container args (useful when using custom images) | `[]` | +| `arbiter.extraFlags` | Arbiter additional command line flags | `[]` | +| `arbiter.extraEnvVars` | Extra environment variables to add to Arbiter pods | `[]` | +| `arbiter.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `arbiter.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | +| `arbiter.annotations` | Additional labels to be added to the Arbiter statefulset | `{}` | +| `arbiter.labels` | Annotations to be added to the Arbiter statefulset | `{}` | +| `arbiter.topologySpreadConstraints` | MongoDB(®) Spread Constraints for arbiter Pods | `[]` | +| `arbiter.lifecycleHooks` | LifecycleHook for the Arbiter container to automate configuration before or after startup | `{}` | +| `arbiter.terminationGracePeriodSeconds` | Arbiter Termination Grace Period | `""` | +| `arbiter.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` | +| `arbiter.podManagementPolicy` | Pod management policy for MongoDB(®) | `OrderedReady` | +| `arbiter.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `arbiter.podAffinityPreset` | Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `arbiter.podAntiAffinityPreset` | Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `arbiter.nodeAffinityPreset.type` | Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `arbiter.nodeAffinityPreset.key` | Arbiter Node label key to match Ignored if `affinity` is set. | `""` | +| `arbiter.nodeAffinityPreset.values` | Arbiter Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `arbiter.affinity` | Arbiter Affinity for pod assignment | `{}` | +| `arbiter.nodeSelector` | Arbiter Node labels for pod assignment | `{}` | +| `arbiter.tolerations` | Arbiter Tolerations for pod assignment | `[]` | +| `arbiter.podLabels` | Arbiter pod labels | `{}` | +| `arbiter.podAnnotations` | Arbiter Pod annotations | `{}` | +| `arbiter.priorityClassName` | Name of the existing priority class to be used by Arbiter pod(s) | `""` | +| `arbiter.runtimeClassName` | Name of the runtime class to be used by Arbiter pod(s) | `""` | +| `arbiter.podSecurityContext.enabled` | Enable Arbiter pod(s)' Security Context | `true` | +| `arbiter.podSecurityContext.fsGroup` | Group ID for the volumes of the Arbiter pod(s) | `1001` | +| `arbiter.podSecurityContext.sysctls` | sysctl settings of the Arbiter pod(s)' | `[]` | +| `arbiter.containerSecurityContext.enabled` | Enable Arbiter container(s)' Security Context | `true` | +| `arbiter.containerSecurityContext.runAsUser` | User ID for the Arbiter container | `1001` | +| `arbiter.containerSecurityContext.runAsGroup` | Group ID for the Arbiter container | `0` | +| `arbiter.containerSecurityContext.runAsNonRoot` | Set Arbiter containers' Security Context runAsNonRoot | `true` | +| `arbiter.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate Arbiter pod(s) privileges | `false` | +| `arbiter.containerSecurityContext.seccompProfile.type` | Set Arbiter container's Security Context seccompProfile type | `RuntimeDefault` | +| `arbiter.containerSecurityContext.capabilities.drop` | Set Arbiter container's Security Context capabilities to drop | `["ALL"]` | +| `arbiter.resources.limits` | The resources limits for Arbiter containers | `{}` | +| `arbiter.resources.requests` | The requested resources for Arbiter containers | `{}` | +| `arbiter.containerPorts.mongodb` | MongoDB(®) arbiter container port | `27017` | +| `arbiter.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `arbiter.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `arbiter.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `arbiter.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `arbiter.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `arbiter.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `arbiter.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `arbiter.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `arbiter.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` | +| `arbiter.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `arbiter.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `arbiter.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `arbiter.startupProbe.enabled` | Enable startupProbe | `false` | +| `arbiter.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `arbiter.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `arbiter.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `arbiter.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `arbiter.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `arbiter.customLivenessProbe` | Override default liveness probe for Arbiter containers 
| `{}` | +| `arbiter.customReadinessProbe` | Override default readiness probe for Arbiter containers | `{}` | +| `arbiter.customStartupProbe` | Override default startup probe for Arbiter containers | `{}` | +| `arbiter.initContainers` | Add additional init containers for the Arbiter pod(s) | `[]` | +| `arbiter.sidecars` | Add additional sidecar containers for the Arbiter pod(s) | `[]` | +| `arbiter.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Arbiter container(s) | `[]` | +| `arbiter.extraVolumes` | Optionally specify extra list of additional volumes to the Arbiter statefulset | `[]` | +| `arbiter.pdb.create` | Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) | `false` | +| `arbiter.pdb.minAvailable` | Minimum number/percentage of Arbiter pods that should remain scheduled | `1` | +| `arbiter.pdb.maxUnavailable` | Maximum number/percentage of Arbiter pods that may be made unavailable | `""` | +| `arbiter.service.nameOverride` | The arbiter service name | `""` | +| `arbiter.service.ports.mongodb` | MongoDB(®) service port | `27017` | +| `arbiter.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `arbiter.service.annotations` | Provide any additional annotations that may be required | `{}` | +| `arbiter.service.headless.annotations` | Annotations for the headless service. | `{}` | + +### Hidden Node parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------- | +| `hidden.enabled` | Enable deploying the hidden nodes | `false` | +| `hidden.hostAliases` | Add deployment host aliases | `[]` | +| `hidden.configuration` | Hidden node configuration file to be used | `""` | +| `hidden.existingConfigmap` | Name of existing ConfigMap with Hidden node configuration | `""` | +| `hidden.command` | Override default container command (useful when using custom images) | `[]` | +| `hidden.args` | Override default container args (useful when using custom images) | `[]` | +| `hidden.extraFlags` | Hidden node additional command line flags | `[]` | +| `hidden.extraEnvVars` | Extra environment variables to add to Hidden node pods | `[]` | +| `hidden.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` | +| `hidden.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` | +| `hidden.annotations` | Additional labels to be added to thehidden node statefulset | `{}` | +| `hidden.labels` | Annotations to be added to the hidden node statefulset | `{}` | +| `hidden.topologySpreadConstraints` | MongoDB(®) Spread Constraints for hidden Pods | `[]` | +| `hidden.lifecycleHooks` | LifecycleHook for the Hidden container to automate configuration before or after startup | `{}` | +| `hidden.replicaCount` | Number of hidden nodes (only when `architecture=replicaset`) | `1` | +| `hidden.terminationGracePeriodSeconds` | Hidden Termination Grace Period | `""` | +| `hidden.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` | +| `hidden.podManagementPolicy` | Pod management policy for hidden node | `OrderedReady` | +| `hidden.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` | +| `hidden.podAffinityPreset` | Hidden node Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `hidden.podAntiAffinityPreset` | Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `hidden.nodeAffinityPreset.type` | Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `hidden.nodeAffinityPreset.key` | Hidden Node label key to match Ignored if `affinity` is set. | `""` | +| `hidden.nodeAffinityPreset.values` | Hidden Node label values to match. Ignored if `affinity` is set. | `[]` | +| `hidden.affinity` | Hidden node Affinity for pod assignment | `{}` | +| `hidden.nodeSelector` | Hidden node Node labels for pod assignment | `{}` | +| `hidden.tolerations` | Hidden node Tolerations for pod assignment | `[]` | +| `hidden.podLabels` | Hidden node pod labels | `{}` | +| `hidden.podAnnotations` | Hidden node Pod annotations | `{}` | +| `hidden.priorityClassName` | Name of the existing priority class to be used by hidden node pod(s) | `""` | +| `hidden.runtimeClassName` | Name of the runtime class to be used by hidden node pod(s) | `""` | +| `hidden.podSecurityContext.enabled` | Enable Hidden pod(s)' Security Context | `true` | +| `hidden.podSecurityContext.fsGroup` | Group ID for the volumes of the Hidden pod(s) | `1001` | +| `hidden.podSecurityContext.sysctls` | sysctl settings of the Hidden pod(s)' | `[]` | +| `hidden.containerSecurityContext.enabled` | Enable Hidden container(s)' Security Context | `true` | +| `hidden.containerSecurityContext.runAsUser` | User ID for the Hidden container | `1001` | +| `hidden.containerSecurityContext.runAsGroup` | Group ID for the Hidden container | `0` | +| `hidden.containerSecurityContext.runAsNonRoot` | Set Hidden containers' Security Context runAsNonRoot | `true` | +| `hidden.containerSecurityContext.allowPrivilegeEscalation` | Set Hidden containers' Security Context allowPrivilegeEscalation | `false` | +| `hidden.containerSecurityContext.seccompProfile.type` | Set Hidden container's Security Context seccompProfile type | `RuntimeDefault` | +| `hidden.containerSecurityContext.capabilities.drop` | Set Hidden container's Security Context capabilities to drop | `["ALL"]` | +| `hidden.resources.limits` | The resources limits for hidden node containers | `{}` | +| `hidden.resources.requests` | The requested resources for hidden node containers | `{}` | +| `hidden.containerPorts.mongodb` | MongoDB(®) hidden container port | `27017` | +| `hidden.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `hidden.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `hidden.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` | +| `hidden.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `hidden.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `hidden.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `hidden.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `hidden.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `hidden.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` | +| `hidden.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `hidden.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `hidden.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `hidden.startupProbe.enabled` | Enable 
startupProbe | `false` | +| `hidden.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `hidden.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `hidden.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `hidden.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `hidden.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `hidden.customLivenessProbe` | Override default liveness probe for hidden node containers | `{}` | +| `hidden.customReadinessProbe` | Override default readiness probe for hidden node containers | `{}` | +| `hidden.customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `hidden.initContainers` | Add init containers to the MongoDB(®) Hidden pods. | `[]` | +| `hidden.sidecars` | Add additional sidecar containers for the hidden node pod(s) | `[]` | +| `hidden.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the hidden node container(s) | `[]` | +| `hidden.extraVolumes` | Optionally specify extra list of additional volumes to the hidden node statefulset | `[]` | +| `hidden.pdb.create` | Enable/disable a Pod Disruption Budget creation for hidden node pod(s) | `false` | +| `hidden.pdb.minAvailable` | Minimum number/percentage of hidden node pods that should remain scheduled | `1` | +| `hidden.pdb.maxUnavailable` | Maximum number/percentage of hidden node pods that may be made unavailable | `""` | +| `hidden.persistence.enabled` | Enable hidden node data persistence using PVC | `true` | +| `hidden.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `hidden.persistence.storageClass` | PVC Storage Class for hidden node data volume | `""` | +| `hidden.persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` | +| `hidden.persistence.size` | PVC Storage Request for hidden node data volume | `8Gi` | +| `hidden.persistence.annotations` | PVC annotations | `{}` | +| `hidden.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB(®) images. | `/bitnami/mongodb` | +| `hidden.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments | `""` | +| `hidden.persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` | +| `hidden.persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` | +| `hidden.persistence.volumeClaimTemplates.dataSource` | Set volumeClaimTemplate dataSource | `{}` | +| `hidden.service.portName` | MongoDB(®) service port name | `mongodb` | +| `hidden.service.ports.mongodb` | MongoDB(®) service port | `27017` | +| `hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `hidden.service.annotations` | Provide any additional annotations that may be required | `{}` | +| `hidden.service.headless.annotations` | Annotations for the headless service. 
| `{}` | + +### Metrics parameters + +| Name | Description | Value | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `metrics.enabled` | Enable using a sidecar Prometheus exporter | `false` | +| `metrics.image.registry` | MongoDB(®) Prometheus exporter image registry | `docker.io` | +| `metrics.image.repository` | MongoDB(®) Prometheus exporter image repository | `bitnami/mongodb-exporter` | +| `metrics.image.tag` | MongoDB(®) Prometheus exporter image tag (immutable tags are recommended) | `0.39.0-debian-11-r100` | +| `metrics.image.digest` | MongoDB(®) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | MongoDB(®) Prometheus exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.username` | String with username for the metrics exporter | `""` | +| `metrics.password` | String with password for the metrics exporter | `""` | +| `metrics.compatibleMode` | Enables old style mongodb-exporter metrics | `true` | +| `metrics.collector.all` | Enable all collectors. Same as enabling all individual metrics | `false` | +| `metrics.collector.diagnosticdata` | Boolean Enable collecting metrics from getDiagnosticData | `true` | +| `metrics.collector.replicasetstatus` | Boolean Enable collecting metrics from replSetGetStatus | `true` | +| `metrics.collector.dbstats` | Boolean Enable collecting metrics from dbStats | `false` | +| `metrics.collector.topmetrics` | Boolean Enable collecting metrics from top admin command | `false` | +| `metrics.collector.indexstats` | Boolean Enable collecting metrics from $indexStats | `false` | +| `metrics.collector.collstats` | Boolean Enable collecting metrics from $collStats | `false` | +| `metrics.collector.collstatsColls` | List of \.\ to get $collStats | `[]` | +| `metrics.collector.indexstatsColls` | List - List of \.\ to get $indexStats | `[]` | +| `metrics.collector.collstatsLimit` | Number - Disable collstats, dbstats, topmetrics and indexstats collector if there are more than \ collections. 0=No limit | `0` | +| `metrics.extraFlags` | String with extra flags to the metrics exporter | `""` | +| `metrics.command` | Override default container command (useful when using custom images) | `[]` | +| `metrics.args` | Override default container args (useful when using custom images) | `[]` | +| `metrics.resources.limits` | The resources limits for Prometheus exporter containers | `{}` | +| `metrics.resources.requests` | The requested resources for Prometheus exporter containers | `{}` | +| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` | +| `metrics.service.annotations` | Annotations for Prometheus Exporter pods. Evaluated as a template. 
| `{}` | +| `metrics.service.type` | Type of the Prometheus metrics service | `ClusterIP` | +| `metrics.service.ports.metrics` | Port of the Prometheus metrics service | `9216` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.startupProbe.enabled` | Enable startupProbe | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.customLivenessProbe` | Override default liveness probe for MongoDB(®) containers | `{}` | +| `metrics.customReadinessProbe` | Override default readiness probe for MongoDB(®) containers | `{}` | +| `metrics.customStartupProbe` | Override default startup probe for MongoDB(®) containers | `{}` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the metrics container(s) | `[]` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` | +| `metrics.serviceMonitor.labels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | Namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | Rules to be created, check values for an example | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.rootPassword=secretpassword,auth.username=my-user,auth.password=my-password,auth.database=my-database \ + oci://registry-1.docker.io/bitnamicharts/mongodb +``` + +The above command sets the MongoDB(®) `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/mongodb +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Customize a new MongoDB instance + +The [Bitnami MongoDB(®) image](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) supports the use of custom scripts to initialize a fresh instance. In order to execute the scripts, two options are available: + +- Specify them using the `initdbScripts` parameter as dict. +- Define an external Kubernetes ConfigMap with all the initialization scripts by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option. + +The allowed script extensions are `.sh` and `.js`. + +### Replicaset: Access MongoDB(®) nodes from outside the cluster + +In order to access MongoDB(®) nodes from outside the cluster when using a replicaset architecture, a specific service per MongoDB(®) pod will be created. There are two ways of configuring external access: + +- Using LoadBalancer services +- Using NodePort services. + +Refer to the [chart documentation for more details and configuration examples](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-external-access-replicaset/). + +### Bootstrapping with an External Cluster + +This chart is equipped with the ability to bring online a set of Pods that connect to an existing MongoDB(®) deployment that lies outside of Kubernetes. 
This effectively creates a hybrid MongoDB(®) Deployment where both Pods in Kubernetes and Instances such as Virtual Machines can partake in a single MongoDB(®) Deployment. This is helpful in situations where one may be migrating MongoDB(®) from Virtual Machines into Kubernetes, for example. To take advantage of this, use the following as an example configuration: + +```yaml +externalAccess: + externalMaster: + enabled: true + host: external-mongodb-0.internal +``` + +:warning: To bootstrap MongoDB(®) with an external master that lies outside of Kubernetes, be sure to set up external access using any of the suggested methods in this chart to have connectivity between the MongoDB(®) members. :warning: + +### Add extra environment variables + +To add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use Sidecars and Init Containers + +If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter. + +Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-sidecar-init-containers/). + +## Persistence + +The [Bitnami MongoDB(®)](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) image stores the MongoDB(®) data and configurations at the `/bitnami/mongodb` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. + +If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/). + +## Use custom Prometheus rules + +Custom Prometheus rules can be defined for the Prometheus Operator by using the `prometheusRule` parameter. + +Refer to the [chart documentation for an example of a custom rule](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/use-prometheus-rules/). + +## Enable SSL/TLS + +This chart supports enabling SSL/TLS between nodes in the cluster, as well as between MongoDB(®) clients and nodes, by setting the `MONGODB_EXTRA_FLAGS` and `MONGODB_CLIENT_EXTRA_FLAGS` container environment variables, together with the correct `MONGODB_ADVERTISED_HOSTNAME`. To enable full TLS encryption, set the `tls.enabled` parameter to `true`. + +Refer to the [chart documentation for more information on enabling TLS](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/enable-tls/). + +### Set Pod affinity + +This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
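+
+For reference, a minimal sketch of a custom affinity could look like the example below. It assumes the chart's top-level `affinity` value (the one applied to the MongoDB(®) pods) and a purely illustrative `disktype: ssd` node label; adapt the key and values to labels that actually exist on your nodes:
+
+```yaml
+# Illustrative values.yaml snippet: schedule the MongoDB(R) pods only on nodes labeled disktype=ssd
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+        - matchExpressions:
+            - key: disktype
+              operator: In
+              values:
+                - ssd
+```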
+
+As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+If authentication is enabled, it's necessary to set the `auth.rootPassword` (also `auth.replicaSetKey` when using a replicaset architecture) when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password, and run the command below to upgrade your chart:
+
+```console
+helm upgrade my-release oci://registry-1.docker.io/bitnamicharts/mongodb --set auth.rootPassword=[PASSWORD] (--set auth.replicaSetKey=[REPLICASETKEY])
+```
+
+> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
+
+### To 12.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `strategyType` is replaced by `updateStrategy`
+- `service.port` is renamed to `service.ports.mongodb`
+- `service.nodePort` is renamed to `service.nodePorts.mongodb`
+- `externalAccess.service.port` is renamed to `externalAccess.hidden.service.ports.mongodb`
+- `rbac.role.rules` is renamed to `rbac.rules`
+- `externalAccess.hidden.service.port` is renamed to `externalAccess.hidden.service.ports.mongodb`
+- `hidden.strategyType` is replaced by `hidden.updateStrategy`
+- `metrics.serviceMonitor.relabellings` is renamed to `metrics.serviceMonitor.relabelings` (typo fixed)
+- `metrics.serviceMonitor.additionalLabels` is renamed to `metrics.serviceMonitor.labels`
+
+Additionally, this version also updates the MongoDB image dependency to its newest major version, 5.0.
+
+### To 11.0.0
+
+In this version, the mongodb-exporter bundled as part of this Helm chart was updated to a new version which, even if it is not a major change, can contain breaking changes (from `0.11.X` to `0.30.X`).
+Please visit the release notes from the upstream project at
+
+### To 10.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/upgrade-helm3/).
+
+### To 9.0.0
+
+MongoDB(®) container images were updated to `4.4.x`, which can affect compatibility with older versions of MongoDB(®).
Refer to the following guides to upgrade your applications:
+
+- [Standalone](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-standalone/)
+- [Replica Set](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-replica-set/)
+
+### To 8.0.0
+
+- The architecture used to configure MongoDB(®) as a replicaset was completely refactored. Now, both primary and secondary nodes are part of the same statefulset.
+- Chart labels were adapted to follow the Helm charts best practices.
+- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+- Several parameters were renamed or removed in favor of new ones in this major version. These are the most important ones:
+  - `replicas` is renamed to `replicaCount`.
+  - Authentication parameters are reorganized under the `auth.*` parameter:
+    - `usePassword` is renamed to `auth.enabled`.
+    - `mongodbRootPassword`, `mongodbUsername`, `mongodbPassword`, `mongodbDatabase`, and `replicaSet.key` are now `auth.rootPassword`, `auth.username`, `auth.password`, `auth.database`, and `auth.replicaSetKey` respectively.
+  - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
+  - Parameters prefixed with `mongodb` are renamed by removing the prefix, e.g. `mongodbEnableIPv6` is renamed to `enableIPv6`.
+  - Parameters affecting Arbiter nodes are reorganized under the `arbiter.*` parameter.
+
+Consequences:
+
+- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MongoDB(®) chart, and migrate your data by creating a backup of the database, and restoring it on the new release.
+
+### To 7.0.0
+
+From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example:
+
+```yaml
+ingress:
+  hosts:
+    - name: mongodb.local
+      path: /
+```
+
+### To 6.0.0
+
+From this version, `mongodbEnableIPv6` is set to `false` by default in order to work properly in most k8s clusters. If you want to use IPv6 support, you need to set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command.
+You can find more information in the [`bitnami/mongodb` image README](https://github.com/bitnami/containers/tree/main/bitnami/mongodb#readme).
+
+### To 5.0.0
+
+When enabling replicaset configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is `my-release`:
+
+```console
+kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false
+```
+
+### Add extra deployment options
+
+To add extra deployments (useful for advanced features like sidecars), use the `extraDeploy` property.
+ +In the example below, you can find how to use a example here for a [MongoDB replica set pod labeler sidecar](https://github.com/combor/k8s-mongo-labeler-sidecar) to identify the primary pod and dynamically label it as the primary node: + +```yaml +extraDeploy: + - apiVersion: v1 + kind: Service + metadata: + name: mongodb-primary + namespace: default + labels: + app.kubernetes.io/component: mongodb + app.kubernetes.io/instance: mongodb + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/name: mongodb + spec: + type: NodePort + externalTrafficPolicy: Cluster + ports: + - name: mongodb-primary + port: 30001 + nodePort: 30001 + protocol: TCP + targetPort: mongodb + selector: + app.kubernetes.io/component: mongodb + app.kubernetes.io/instance: mongodb + app.kubernetes.io/name: mongodb + primary: "true" +``` + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/install-bundler/install-mongo/mongodb/charts/common/.helmignore b/install-bundler/install-mongo/mongodb/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/install-mongo/mongodb/charts/common/Chart.yaml b/install-bundler/install-mongo/mongodb/charts/common/Chart.yaml new file mode 100644 index 00000000..f62cb205 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.11.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.11.0 diff --git a/install-bundler/install-mongo/mongodb/charts/common/README.md b/install-bundler/install-mongo/mongodb/charts/common/README.md new file mode 100644 index 00000000..fe6a0100 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. 
+ +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_affinities.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..e85b1df4 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- 
end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_capabilities.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..c6d115fe --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_capabilities.tpl @@ -0,0 +1,185 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. 
+*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_errors.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..07ded6f6 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_images.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_images.tpl new file mode 100644 index 00000000..e248d6d0 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_images.tpl @@ -0,0 +1,101 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ingores image revision/prerelease info & fallbacks to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_ingress.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..efa5b85c --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_labels.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..a0534f7f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_labels.tpl @@ -0,0 +1,39 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) (dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_names.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_names.tpl new file mode 100644 index 00000000..a222924f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_secrets.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a193c46b --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. 
The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_storage.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..16405a0f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_tplvalues.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..a8ed7637 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_utils.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..c87040cd --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_utils.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/_warnings.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..66dffc1f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_cassandra.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..eda9aada --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. 
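+The checks only run when Cassandra is enabled and no existing secret is referenced; the required password
+keys are then collected and handed to "common.validations.values.multiple.empty".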
+ +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mariadb.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..17d83a2f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . 
-}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mongodb.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..bbb445b8 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mysql.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..ca3953f8 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_postgresql.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..8c9aa570 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_redis.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..fc0d208d --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_validations.tpl b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..31ceda87 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-mongo/mongodb/charts/common/values.yaml b/install-bundler/install-mongo/mongodb/charts/common/values.yaml new file mode 100644 index 00000000..9abe0e15 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/install-bundler/install-mongo/mongodb/templates/NOTES.txt b/install-bundler/install-mongo/mongodb/templates/NOTES.txt new file mode 100644 index 00000000..131a86b5 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/NOTES.txt @@ -0,0 +1,202 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh + +{{- else }} + +{{- $replicaCount := int .Values.replicaCount }} +{{- $portNumber := int .Values.service.ports.mongodb }} +{{- $fullname := include "mongodb.fullname" . }} +{{- $releaseNamespace := include "mongodb.namespace" . }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- $mongoList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $portNumber) }} +{{- end }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +#################################################################################### +### ERROR: You enabled external access to MongoDB® nodes without specifying ### +### the array of load balancer IPs for MongoDB® nodes. ### +#################################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for MongoDB® nodes. To complete your deployment follow the steps below: + +1. Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $e, $i := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. 
Upgrade your chart: + + helm upgrade --namespace {{ .Release.Namespace }} {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/{{ .Chart.Name }} \ + --set mongodb.replicaCount={{ $replicaCount }} \ + --set mongodb.externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set mongodb.externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set mongodb.externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (and (eq .Values.architecture "standalone") (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort"))) (and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled)) (not .Values.auth.enabled) }} +------------------------------------------------------------------------------- + WARNING + + By not enabling "mongodb.auth.enabled" you have most likely exposed the + MongoDB® service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you enable authentication + by setting the "mongodb.auth.enabled" parameter to "true". + +------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +MongoDB® can be accessed on the following DNS name(s) and ports from within your cluster: + +{{- if eq .Values.architecture "replicaset" }} +{{ join "\n" $mongoList | nindent 4 }} +{{- else }} + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{- if .Values.auth.enabled }} + +To get the root password run: + + export MONGODB_ROOT_PASSWORD=$(kubectl get secret --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.secretName" . }} -o jsonpath="{.data.mongodb-root-password}" | base64 -d) + +{{- end }} +{{- $customUsers := include "mongodb.customUsers" . -}} +{{- $customDatabases := include "mongodb.customDatabases" . -}} +{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} +{{- $customUsersList := splitList "," $customUsers }} +{{- range $index, $user := $customUsersList }} + +To get the password for "{{ $user }}" run: + + export MONGODB_PASSWORD=$(kubectl get secret --namespace {{ include "mongodb.namespace" $ }} {{ include "mongodb.secretName" $ }} -o jsonpath="{.data.mongodb-passwords}" | base64 -d | awk -F',' '{print ${{ add 1 $index }}}') + +{{- end }} +{{- end }} + +To connect to your database, create a MongoDB® client container: + + kubectl run --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.fullname" . }}-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image {{ template "mongodb.image" . }} --command -- bash + +Then, run the following command: + + {{- if eq .Values.architecture "replicaset" }} + mongosh admin --host "{{ join "," $mongoList }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u $MONGODB_ROOT_USER -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- else }} + mongosh admin --host "{{ template "mongodb.service.nameOverride" . }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u $MONGODB_ROOT_USER -p $MONGODB_ROOT_PASSWORD{{- end }} + {{- end }} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled }} + +To connect to your database nodes from outside, you need to add both primary and secondary nodes' hostnames/IPs to your Mongo client.
To obtain them, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + MongoDB® nodes domain: Use your provided hostname to reach MongoDB® nodes, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + MongoDB® nodes domain: you can reach MongoDB® nodes on any of the K8s nodes external IPs. + + kubectl get nodes -o wide + +{{- end }} + + MongoDB® nodes port: You will have a different node port for each MongoDB® node. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -w' + + MongoDB® nodes domain: You will have a different external IP for each MongoDB® node. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "mongodb.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=mongodb,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + MongoDB® nodes port: {{ .Values.externalAccess.service.ports.mongodb }} + +{{- end }} + +{{- else if eq .Values.architecture "standalone" }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "mongodb.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "mongodb.service.nameOverride" . }}) + mongo --host $NODE_IP --port $NODE_PORT {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ template "mongodb.namespace" . }} -w {{ template "mongodb.service.nameOverride" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "mongodb.namespace" . }} {{ template "mongodb.service.nameOverride" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + mongosh --host $SERVICE_IP --port {{ $portNumber }} {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ template "mongodb.namespace" . }} svc/{{ template "mongodb.service.nameOverride" . 
}} {{ $portNumber }}:{{ $portNumber }} & + mongosh --host 127.0.0.1 {{- if .Values.auth.enabled }} --authenticationDatabase admin -p $MONGODB_ROOT_PASSWORD{{- end }} + +{{- end }} +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the MongoDB® Prometheus metrics, get the MongoDB® Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-metrics" (include "mongodb.fullname" .) }} {{ .Values.metrics.service.ports.metrics }}:{{ .Values.metrics.service.ports.metrics }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.service.ports.metrics }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} +{{- end }} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.tls.image }} +{{- include "mongodb.validateValues" . }} diff --git a/install-bundler/install-mongo/mongodb/templates/_helpers.tpl b/install-bundler/install-mongo/mongodb/templates/_helpers.tpl new file mode 100644 index 00000000..cd8afdcb --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/_helpers.tpl @@ -0,0 +1,508 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "mongodb.name" -}} +{{- include "common.names.name" . -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mongodb.fullname" -}} +{{- include "common.names.fullname" . -}} +{{- end -}} + +{{/* +Create a default mongo service name which can be overridden. +*/}} +{{- define "mongodb.service.nameOverride" -}} + {{- if and .Values.service .Values.service.nameOverride -}} + {{- print .Values.service.nameOverride -}} + {{- else -}} + {{- if eq .Values.architecture "replicaset" -}} + {{- printf "%s-headless" (include "mongodb.fullname" .) -}} + {{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Create a default mongo arbiter service name which can be overridden. +*/}} +{{- define "mongodb.arbiter.service.nameOverride" -}} + {{- if and .Values.arbiter.service .Values.arbiter.service.nameOverride -}} + {{- print .Values.arbiter.service.nameOverride -}} + {{- else -}} + {{- printf "%s-arbiter-headless" (include "mongodb.fullname" .) 
-}} + {{- end }} +{{- end }} + +{{/* +Return the proper MongoDB® image name +*/}} +{{- define "mongodb.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "mongodb.metrics.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "mongodb.volumePermissions.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container auto-discovery image) +*/}} +{{- define "mongodb.externalAccess.autoDiscovery.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper image name (for the TLS Certs image) +*/}} +{{- define "mongodb.tls.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.tls.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "mongodb.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.metrics.image .Values.volumePermissions.image .Values.tls.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "mongodb.namespace" -}} + {{- if and .Values.global .Values.global.namespaceOverride -}} + {{- print .Values.global.namespaceOverride -}} + {{- else -}} + {{- print .Release.Namespace -}} + {{- end }} +{{- end -}} +{{- define "mongodb.serviceMonitor.namespace" -}} + {{- if .Values.metrics.serviceMonitor.namespace -}} + {{- print .Values.metrics.serviceMonitor.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} +{{- define "mongodb.prometheusRule.namespace" -}} + {{- if .Values.metrics.prometheusRule.namespace -}} + {{- print .Values.metrics.prometheusRule.namespace -}} + {{- else -}} + {{- include "mongodb.namespace" . -}} + {{- end }} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. If the name is not set it will default to either mongodb.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "mongodb.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{- default (include "mongodb.fullname" .) (print .Values.serviceAccount.name) -}} + {{- else -}} + {{- default "default" (print .Values.serviceAccount.name) -}} + {{- end -}} +{{- end -}} + +{{/* +Return the list of custom users to create during the initialization (string format) +*/}} +{{- define "mongodb.customUsers" -}} + {{- $customUsers := list -}} + {{- if .Values.auth.username -}} + {{- $customUsers = append $customUsers .Values.auth.username }} + {{- end }} + {{- range .Values.auth.usernames }} + {{- $customUsers = append $customUsers . 
}} + {{- end }} + {{- printf "%s" (default "" (join "," $customUsers)) -}} +{{- end -}} + +{{/* +Return the list of passwords for the custom users (string format) +*/}} +{{- define "mongodb.customPasswords" -}} + {{- $customPasswords := list -}} + {{- if .Values.auth.password -}} + {{- $customPasswords = append $customPasswords .Values.auth.password }} + {{- end }} + {{- range .Values.auth.passwords }} + {{- $customPasswords = append $customPasswords . }} + {{- end }} + {{- printf "%s" (default "" (join "," $customPasswords)) -}} +{{- end -}} + +{{/* +Return the list of custom databases to create during the initialization (string format) +*/}} +{{- define "mongodb.customDatabases" -}} + {{- $customDatabases := list -}} + {{- if .Values.auth.database -}} + {{- $customDatabases = append $customDatabases .Values.auth.database }} + {{- end }} + {{- range .Values.auth.databases }} + {{- $customDatabases = append $customDatabases . }} + {{- end }} + {{- printf "%s" (default "" (join "," $customDatabases)) -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration +*/}} +{{- define "mongodb.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® +*/}} +{{- define "mongodb.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret with MongoDB® credentials +*/}} +{{- define "mongodb.secretName" -}} + {{- if .Values.auth.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} + {{- else -}} + {{- printf "%s" (include "mongodb.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for MongoDB® +*/}} +{{- define "mongodb.createSecret" -}} +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "mongodb.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" .Values.initdbScriptsConfigMap -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if the Arbiter should be deployed +*/}} +{{- define "mongodb.arbiter.enabled" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration for the Arbiter +*/}} +{{- define "mongodb.arbiter.configmapName" -}} +{{- if .Values.arbiter.existingConfigmap -}} + {{- printf "%s" (tpl .Values.arbiter.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-arbiter" (include "mongodb.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® Arbiter +*/}} +{{- define "mongodb.arbiter.createConfigmap" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.arbiter.enabled .Values.arbiter.configuration (not .Values.arbiter.existingConfigmap) }} + {{- true -}} +{{- else -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if the Hidden should be deployed +*/}} +{{- define "mongodb.hidden.enabled" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.hidden.enabled }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configmap with the MongoDB® configuration for the Hidden +*/}} +{{- define "mongodb.hidden.configmapName" -}} +{{- if .Values.hidden.existingConfigmap -}} + {{- printf "%s" (tpl .Values.hidden.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-hidden" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created for MongoDB® Hidden +*/}} +{{- define "mongodb.hidden.createConfigmap" -}} +{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.enabled .Values.hidden.configuration (not .Values.hidden.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "mongodb.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "mongodb.validateValues.pspAndRBAC" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBs" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.customUsersDBsLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.loadBalancerIPsListLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.replicaset.existingSecrets" .) -}} +{{- $messages := append $messages (include "mongodb.validateValues.hidden.existingSecrets" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate RBAC is created when using PSP */}} +{{- define "mongodb.validateValues.pspAndRBAC" -}} +{{- if and (.Values.podSecurityPolicy.create) (not .Values.rbac.create) -}} +mongodb: podSecurityPolicy.create, rbac.create + Both podSecurityPolicy.create and rbac.create must be true, if you want + to create podSecurityPolicy +{{- end -}} +{{- end -}} + +{{/* Validate values of MongoDB® - must provide a valid architecture */}} +{{- define "mongodb.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replicaset") -}} +mongodb: architecture + Invalid architecture selected. Valid values are "standalone" and + "replicaset". 
Please set a valid architecture (--set mongodb.architecture="xxxx") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both auth.usernames and auth.databases are necessary +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb.validateValues.customUsersDBs" -}} +{{- $customUsers := include "mongodb.customUsers" . -}} +{{- $customDatabases := include "mongodb.customDatabases" . -}} +{{- if or (and (empty $customUsers) (not (empty $customDatabases))) (and (not (empty $customUsers)) (empty $customDatabases)) }} +mongodb: auth.usernames, auth.databases + Both auth.usernames and auth.databases must be provided to create + custom users and databases during 1st initialization. + Please set both of them (--set auth.usernames[0]="xxxx",auth.databases[0]="yyyy") +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - both auth.usernames and auth.databases arrays should have the same length +to create a custom user and database during 1st initialization +*/}} +{{- define "mongodb.validateValues.customUsersDBsLength" -}} +{{- if ne (len .Values.auth.usernames) (len .Values.auth.databases) }} +mongodb: auth.usernames, auth.databases + Both auth.usernames and auth.databases arrays should have the same length +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - service type for external access +*/}} +{{- define "mongodb.validateValues.externalAccessServiceType" -}} +{{- if and (eq .Values.architecture "replicaset") (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}} +mongodb: externalAccess.service.type + Available service type for external access are NodePort, LoadBalancer or ClusterIP. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - number of replicas must be the same than LoadBalancer IPs list +*/}} +{{- define "mongodb.validateValues.loadBalancerIPsListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled ) (eq .Values.externalAccess.service.type "LoadBalancer") (not (eq $replicaCount $loadBalancerListLength )) -}} +mongodb: .Values.externalAccess.service.loadBalancerIPs + Number of replicas and loadBalancerIPs array length must be the same. +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - number of replicas must be the same than NodePort list +*/}} +{{- define "mongodb.validateValues.nodePortListLength" -}} +{{- $replicaCount := int .Values.replicaCount }} +{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "NodePort") (not (eq $replicaCount $nodePortListLength )) -}} +mongodb: .Values.externalAccess.service.nodePorts + Number of replicas and nodePorts array length must be the same. 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - RBAC should be enabled when autoDiscovery is enabled +*/}} +{{- define "mongodb.validateValues.externalAccessAutoDiscoveryRBAC" -}} +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }} +mongodb: rbac.create + By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true" + an initContainer will be used to autodetect the external IPs/ports by querying the + K8s API. Please note this initContainer requires specific RBAC resources. You can create them + by specifying "--set rbac.create=true". +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - Number of replicaset secrets must be the same than number of replicaset nodes. +*/}} +{{- define "mongodb.validateValues.replicaset.existingSecrets" -}} +{{- if and .Values.tls.enabled (eq .Values.architecture "replicaset") (not (empty .Values.tls.replicaset.existingSecrets)) }} +{{- $nbSecrets := len .Values.tls.replicaset.existingSecrets -}} +{{- if not (eq $nbSecrets (int .Values.replicaCount)) }} +mongodb: tls.replicaset.existingSecrets + tls.replicaset.existingSecrets Number of secrets and number of replicaset nodes must be the same. +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® - Number of hidden secrets must be the same than number of hidden nodes. +*/}} +{{- define "mongodb.validateValues.hidden.existingSecrets" -}} +{{- if and .Values.tls.enabled (include "mongodb.hidden.enabled" .) (not (empty .Values.tls.hidden.existingSecrets)) }} +{{- $nbSecrets := len .Values.tls.hidden.existingSecrets -}} +{{- if not (eq $nbSecrets (int .Values.hidden.replicaCount)) }} +mongodb: tls.hidden.existingSecrets + tls.hidden.existingSecrets Number of secrets and number of hidden nodes must be the same. +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of MongoDB® exporter URI string - auth.enabled and/or tls.enabled must be enabled or it defaults +*/}} +{{- define "mongodb.mongodb_exporter.uri" -}} + {{- $uriTlsArgs := ternary "tls=true&tlsCertificateKeyFile=/certs/mongodb.pem&tlsCAFile=/certs/mongodb-ca-cert" "" .Values.tls.enabled -}} + {{- if .Values.metrics.username }} + {{- $uriAuth := ternary "$(echo $MONGODB_METRICS_USERNAME | sed -r \"s/@/%40/g;s/:/%3A/g\"):$(echo $MONGODB_METRICS_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}} + {{- printf "mongodb://%slocalhost:%d/admin?%s" $uriAuth (int .Values.containerPorts.mongodb) $uriTlsArgs -}} + {{- else -}} + {{- $uriAuth := ternary "$MONGODB_ROOT_USER:$(echo $MONGODB_ROOT_PASSWORD | sed -r \"s/@/%40/g;s/:/%3A/g\")@" "" .Values.auth.enabled -}} + {{- printf "mongodb://%slocalhost:%d/admin?%s" $uriAuth (int .Values.containerPorts.mongodb) $uriTlsArgs -}} + {{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret object should be created +*/}} +{{- define "mongodb.createTlsSecret" -}} +{{- if and .Values.tls.enabled (not .Values.tls.existingSecret) (include "mongodb.autoGenerateCerts" .) 
}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing MongoDB® TLS certificates +*/}} +{{- define "mongodb.tlsSecretName" -}} +{{- $secretName := .Values.tls.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-ca" (include "mongodb.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if certificates must be auto generated +*/}} +{{- define "mongodb.autoGenerateCerts" -}} +{{- $standalone := (eq .Values.architecture "standalone") | ternary (not .Values.tls.standalone.existingSecret) true -}} +{{- $replicaset := (eq .Values.architecture "replicaset") | ternary (empty .Values.tls.replicaset.existingSecrets) true -}} +{{- $arbiter := (eq (include "mongodb.arbiter.enabled" .) "true") | ternary (not .Values.tls.arbiter.existingSecret) true -}} +{{- $hidden := (eq (include "mongodb.hidden.enabled" .) "true") | ternary (empty .Values.tls.hidden.existingSecrets) true -}} +{{- if and $standalone $replicaset $arbiter $hidden -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Generate argument list for mongodb-exporter +reference: https://github.com/percona/mongodb_exporter/blob/main/REFERENCE.md +*/}} +{{- define "mongodb.exporterArgs" -}} +{{- with .Values.metrics.collector -}} +{{- ternary " --collect-all" "" .all -}} +{{- ternary " --collector.diagnosticdata" "" .diagnosticdata -}} +{{- ternary " --collector.replicasetstatus" "" .replicasetstatus -}} +{{- ternary " --collector.dbstats" "" .dbstats -}} +{{- ternary " --collector.topmetrics" "" .topmetrics -}} +{{- ternary " --collector.indexstats" "" .indexstats -}} +{{- ternary " --collector.collstats" "" .collstats -}} +{{- if .collstatsColls -}} +{{- " --mongodb.collstats-colls=" -}} +{{- join "," .collstatsColls -}} +{{- end -}} +{{- if .indexstatsColls -}} +{{- " --mongodb.indexstats-colls=" -}} +{{- join "," .indexstatsColls -}} +{{- end -}} +{{- $limitArg := print " --collector.collstats-limit=" .collstatsLimit -}} +{{- ne (print .collstatsLimit) "0" | ternary $limitArg "" -}} +{{- end -}} +{{- ternary " --compatible-mode" "" .Values.metrics.compatibleMode -}} +{{- end -}} \ No newline at end of file diff --git a/install-bundler/install-mongo/mongodb/templates/arbiter/configmap.yaml b/install-bundler/install-mongo/mongodb/templates/arbiter/configmap.yaml new file mode 100644 index 00000000..eab1e056 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/arbiter/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.arbiter.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" .
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/arbiter/headless-svc.yaml b/install-bundler/install-mongo/mongodb/templates/arbiter/headless-svc.yaml new file mode 100644 index 00000000..003bebb2 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/arbiter/headless-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.arbiter.service.nameOverride" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if or .Values.arbiter.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-mongodb + port: {{ .Values.arbiter.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.arbiter.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/arbiter/pdb.yaml b/install-bundler/install-mongo/mongodb/templates/arbiter/pdb.yaml new file mode 100644 index 00000000..37b5c198 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/arbiter/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "mongodb.arbiter.enabled" .) .Values.arbiter.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.arbiter.pdb.minAvailable }} + minAvailable: {{ .Values.arbiter.pdb.minAvailable }} + {{- end }} + {{- if .Values.arbiter.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.arbiter.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: arbiter +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/arbiter/statefulset.yaml b/install-bundler/install-mongo/mongodb/templates/arbiter/statefulset.yaml new file mode 100644 index 00000000..a1e26989 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/arbiter/statefulset.yaml @@ -0,0 +1,288 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.arbiter.enabled" .) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-arbiter" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: arbiter + {{- if or .Values.arbiter.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ include "mongodb.arbiter.service.nameOverride" . }} + podManagementPolicy: {{ .Values.arbiter.podManagementPolicy }} + {{- if .Values.arbiter.updateStrategy }} + updateStrategy: {{- toYaml .Values.arbiter.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.arbiter.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: arbiter + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: arbiter + {{- if or (include "mongodb.arbiter.createConfigmap" .) .Values.arbiter.podAnnotations }} + annotations: + {{- if (include "mongodb.arbiter.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/arbiter/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.arbiter.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.arbiter.schedulerName }} + schedulerName: {{ .Values.arbiter.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.arbiter.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAffinityPreset "component" "arbiter" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.arbiter.podAntiAffinityPreset "component" "arbiter" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.arbiter.nodeAffinityPreset.type "key" .Values.arbiter.nodeAffinityPreset.key "values" .Values.arbiter.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.arbiter.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.arbiter.priorityClassName }} + priorityClassName: {{ .Values.arbiter.priorityClassName }} + {{- end }} + {{- if .Values.arbiter.runtimeClassName }} + runtimeClassName: {{ .Values.arbiter.runtimeClassName }} + {{- end }} + {{- if .Values.arbiter.podSecurityContext.enabled }} + securityContext: {{- omit .Values.arbiter.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.arbiter.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.arbiter.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if .Values.arbiter.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + mountPath: /certs/CAs + {{- else }} + - name: mongodb-certs-0 + mountPath: /certs-0 + {{- end }} + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /bitnami/scripts + command: + - /bitnami/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.arbiter.service.nameOverride" . }} + {{- end }} + containers: + - name: mongodb-arbiter + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.arbiter.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.arbiter.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.arbiter.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.arbiter.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.arbiter.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.arbiter.service.nameOverride" . }}" + - name: MONGODB_REPLICA_SET_MODE + value: "arbiter" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.%s.$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) (include "mongodb.service.nameOverride" .) .Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: MONGODB_PORT_NUMBER + value: {{ .Values.arbiter.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-replica-set-key + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- $extraFlags := .Values.arbiter.extraFlags | join " " -}} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.arbiter.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.extraEnvVarsCM .Values.arbiter.extraEnvVarsSecret }} + envFrom: + {{- if .Values.arbiter.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.arbiter.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.arbiter.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.arbiter.extraEnvVarsSecret . | quote }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ .Values.arbiter.containerPorts.mongodb }} + name: mongodb + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.arbiter.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- if .Values.arbiter.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- if .Values.arbiter.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.arbiter.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.arbiter.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: mongodb + {{- end }} + {{- end }} + {{- if .Values.arbiter.resources }} + resources: {{- toYaml .Values.arbiter.resources | nindent 12 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumeMounts .Values.tls.enabled }} + volumeMounts: + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.arbiter.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.arbiter.sidecars }} + {{- include "common.tplvalues.render" (dict "value" 
.Values.arbiter.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap .Values.arbiter.extraVolumes .Values.tls.enabled }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0555 + {{- if or .Values.arbiter.configuration .Values.arbiter.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.arbiter.configmapName" . }} + {{- end }} + {{- if and .Values.tls.enabled .Values.arbiter.enabled }} + - name: certs + emptyDir: {} + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- else }} + - name: mongodb-certs-0 + secret: + secretName: {{ include "common.tplvalues.render" ( dict "value" .Values.tls.arbiter.existingSecret "context" $) }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.arbiter.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.arbiter.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/backup/cronjob.yaml b/install-bundler/install-mongo/mongodb/templates/backup/cronjob.yaml new file mode 100644 index 00000000..ccdb25b1 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/backup/cronjob.yaml @@ -0,0 +1,135 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.backup.enabled }} +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "mongodb.fullname" . }}-mongodump + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodump + {{- if .Values.backup.cronjob.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.backup.cronjob.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + schedule: {{ quote .Values.backup.cronjob.schedule }} + concurrencyPolicy: {{ .Values.backup.cronjob.concurrencyPolicy }} + failedJobsHistoryLimit: {{ .Values.backup.cronjob.failedJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ .Values.backup.cronjob.successfulJobsHistoryLimit }} + {{- if .Values.backup.cronjob.startingDeadlineSeconds }} + startingDeadlineSeconds: {{ .Values.backup.cronjob.startingDeadlineSeconds }} + {{- end }} + jobTemplate: + spec: + {{- if .Values.backup.cronjob.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.backup.cronjob.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" . 
| nindent 12 }} + app.kubernetes.io/component: mongodump + {{- if .Values.backup.cronjob.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.labels "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.backup.cronjob.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.annotations "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + spec: + containers: + - name: {{ include "mongodb.fullname" . }}-mongodump + image: {{ include "mongodb.image" . }} + env: + {{- if .Values.auth.enabled }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- end }} + - name: MONGODB_SERVICE_NAME + value: {{ include "mongodb.service.nameOverride" . }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.containerPorts.mongodb | quote }} + - name: MONGODUMP_DIR + value: {{ .Values.backup.cronjob.storage.mountPath }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.backup.cronjob.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.command "context" $) | nindent 14 }} + {{- else }} + command: + - /bin/sh + - -c + - "mongodump {{- if .Values.auth.enabled }} --username=${MONGODB_ROOT_USER} --password=${MONGODB_ROOT_PASSWORD} {{- end }} --host=${MONGODB_SERVICE_NAME} --port=${MONGODB_PORT_NUMBER} ${MONGODB_CLIENT_EXTRA_FLAGS} --oplog --gzip --archive=${MONGODUMP_DIR}/mongodump-$(date '+%Y-%m-%d-%H-%M').gz" + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + - name: datadir + mountPath: {{ .Values.backup.cronjob.storage.mountPath }} + subPath: {{ .Values.backup.cronjob.storage.subPath }} + securityContext: + {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.containerSecurityContext "context" $) | nindent 14 }} + restartPolicy: {{ .Values.backup.cronjob.restartPolicy }} + volumes: + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- else }} + - name: mongodb-certs-0 + secret: + secretName: {{ include "common.tplvalues.render" ( dict "value" .Values.tls.standalone.existingSecret "context" $) }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.backup.cronjob.storage.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.backup.cronjob.storage.existingClaim .) }} + {{- else }} + - name: datadir + persistentVolumeClaim: + claimName: {{ include "mongodb.fullname" . 
}}-mongodump + {{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/backup/pvc.yaml b/install-bundler/install-mongo/mongodb/templates/backup/pvc.yaml new file mode 100644 index 00000000..caefc05f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/backup/pvc.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.backup.enabled (not .Values.backup.cronjob.storage.existingClaim) -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "mongodb.fullname" . }}-mongodump + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: mongodump + {{- if .Values.backup.cronjob.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.backup.cronjob.labels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.backup.cronjob.annotations .Values.commonAnnotations .Values.backup.cronjob.storage.resourcePolicy}} + annotations: + {{- if .Values.backup.cronjob.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.backup.cronjob.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.backup.cronjob.storage.resourcePolicy }} + helm.sh/resource-policy: {{ .Values.backup.cronjob.storage.resourcePolicy | quote }} + {{- end }} + {{- end }} +spec: + accessModes: + {{- range .Values.backup.cronjob.storage.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.backup.cronjob.storage.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.backup.cronjob.storage "global" .Values.global) | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/common-scripts-cm.yaml b/install-bundler/install-mongo/mongodb/templates/common-scripts-cm.yaml new file mode 100644 index 00000000..381c99b6 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/common-scripts-cm.yaml @@ -0,0 +1,146 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "mongodb.fullname" . }} + startup-probe.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + # Probes are using localhost/127.0.0.1 to tests if the service is up, ready or healthy. If TLS is enabled, we shouldn't validate the certificate hostname. 
+ TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert --tlsAllowInvalidHostnames' + {{- end }} + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep 'true' + readiness-probe.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + # Probes are using localhost/127.0.0.1 to tests if the service is up, ready or healthy. If TLS is enabled, we shouldn't validate the certificate hostname. + TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert --tlsAllowInvalidHostnames' + {{- end }} + # Run the proper check depending on the version + [[ $(mongod -version | grep "db version") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]} + . /opt/bitnami/scripts/libversion.sh + VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)" + VERSION_MINOR="$(get_sematic_version "$VERSION" 2)" + VERSION_PATCH="$(get_sematic_version "$VERSION" 3)" + readiness_test='db.isMaster().ismaster || db.isMaster().secondary' + if [[ ( "$VERSION_MAJOR" -ge 5 ) || ( "$VERSION_MAJOR" -ge 4 && "$VERSION_MINOR" -ge 4 && "$VERSION_PATCH" -ge 2 ) ]]; then + readiness_test='db.hello().isWritablePrimary || db.hello().secondary' + fi + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval "${readiness_test}" | grep 'true' + ping-mongodb.sh: | + #!/bin/bash + {{- if .Values.tls.enabled }} + # Probes are using localhost/127.0.0.1 to tests if the service is up, ready or healthy. If TLS is enabled, we shouldn't validate the certificate hostname. + TLS_OPTIONS='--tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert --tlsAllowInvalidHostnames' + {{- end }} + mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval "db.adminCommand('ping')" + {{- if .Values.tls.enabled }} + generate-certs.sh: | + #!/bin/bash + {{- if (include "mongodb.autoGenerateCerts" .) }} + additional_ips=() + additional_names=() + while getopts "i:n:s:" flag + do + case "${flag}" in + i) read -a additional_ips <<< ${OPTARG//,/ } ;; + n) read -a additional_names <<< ${OPTARG//,/ } ;; + s) svc=${OPTARG// /} ;; + \?) exit 1 ;; + esac + done + + my_hostname=$(hostname) + cp /certs/CAs/* /certs/ + cat >/certs/openssl.cnf <>/certs/openssl.cnf <>/certs/openssl.cnf < /certs/mongodb.pem + cd /certs/ + shopt -s extglob + rm -rf !(mongodb-ca-cert|mongodb.pem|CAs|openssl.cnf) + chmod 0600 mongodb-ca-cert mongodb.pem + {{- else }} + {{- if eq .Values.architecture "standalone" }} + ID="0" + {{- else }} + if [[ "$MY_POD_NAME" =~ "arbiter-0"$ ]]; then + ID="0" + elif [[ "$MY_POD_NAME" =~ "hidden-"[0-9]{1,}$ ]]; then + ID="${MY_POD_NAME#"{{ printf "%s-hidden-" $fullname }}"}" + else + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + fi + {{- end }} + + {{- if .Values.tls.pemChainIncluded }} + #Split the pem chain by the END CERTIFICATE string and store in files /certs/xx00, /certs/xx01 etc. 
+ cat /certs-${ID}/tls.crt | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f /certs/xx + + #Use first certificate as leaf node and combine with key to store in pem file + cat "/certs/xx00" "/certs-${ID}/tls.key" > "/certs/mongodb.pem" + + #Use remaining intermediate certificates for ca.crt + echo $(find /certs/ -not -name 'xx00' -name 'xx*') | sort | xargs cat > "/certs/mongodb-ca-cert" + + rm -rf /certs/xx* + {{- else }} + cat "/certs-${ID}/tls.crt" "/certs-${ID}/tls.key" > "/certs/mongodb.pem" + cp "/certs-${ID}/ca.crt" "/certs/mongodb-ca-cert" + {{- end }} + + chmod 0600 /certs/mongodb-ca-cert /certs/mongodb.pem + {{- end }} + {{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/configmap.yaml b/install-bundler/install-mongo/mongodb/templates/configmap.yaml new file mode 100644 index 00000000..7d11e087 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/extra-list.yaml b/install-bundler/install-mongo/mongodb/templates/extra-list.yaml new file mode 100644 index 00000000..2d35a580 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/hidden/configmap.yaml b/install-bundler/install-mongo/mongodb/templates/hidden/configmap.yaml new file mode 100644 index 00000000..a8308736 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/hidden/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.hidden.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + mongodb.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.configuration "context" $) | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/hidden/external-access-svc.yaml b/install-bundler/install-mongo/mongodb/templates/hidden/external-access-svc.yaml new file mode 100644 index 00000000..6cbbb40b --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/hidden/external-access-svc.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "mongodb.hidden.enabled" .) .Values.externalAccess.hidden.enabled }} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.hidden.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-hidden-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-hidden-%d-external" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $root.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + pod: {{ $targetPod }} + {{- if or $root.Values.externalAccess.hidden.service.annotations $root.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.externalAccess.hidden.service.annotations $root.Values.commonAnnotations ) "context" $ ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.hidden.service.type }} + {{- if eq $root.Values.externalAccess.hidden.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.hidden.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.hidden.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.loadBalancerClass }} + loadBalancerClass: {{ $root.Values.externalAccess.hidden.service.loadBalancerClass }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.hidden.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + allocateLoadBalancerNodePorts: {{ $root.Values.externalAccess.hidden.service.allocateLoadBalancerNodePorts }} + {{- end }} + {{- if (or (eq $root.Values.externalAccess.hidden.service.type "LoadBalancer") (eq $root.Values.externalAccess.hidden.service.type "NodePort")) }} + externalTrafficPolicy: {{ $root.Values.externalAccess.hidden.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.sessionAffinity }} + sessionAffinity: {{ $root.Values.externalAccess.hidden.service.sessionAffinity }} + {{- end }} + {{- if $root.Values.externalAccess.hidden.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.hidden.service.sessionAffinityConfig "context" $) | nindent 4 }} 
+ {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.externalAccess.hidden.service.portName | quote }} + port: {{ $root.Values.externalAccess.hidden.service.ports.mongodb }} + {{- if not (empty $root.Values.externalAccess.hidden.service.nodePorts) }} + {{- $nodePort := index $root.Values.externalAccess.hidden.service.nodePorts $i }} + nodePort: {{ $nodePort }} + {{- else }} + nodePort: null + {{- end }} + targetPort: mongodb + {{- if $root.Values.externalAccess.hidden.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.hidden.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.hidden.podLabels $root.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/hidden/headless-svc.yaml b/install-bundler/install-mongo/mongodb/templates/hidden/headless-svc.yaml new file mode 100644 index 00000000..da252841 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/hidden/headless-svc.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.hidden.enabled" .) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if or .Values.hidden.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: {{ .Values.hidden.service.portName | quote }} + port: {{ .Values.hidden.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.hidden.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/hidden/pdb.yaml b/install-bundler/install-mongo/mongodb/templates/hidden/pdb.yaml new file mode 100644 index 00000000..5420a932 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/hidden/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "mongodb.hidden.enabled" .) .Values.hidden.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" . 
)}} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.hidden.pdb.minAvailable }} + minAvailable: {{ .Values.hidden.pdb.minAvailable }} + {{- end }} + {{- if .Values.hidden.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.hidden.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: hidden +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/hidden/statefulset.yaml b/install-bundler/install-mongo/mongodb/templates/hidden/statefulset.yaml new file mode 100644 index 00000000..241f554e --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/hidden/statefulset.yaml @@ -0,0 +1,555 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.hidden.enabled" .) }} +{{- $replicaCount := int .Values.hidden.replicaCount }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.hidden.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.hidden.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.hidden.service.type "LoadBalancer")) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-hidden" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: hidden + {{- if or .Values.hidden.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) }} + podManagementPolicy: {{ .Values.hidden.podManagementPolicy }} + replicas: {{ .Values.hidden.replicaCount }} + {{- if .Values.hidden.updateStrategy }} + updateStrategy: {{- toYaml .Values.hidden.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.hidden.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: hidden + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: hidden + {{- if or (include "mongodb.hidden.createConfigmap" .) 
.Values.hidden.podAnnotations }} + annotations: + {{- if (include "mongodb.hidden.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/hidden/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.hidden.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hidden.schedulerName }} + schedulerName: {{ .Values.hidden.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.hidden.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAffinityPreset "component" "hidden" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.hidden.podAntiAffinityPreset "component" "hidden" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.hidden.nodeAffinityPreset.type "key" .Values.hidden.nodeAffinityPreset.key "values" .Values.hidden.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hidden.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.hidden.priorityClassName }} + priorityClassName: {{ .Values.hidden.priorityClassName }} + {{- end }} + {{- if .Values.hidden.runtimeClassName }} + runtimeClassName: {{ .Values.hidden.runtimeClassName }} + {{- end }} + {{- if .Values.hidden.podSecurityContext.enabled }} + securityContext: {{- omit .Values.hidden.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.hidden.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.hidden.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.hidden.initContainers (and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled) (and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.hidden.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.hidden.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} + find {{ printf "%s/%s" .Values.hidden.persistence.mountPath (default "" .Values.hidden.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.hidden.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + mountPath: /certs/CAs + {{- else }} + {{- range $index, $_ := .Values.tls.hidden.existingSecrets }} + - name: mongodb-certs-{{ $index }} + mountPath: /certs-{{ $index }} + {{- end }} + {{- end }} + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /bitnami/scripts + command: + - /bitnami/scripts/generate-certs.sh + args: + - -s {{ printf "%s-hidden-headless" (include "mongodb.fullname" .) }} + {{- if .Values.externalAccess.hidden.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.hidden.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: auto-discovery + image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.hidden.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.hidden.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.hidden.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.command "context" $) | nindent 12 }} + {{- else }} + command: + - /scripts/setup-hidden.sh + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.hidden.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.hidden.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.service.nameOverride" . }}" + - name: K8S_HIDDEN_NODE_SERVICE_NAME + value: "{{ include "mongodb.fullname" . }}-hidden-headless" + - name: MONGODB_REPLICA_SET_MODE + value: "hidden" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) .Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + {{- if and .Values.replicaSetHostnames (not .Values.externalAccess.hidden.enabled) }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_HIDDEN_NODE_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-replica-set-key + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.hidden.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.hidden.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.hidden.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.hidden.extraEnvVarsCM .Values.hidden.extraEnvVarsSecret }} + envFrom: + {{- if .Values.hidden.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.hidden.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.hidden.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.hidden.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - containerPort: {{ .Values.hidden.containerPorts.mongodb }} + name: mongodb + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.hidden.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.hidden.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.hidden.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.hidden.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.hidden.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.hidden.resources }} + resources: {{- toYaml .Values.hidden.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.hidden.persistence.mountPath }} + subPath: {{ .Values.hidden.persistence.subPath }} + - name: common-scripts + mountPath: /bitnami/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.hidden.configuration .Values.hidden.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + - name: scripts + mountPath: /scripts/setup-hidden.sh + subPath: setup-hidden.sh + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: shared + mountPath: /shared + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.hidden.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + /bin/mongodb_exporter {{ include "mongodb.exporterArgs" $ }} --mongodb.direct-connect --mongodb.global-conn-pool --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9216 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if 
.Values.hidden.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0555 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.hidden.configuration .Values.hidden.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.hidden.configmapName" . }} + {{- end }} + {{- if and .Values.externalAccess.hidden.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.hidden.service.type "LoadBalancer") }} + - name: shared + emptyDir: {} + {{- end }} + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0755 + {{- if .Values.hidden.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- else }} + {{- range $index, $secret := .Values.tls.hidden.existingSecrets }} + - name: mongodb-certs-{{ $index }} + secret: + secretName: {{ include "common.tplvalues.render" ( dict "value" $secret "context" $) }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- end }} + {{- if not .Values.hidden.persistence.enabled }} + - name: datadir + {{- if .Values.hidden.persistence.medium }} + emptyDir: + medium: {{ .Values.hidden.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.hidden.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.hidden.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.hidden.persistence.size | quote }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.requests }} + {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.hidden.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.hidden.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.hidden.persistence "global" .Values.global) }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/initialization-configmap.yaml b/install-bundler/install-mongo/mongodb/templates/initialization-configmap.yaml new file mode 100644 index 00000000..7086e0bf --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/initialization-configmap.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.initdbScripts (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/metrics-svc.yaml b/install-bundler/install-mongo/mongodb/templates/metrics-svc.yaml new file mode 100644 index 00000000..f21401ee --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/metrics-svc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - port: {{ .Values.metrics.service.ports.metrics }} + targetPort: metrics + protocol: TCP + name: http-metrics + {{- if .Values.metrics.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/prometheusrule.yaml b/install-bundler/install-mongo/mongodb/templates/prometheusrule.yaml new file mode 100644 index 00000000..00df349f --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.prometheusRule.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "mongodb.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/psp.yaml b/install-bundler/install-mongo/mongodb/templates/psp.yaml new file mode 100644 index 00000000..1d344866 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/psp.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- if .Values.podSecurityPolicy.spec }} +{{ include "common.tplvalues.render" ( dict "value" .Values.podSecurityPolicy.spec "context" $ ) | nindent 2 }} +{{- else }} + allowPrivilegeEscalation: {{ .Values.podSecurityPolicy.allowPrivilegeEscalation }} + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.podSecurityContext.fsGroup }} + max: {{ .Values.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: {{ .Values.podSecurityPolicy.privileged }} + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/external-access-svc.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/external-access-svc.yaml new file mode 100644 index 00000000..3bc99005 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/external-access-svc.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (not (eq .Values.externalAccess.service.type "ClusterIP")) }} +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . 
}} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $root.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + pod: {{ $targetPod }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations ) "context" $ ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if and (eq $root.Values.externalAccess.service.type "LoadBalancer") $root.Values.externalAccess.service.loadBalancerClass }} + loadBalancerClass: {{ $root.Values.externalAccess.service.loadBalancerClass }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + allocateLoadBalancerNodePorts: {{ $root.Values.externalAccess.service.allocateLoadBalancerNodePorts }} + {{- end }} + {{- if (or (eq $root.Values.externalAccess.service.type "LoadBalancer") (eq $root.Values.externalAccess.service.type "NodePort")) }} + externalTrafficPolicy: {{ $root.Values.externalAccess.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if $root.Values.externalAccess.service.sessionAffinity }} + sessionAffinity: {{ $root.Values.externalAccess.service.sessionAffinity }} + {{- end }} + {{- if $root.Values.externalAccess.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + publishNotReadyAddresses: true + ports: + - name: {{ $root.Values.externalAccess.service.portName | quote }} + port: {{ $root.Values.externalAccess.service.ports.mongodb }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + {{- $nodePort := index $root.Values.externalAccess.service.nodePorts $i }} + nodePort: {{ $nodePort }} + {{- else }} + nodePort: null + {{- end }} + targetPort: mongodb + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.podLabels $root.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/headless-svc.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/headless-svc.yaml new file mode 
100644 index 00000000..bf7a3d91 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/headless-svc.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.service.nameOverride" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: {{ .Values.service.portName | quote }} + port: {{ .Values.service.ports.mongodb }} + targetPort: mongodb + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/pdb.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/pdb.yaml new file mode 100644 index 00000000..5bbd3107 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/pdb.yaml @@ -0,0 +1,28 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replicaset") .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/scripts-configmap.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/scripts-configmap.yaml new file mode 100644 index 00000000..d13ab25c --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/scripts-configmap.yaml @@ -0,0 +1,311 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replicaset" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "mongodb.fullname" . }} + {{- $releaseNamespace := include "mongodb.namespace" . }} + {{- if and .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + setup.sh: |- + #!/bin/bash + + . /opt/bitnami/scripts/mongodb-env.sh + . /opt/bitnami/scripts/libfs.sh + . /opt/bitnami/scripts/liblog.sh + . 
/opt/bitnami/scripts/libvalidations.sh + + {{- if .Values.externalAccess.enabled }} + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})" + {{- else }} + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then + export MONGODB_ADVERTISED_PORT_NUMBER=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + fi + {{- if .Values.externalAccess.service.domain }} + export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.service.domain }} + {{- else }} + export MONGODB_ADVERTISED_HOSTNAME=$MY_POD_HOST_IP + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.replicaSetConfigurationSettings.enabled }} + # placed here before root password env is overwritten + # makes no assumption about starting state + # ensures that any stepDown or non-default starting state is handled + /scripts/replicaSetConfigurationSettings.sh & + {{- end }} + + if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then + export MONGODB_ADVERTISED_PORT_NUMBER="$MONGODB_PORT_NUMBER" + fi + + info "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME" + info "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER" + + # Check for existing replica set in case there is no data in the PVC + # This is for cases where the PVC is lost or for MongoDB caches without + # persistence + current_primary="" + if is_dir_empty "${MONGODB_DATA_DIR}/db"; then + info "Data dir empty, checking if the replica set already exists" + {{- $replicaCount := int .Values.replicaCount }} + {{- $portNumber := int .Values.service.ports.mongodb }} + {{- $fullname := include "mongodb.fullname" . }} + {{- $releaseNamespace := include "mongodb.namespace" . }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} + {{- $mongoList := list }} + {{- range $e, $i := until $replicaCount }} + {{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $portNumber) }} + {{- end }} + + {{- if .Values.externalAccess.externalMaster.enabled }} + current_primary={{ printf "%s:%d" (.Values.externalAccess.externalMaster.host) ( int .Values.externalAccess.externalMaster.port) }} + {{- else }} + current_primary=$(mongosh admin --host "{{ join "," $mongoList }}" {{- if .Values.auth.enabled }} --authenticationDatabase admin -u $MONGODB_ROOT_USER -p $MONGODB_ROOT_PASSWORD{{- end }}{{- if .Values.tls.enabled}} --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert{{- end }} --eval 'db.runCommand("ismaster")' | awk -F\' '/primary/ {print $2}') + {{- end }} + if ! is_empty_value "$current_primary"; then + info "Detected existing primary: ${current_primary}" + fi + fi + + if ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" == "$current_primary" ]]; then + info "Advertised name matches current primary, configuring node as a primary" + export MONGODB_REPLICA_SET_MODE="primary" + elif ! 
is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" != "$current_primary" ]]; then + info "Current primary is different from this node. Configuring the node as replica of ${current_primary}" + export MONGODB_REPLICA_SET_MODE="secondary" + export MONGODB_INITIAL_PRIMARY_HOST="${current_primary%:*}" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="${current_primary#*:}" + export MONGODB_SET_SECONDARY_OK="yes" + elif [[ "$MY_POD_NAME" = "{{ $fullname }}-0" ]]; then + info "Pod name matches initial primary pod name, configuring node as a primary" + export MONGODB_REPLICA_SET_MODE="primary" + else + info "Pod name doesn't match initial primary pod name, configuring node as a secondary" + export MONGODB_REPLICA_SET_MODE="secondary" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER" + fi + + if [[ "$MONGODB_REPLICA_SET_MODE" == "secondary" ]]; then + export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER" + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + export MONGODB_ROOT_PASSWORD="" + export MONGODB_EXTRA_USERNAMES="" + export MONGODB_EXTRA_DATABASES="" + export MONGODB_EXTRA_PASSWORDS="" + export MONGODB_ROOT_PASSWORD_FILE="" + export MONGODB_EXTRA_USERNAMES_FILE="" + export MONGODB_EXTRA_DATABASES_FILE="" + export MONGODB_EXTRA_PASSWORDS_FILE="" + fi + + exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh + setup-hidden.sh: |- + #!/bin/bash + + . /opt/bitnami/scripts/mongodb-env.sh + + {{- if .Values.externalAccess.hidden.enabled }} + {{- if eq .Values.externalAccess.hidden.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export MONGODB_ADVERTISED_HOSTNAME="$(<${SHARED_FILE})" + {{- else }} + ID="${MY_POD_NAME#"{{ $fullname }}-hidden-"}" + export MONGODB_ADVERTISED_HOSTNAME=$(echo '{{ .Values.externalAccess.hidden.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else if eq .Values.externalAccess.hidden.service.type "NodePort" }} + ID="${MY_POD_NAME#"{{ $fullname }}-hidden-"}" + if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then + export MONGODB_ADVERTISED_PORT_NUMBER=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + fi + {{- if .Values.externalAccess.hidden.service.domain }} + export MONGODB_ADVERTISED_HOSTNAME={{ .Values.externalAccess.hidden.service.domain }} + {{- else }} + export MONGODB_ADVERTISED_HOSTNAME=$MY_POD_HOST_IP + {{- end }} + {{- end }} + {{- end }} + + {{- if .Values.replicaSetConfigurationSettings.enabled }} + # placed here before root password env is overwritten + # makes no assumption about starting state + # ensures that any stepDown or non-default starting state is handled + /scripts/replicaSetConfigurationSettings.sh & + {{- end }} + + echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME" + echo "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER" + echo "Configuring node as a hidden node" + export MONGODB_REPLICA_SET_MODE="hidden" + export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER" + export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD" + export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER" + export MONGODB_ROOT_PASSWORD="" + export MONGODB_EXTRA_USERNAMES="" + export MONGODB_EXTRA_DATABASES="" + export MONGODB_EXTRA_PASSWORDS="" + export MONGODB_ROOT_PASSWORD_FILE="" + export MONGODB_EXTRA_USERNAMES_FILE="" + export MONGODB_EXTRA_DATABASES_FILE="" + export 
MONGODB_EXTRA_PASSWORDS_FILE="" + exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh + {{- if .Values.replicaSetConfigurationSettings.enabled }} + replicaSetConfigurationSettings.sh: |- + #!/bin/bash + # This script is to be called when the pod starts. + # This script sets rs settings which cannot be applied via the conf file + + function logger () + #$1 is the line to be logged + { + echo "replicaSetConfigurationSettings.sh -- ${1}" >&1 + } + + SLEEP_PERIOD=10 + + {{- if and .Values.auth.enabled .Values.auth.rootPassword }} + usernameAndPassword="-u ${MONGODB_ROOT_USER} -p ${MONGODB_ROOT_PASSWORD}" + {{- else }} + usernameAndPassword="" + {{- end }} + + # load Values.replicaSetConfigurationSettings.configuration into associative array which makes iterating and string manipulation easy + declare -A desiredRsConf + {{ range $setting, $value := .Values.replicaSetConfigurationSettings.configuration -}} + {{ printf "desiredRsConf[%s]='%v'" $setting $value }} + {{ end }} + + rsConfWriteAttempts=0 + rs_conf_configured_ok=unknown + + while [[ "${rs_conf_configured_ok}" != "true" ]]; do + + # give the rs setup a chance to succeed before attempting to read or configure + sleep ${SLEEP_PERIOD} + + counter=0 + while ! mongosh ${usernameAndPassword} --eval 'rs.conf()'; do + counter=$((${counter} +1)) + logger "not yet able to read rs.conf settings from the currently running rs (after ${counter} attempts)" + sleep ${SLEEP_PERIOD} + done + counter=$((${counter} +1)) + logger "rs.conf settings have been read from the currently running rs (after ${counter} attempts)" + + # read rs.conf again and store it. settings format is '"" : ,' + currentRsConf=$(mongosh ${usernameAndPassword} --eval 'rs.conf()') + + desiredEqualsactual=unknown + settingsToConfigure="" + for key in ${!desiredRsConf[@]}; do + value=${desiredRsConf[$key]} + if ! $(echo "\"${currentRsConf}"\" | grep -q -e "${key}: ${value},"); then + logger "rs conf setting: ${key} value will be set to: ${value}" + settingsToConfigure="${settingsToConfigure}cfg.settings.${key} = ${value}; " + desiredEqualsactual=false + else + logger "rs conf: ${key} is already at desired value: ${value}" + fi + done + + if [[ "${desiredEqualsactual}" != "false" ]]; then + logger "replicaSetConfigurationSettings match the settings of the currently running rs" + desiredEqualsactual=true + rs_conf_configured_ok=true + logger "Current settings match desired settings (There have been ${rsConfWriteAttempts} attempts to write to mongoDB rs configuration)" + exit + fi + + # apply the settings only if this member is currently the mongo replicaset PRIMARY + # it might take a little time before any pod is PRIMARY + isMaster=unknown + if ! mongosh ${usernameAndPassword} --eval 'rs.isMaster()' | grep -q "ismaster: true"; then + isMaster=false + logger "This node is not yet PRIMARY - replicaSetConfigurationSettings will only be set on the member that is currently PRIMARY" + else + isMaster=true + logger "This node is PRIMARY" + fi + + if [[ "${isMaster}" == "true" ]]; then + logger "This node is currently PRIMARY - will apply rs.conf settings" + + # avoiding tricky string substitution with single quotes by making the eval string a set of vars + rsconf="cfg = rs.conf();" + rsreconf="rs.reconfig(cfg);" + rsCommand="${rsconf} ${settingsToConfigure} ${rsreconf}" + + mongosh ${usernameAndPassword} --eval "${rsCommand}" + if [ $?
-ne 0 ]; then + logger "Failed to apply mongodb cfg.settings configuration" + else + logger "mongodb replicaset cfg.settings configuration applied" + logger "Will check rs conf" + # don't exit just yet - the settings will be checked in the next loop + fi + rsConfWriteAttempts=$((${rsConfWriteAttempts} + 1 )) + fi + done + {{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/statefulset.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/statefulset.yaml new file mode 100644 index 00000000..a62f3ff7 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/statefulset.yaml @@ -0,0 +1,558 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if eq .Values.architecture "replicaset" }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if or .Values.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ include "mongodb.service.nameOverride" . }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . 
}} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + find {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + mountPath: /certs/CAs + {{- else }} + {{- range $index, $_ := .Values.tls.replicaset.existingSecrets }} + - name: mongodb-certs-{{ $index }} + mountPath: /certs-{{ $index }} + {{- end }} + {{- end }} + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /bitnami/scripts + command: + - /bitnami/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.service.nameOverride" . }} + {{- if .Values.externalAccess.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: auto-discovery + image: {{ include "mongodb.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- else }} + command: + - /scripts/setup.sh + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: K8S_SERVICE_NAME + value: "{{ include "mongodb.service.nameOverride" . }}" + - name: MONGODB_INITIAL_PRIMARY_HOST + value: {{ printf "%s-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.%s" (include "mongodb.fullname" .) .Values.clusterDomain }} + - name: MONGODB_REPLICA_SET_NAME + value: {{ .Values.replicaSetName | quote }} + {{- if and .Values.replicaSetHostnames (not .Values.externalAccess.enabled) }} + - name: MONGODB_ADVERTISED_HOSTNAME + value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- end }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + - name: MONGODB_REPLICA_SET_KEY + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-replica-set-key + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ .Values.containerPorts.mongodb }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/ping-mongodb.sh + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/readiness-probe.sh + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + - name: common-scripts + mountPath: /bitnami/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{ if .Values.replicaSetConfigurationSettings.enabled }} + - name: scripts + mountPath: /scripts/replicaSetConfigurationSettings.sh + subPath: replicaSetConfigurationSettings.sh + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + mountPath: /shared + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + /bin/mongodb_exporter {{ include "mongodb.exporterArgs" $ }} --mongodb.direct-connect --mongodb.global-conn-pool --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml 
.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0550 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (eq .Values.externalAccess.service.type "LoadBalancer") }} + - name: shared + emptyDir: {} + {{- end }} + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0755 + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- else }} + {{- range $index, $secret := .Values.tls.replicaset.existingSecrets }} + - name: mongodb-certs-{{ $index }} + secret: + secretName: {{ include "common.tplvalues.render" ( dict "value" $secret "context" $) }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + {{- if .Values.persistence.medium }} + emptyDir: + medium: {{ .Values.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.volumeClaimTemplates.requests }} + {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.requests "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.persistence.volumeClaimTemplates.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/replicaset/svc.yaml b/install-bundler/install-mongo/mongodb/templates/replicaset/svc.yaml new file mode 100644 index 00000000..d9550437 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/replicaset/svc.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replicaset") .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "ClusterIP") }} + +{{- $fullName := include "mongodb.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d" $fullName $i }} + namespace: {{ include "mongodb.namespace" $ }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $root.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if or $root.Values.service.annotations $root.Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.service.annotations $root.Values.commonAnnotations ) "context" $ ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + ports: + - name: {{ $root.Values.service.portName | quote }} + port: {{ $root.Values.service.ports.mongodb }} + targetPort: mongodb + {{- if $root.Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $root.Values.podLabels $root.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/role.yaml b/install-bundler/install-mongo/mongodb/templates/role.yaml new file mode 100644 index 00000000..82b2293e --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/role.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . 
}} +kind: Role +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- if .Values.rbac.rules }} +{{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} +{{- end -}} +{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- if and $pspAvailable .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "mongodb.fullname" . }}] +{{- end -}} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/rolebinding.yaml b/install-bundler/install-mongo/mongodb/templates/rolebinding.yaml new file mode 100644 index 00000000..c6a76e64 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} +roleRef: + kind: Role + name: {{ include "mongodb.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . | quote }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/secrets-ca.yaml b/install-bundler/install-mongo/mongodb/templates/secrets-ca.yaml new file mode 100644 index 00000000..e1387ae3 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/secrets-ca.yaml @@ -0,0 +1,33 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "mongodb.createTlsSecret" .) }} +{{- $secretName := printf "%s" (include "mongodb.tlsSecretName" .) }} +{{- $fullname := include "mongodb.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $cn := printf "%s.%s.svc.%s" $fullname .Release.Namespace $clusterDomain }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ template "mongodb.namespace" . }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if or .Values.tls.caCert .Values.tls.caKey (not .Values.tls.autoGenerated) }} + {{- $ca := buildCustomCert (required "A valid .Values.tls.caCert is required!" .Values.tls.caCert) (required "A valid .Values.tls.caKey is required!" 
.Values.tls.caKey) }} + mongodb-ca-cert: {{ b64enc $ca.Cert }} + mongodb-ca-key: {{ b64enc $ca.Key }} + {{- else }} + {{- $ca := genCA "myMongo-ca" 3650 }} + mongodb-ca-cert: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "mongodb-ca-cert" "defaultValue" $ca.Cert "context" $) }} + mongodb-ca-key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "mongodb-ca-key" "defaultValue" $ca.Key "context" $) }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/secrets.yaml b/install-bundler/install-mongo/mongodb/templates/secrets.yaml new file mode 100644 index 00000000..00a0dfb3 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/secrets.yaml @@ -0,0 +1,126 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.auth.enabled }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $port := .Values.service.ports.mongodb }} +{{- $host := include "mongodb.service.nameOverride" . }} +{{- $hostForURI := printf "%s:%s" (include "mongodb.service.nameOverride" .) (print $port) }} +{{- if (eq .Values.architecture "replicaset") }} + {{- $fullname := include "mongodb.fullname" . }} + {{- $releaseNamespace := include "mongodb.namespace" . }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $mongoList := list }} + {{- $mongoOnlyHostList := list }} + {{- range $e, $i := until $replicaCount }} + {{- $mongoOnlyHostList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s" $fullname $i $fullname $releaseNamespace $clusterDomain) }} + {{- $mongoList = append $mongoList (printf "%s-%d.%s-headless.%s.svc.%s:%s" $fullname $i $fullname $releaseNamespace $clusterDomain (print $port)) }} + {{- end }} + {{- $host = (join "," $mongoOnlyHostList) }} + {{- $hostForURI = (join "," $mongoList) }} +{{- end }} + +{{/* Root user section. */}} +{{- $rootPassword := include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.secretName" .) "key" "mongodb-root-password" "providedValues" (list "auth.rootPassword" ) "context" $) | trimAll "\"" | b64dec }} + +{{/* Custom user section. This chart allows creating multiple users */}} +{{- $customUsers := include "mongodb.customUsers" . }} +{{- $customDatabases := include "mongodb.customDatabases" . }} +{{- $customPasswords := include "mongodb.customPasswords" . }} +{{- $passwords := "" }} +{{- $passwordList := list -}} +{{- $customUsersList := list }} +{{- $customDatabasesList := list }} +{{- $customPasswordsList := list }} +{{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} +{{- $customUsersList = splitList "," $customUsers }} +{{- $customDatabasesList = splitList "," $customDatabases }} +{{- if not (empty $customPasswords) }} +{{- $passwordList = $customPasswords }} +{{- $customPasswordsList = splitList "," $customPasswords }} +{{- else }} +{{- range $customUsersList }} +{{- $customPasswordsList = append $customPasswordsList (randAlphaNum 10) }} +{{- end -}} +{{- $passwordList = (join "," $customPasswordsList) }} +{{- end }} +{{- $passwords = include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.secretName" .) "key" "mongodb-passwords" "providedValues" (list "mongodbPasswords") "context" (set (deepCopy $) "Values" (dict "mongodbPasswords" $passwordList))) | trimAll "\"" | b64dec }} +{{- end }} + +{{- if (include "mongodb.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ template "mongodb.namespace" . 
}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + mongodb-root-password: {{ print $rootPassword | b64enc | quote }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + mongodb-passwords: {{ print $passwords | b64enc | quote }} + {{- end }} + {{- if .Values.metrics.username }} + mongodb-metrics-password: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-metrics-password" "providedValues" (list "metrics.password" ) "context" $) }} + {{- end }} + {{- if eq .Values.architecture "replicaset" }} + mongodb-replica-set-key: {{ include "common.secrets.passwords.manage" (dict "secret" (include "mongodb.fullname" .) "key" "mongodb-replica-set-key" "providedValues" (list "auth.replicaSetKey" ) "context" $) }} + {{- end }} +{{- end }} +{{- if .Values.serviceBindings.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-svcbind-root + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/mongodb +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "mongodb" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + username: {{ print .Values.auth.rootUser | b64enc | quote }} + password: {{ print $rootPassword | b64enc | quote }} + database: {{ print "admin" | b64enc | quote }} + uri: {{ printf "mongodb://%s:%s@%s/admin" .Values.auth.rootUser $rootPassword $hostForURI | b64enc | quote }} +{{- range $e, $i := until (len $customUsersList) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" $ }}-svcbind-{{ $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/mongodb +data: + {{- $currentUser := index $customUsersList $i }} + {{- $currentDatabase := last $customDatabasesList }} + {{- if gt (len $customDatabasesList) $i }} + {{- $currentDatabase = index $customDatabasesList $i }} + {{- end }} + {{- $currentPassword := index $customPasswordsList $i }} + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "mongodb" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + username: {{ print $currentUser | b64enc | quote }} + password: {{ print $currentPassword | b64enc | quote }} + database: {{ print $currentDatabase | b64enc | quote }} + uri: {{ printf "mongodb://%s:%s@%s/%s" $currentUser $currentPassword $hostForURI $currentDatabase | b64enc | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/serviceaccount.yaml 
b/install-bundler/install-mongo/mongodb/templates/serviceaccount.yaml new file mode 100644 index 00000000..6dd53ab8 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mongodb.serviceAccountName" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +secrets: + - name: {{ template "mongodb.fullname" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/servicemonitor.yaml b/install-bundler/install-mongo/mongodb/templates/servicemonitor.yaml new file mode 100644 index 00000000..8273e4b6 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.serviceMonitor.namespace" . }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - "{{ include "mongodb.namespace" . }}" +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/standalone/dep-sts.yaml b/install-bundler/install-mongo/mongodb/templates/standalone/dep-sts.yaml new file mode 100644 index 00000000..c63f7a26 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/standalone/dep-sts.yaml @@ -0,0 +1,476 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: {{ if .Values.useStatefulSet }}{{ include "common.capabilities.statefulset.apiVersion" . }}{{- else }}{{ include "common.capabilities.deployment.apiVersion" . }}{{- end }} +kind: {{ if .Values.useStatefulSet }}StatefulSet{{- else }}Deployment{{- end }} +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if or .Values.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + {{- if .Values.useStatefulSet }} + serviceName: {{ include "mongodb.service.nameOverride" . 
}} + {{- end }} + {{- if .Values.updateStrategy}} + {{- if .Values.useStatefulSet }} + updateStrategy: + {{- else }} + strategy: + {{- end }} + {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end}} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: mongodb + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: mongodb + {{- if or (include "mongodb.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "mongodb.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- include "mongodb.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "mongodb.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "mongodb" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "mongodb" "customLabels" $podLabels "topologyKey" .Values.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.runtimeClassName }} + runtimeClassName: {{ .Values.runtimeClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{ if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if or .Values.initContainers (and .Values.volumePermissions.enabled .Values.persistence.enabled) .Values.tls.enabled }} + initContainers: + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "mongodb.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} + find {{ printf "%s/%s" .Values.persistence.mountPath (default "" .Values.persistence.subPath) }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: generate-tls-certs + image: {{ include "mongodb.tls.image" . }} + imagePullPolicy: {{ .Values.tls.image.pullPolicy | quote }} + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: MY_POD_HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + mountPath: /certs/CAs + {{- else }} + - name: mongodb-certs-0 + mountPath: /certs-0 + {{- end }} + - name: certs + mountPath: /certs + - name: common-scripts + mountPath: /bitnami/scripts + command: + - /bitnami/scripts/generate-certs.sh + args: + - -s {{ include "mongodb.service.nameOverride" . }} + {{- if .Values.externalAccess.service.loadBalancerIPs }} + - -i {{ join "," .Values.externalAccess.service.loadBalancerIPs }} + {{- end }} + {{- if .Values.tls.extraDnsNames }} + - -n {{ join "," .Values.tls.extraDnsNames }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: mongodb + image: {{ include "mongodb.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- $customUsers := include "mongodb.customUsers" . -}} + {{- $customDatabases := include "mongodb.customDatabases" . -}} + {{- if not (empty $customUsers) }} + - name: MONGODB_EXTRA_USERNAMES + value: {{ $customUsers | quote }} + {{- end }} + {{- if not (empty $customDatabases) }} + - name: MONGODB_EXTRA_DATABASES + value: {{ $customDatabases | quote }} + {{- end }} + {{- if .Values.auth.enabled }} + {{- if and (not (empty $customUsers)) (not (empty $customDatabases)) }} + - name: MONGODB_EXTRA_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-passwords + {{- end }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- end }} + {{- if and .Values.metrics.enabled (not (empty .Values.metrics.username)) }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + {{- if .Values.auth.enabled }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . 
}} + key: mongodb-metrics-password + {{- end }} + {{- end }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + - name: MONGODB_SYSTEM_LOG_VERBOSITY + value: {{ .Values.systemLogVerbosity | quote }} + - name: MONGODB_DISABLE_SYSTEM_LOG + value: {{ ternary "yes" "no" .Values.disableSystemLog | quote }} + - name: MONGODB_DISABLE_JAVASCRIPT + value: {{ ternary "yes" "no" .Values.disableJavascript | quote }} + - name: MONGODB_ENABLE_JOURNAL + value: {{ ternary "yes" "no" .Values.enableJournal | quote }} + - name: MONGODB_PORT_NUMBER + value: {{ .Values.containerPorts.mongodb | quote }} + - name: MONGODB_ENABLE_IPV6 + value: {{ ternary "yes" "no" .Values.enableIPv6 | quote }} + - name: MONGODB_ENABLE_DIRECTORY_PER_DB + value: {{ ternary "yes" "no" .Values.directoryPerDB | quote }} + {{- $extraFlags := .Values.extraFlags | join " " -}} + {{- if .Values.tls.enabled }} + {{- $extraFlags = printf "--tlsMode=%s --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert %s" .Values.tls.mode $extraFlags }} + {{- end }} + {{- if ne $extraFlags "" }} + - name: MONGODB_EXTRA_FLAGS + value: {{ $extraFlags | quote }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: MONGODB_CLIENT_EXTRA_FLAGS + value: --tls --tlsCertificateKeyFile=/certs/mongodb.pem --tlsCAFile=/certs/mongodb-ca-cert + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . | quote }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ tpl .Values.extraEnvVarsSecret . 
| quote }} + {{- end }} + {{- end }} + ports: + - name: mongodb + containerPort: {{ .Values.containerPorts.mongodb }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/ping-mongodb.sh + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/readiness-probe.sh + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - /bitnami/scripts/startup-probe.sh + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: datadir + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + - name: common-scripts + mountPath: /bitnami/scripts + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/mongodb/conf/mongodb.conf + subPath: mongodb.conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "mongodb.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -ec + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.args "context" $) | nindent 12 }} + {{- else }} + args: + - | + /bin/mongodb_exporter {{ include "mongodb.exporterArgs" $ }} --mongodb.direct-connect --mongodb.global-conn-pool --web.listen-address ":{{ .Values.metrics.containerPort }}" --mongodb.uri "{{ include "mongodb.mongodb_exporter.uri" . }}" {{ .Values.metrics.extraFlags }} + {{- end }} + env: + {{- if .Values.auth.enabled }} + {{- if not .Values.metrics.username }} + - name: MONGODB_ROOT_USER + value: {{ .Values.auth.rootUser | quote }} + - name: MONGODB_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-root-password + {{- else }} + - name: MONGODB_METRICS_USERNAME + value: {{ .Values.metrics.username | quote }} + - name: MONGODB_METRICS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "mongodb.secretName" . }} + key: mongodb-metrics-password + {{- end }} + {{- end }} + volumeMounts: + {{- if .Values.tls.enabled }} + - name: certs + mountPath: /certs + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml 
.Values.metrics.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: common-scripts + configMap: + name: {{ printf "%s-common-scripts" (include "mongodb.fullname" .) }} + defaultMode: 0550 + {{- if or .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "mongodb.initdbScriptsCM" . }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "mongodb.configmapName" . }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: certs + emptyDir: {} + {{- if (include "mongodb.autoGenerateCerts" .) }} + - name: certs-volume + secret: + secretName: {{ template "mongodb.tlsSecretName" . }} + items: + - key: mongodb-ca-cert + path: mongodb-ca-cert + mode: 0600 + - key: mongodb-ca-key + path: mongodb-ca-key + mode: 0600 + {{- else }} + - name: mongodb-certs-0 + secret: + secretName: {{ include "common.tplvalues.render" ( dict "value" .Values.tls.standalone.existingSecret "context" $) }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: datadir + {{- if .Values.persistence.medium }} + emptyDir: + medium: {{ .Values.persistence.medium | quote }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.persistence.existingClaim }} + - name: datadir + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.useStatefulSet }} + - name: datadir + persistentVolumeClaim: + claimName: {{ template "mongodb.fullname" . }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: datadir + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.volumeClaimTemplates.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.volumeClaimTemplates.selector "context" $) | nindent 10 }} + {{- end }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/standalone/pvc.yaml b/install-bundler/install-mongo/mongodb/templates/standalone/pvc.yaml new file mode 100644 index 00000000..d59bad91 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/standalone/pvc.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (not (eq .Values.architecture "replicaset")) (not .Values.useStatefulSet) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "mongodb.fullname" . }} + namespace: {{ include "mongodb.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + annotations: + {{- if .Values.persistence.resourcePolicy }} + helm.sh/resource-policy: {{ .Values.persistence.resourcePolicy | quote }} + {{- end }} + {{- if or .Values.persistence.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.annotations .Values.commonAnnotations ) "context" . ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/templates/standalone/svc.yaml b/install-bundler/install-mongo/mongodb/templates/standalone/svc.yaml new file mode 100644 index 00000000..5d0d03f3 --- /dev/null +++ b/install-bundler/install-mongo/mongodb/templates/standalone/svc.yaml @@ -0,0 +1,62 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not (eq .Values.architecture "replicaset") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mongodb.service.nameOverride" . }} + namespace: {{ include "mongodb.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerClass }} + loadBalancerClass: {{ .Values.service.loadBalancerClass }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{ toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (eq .Values.service.type "LoadBalancer") }} + allocateLoadBalancerNodePorts: {{ .Values.service.allocateLoadBalancerNodePorts }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + ports: + - name: {{ .Values.service.portName | quote }} + port: {{ .Values.service.ports.mongodb }} + targetPort: mongodb + {{- if and (or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")) .Values.service.nodePorts.mongodb }} + nodePort: {{ .Values.service.nodePorts.mongodb }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: mongodb +{{- end }} diff --git a/install-bundler/install-mongo/mongodb/values.yaml b/install-bundler/install-mongo/mongodb/values.yaml new file mode 100644 index 00000000..776bc78d --- /dev/null +++ b/install-bundler/install-mongo/mongodb/values.yaml @@ -0,0 +1,2300 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.namespaceOverride Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + namespaceOverride: "" + +## @section Common parameters +## + +## @param nameOverride String to partially override mongodb.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override mongodb.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## extraDeploy: +## This needs to be uncommented and added to 'extraDeploy' in order to use the replicaset 'mongo-labeler' sidecar +## for dynamically discovering the mongodb primary pod +## suggestion is to use a hard-coded and predictable TCP port for the primary mongodb pod (here is 30001, choose your own) +## - apiVersion: v1 +## kind: Service +## metadata: +## name: mongodb-primary +## namespace: the-mongodb-namespace +## labels: +## app.kubernetes.io/component: mongodb +## app.kubernetes.io/instance: mongodb +## app.kubernetes.io/managed-by: Helm +## app.kubernetes.io/name: mongodb +## spec: +## type: NodePort +## externalTrafficPolicy: Cluster +## ports: +## - name: mongodb +## port: 30001 +## nodePort: 30001 +## protocol: TCP +## targetPort: mongodb +## selector: +## app.kubernetes.io/component: mongodb +## app.kubernetes.io/instance: mongodb +## app.kubernetes.io/name: mongodb +## primary: "true" +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template +## +commonLabels: {} +## @param commonAnnotations Common annotations to add to all Mongo resources (sub-charts are not considered). Evaluated as a template +## +commonAnnotations: {} + +## @param topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used +## i.e. topologyKey: topology.kubernetes.io/zone +## +topologyKey: "" + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section MongoDB(®) parameters +##docker pull zcube/bitnami-compat-mongodb + +## Bitnami MongoDB(®) image +## ref: https://hub.docker.com/r/bitnami/mongodb/tags/ +## @param image.registry MongoDB(®) image registry +## @param image.repository MongoDB(®) image registry +## @param image.tag MongoDB(®) image tag (immutable tags are recommended) +## @param image.digest MongoDB(®) image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy MongoDB(®) image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: docker.io + repository: zcube/bitnami-compat-mongodb + tag: latest + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: true + +## @param schedulerName Name of the scheduler (other than default) to dispatch pods +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param architecture MongoDB(®) architecture (`standalone` or `replicaset`) +## +architecture: standalone +## @param useStatefulSet Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) +## +useStatefulSet: false +## MongoDB(®) Authentication parameters +## +auth: + ## @param auth.enabled Enable authentication + ## ref: https://docs.mongodb.com/manual/tutorial/enable-authentication/ + ## + enabled: true + ## @param auth.rootUser MongoDB(®) root user + ## + rootUser: root + ## @param auth.rootPassword MongoDB(®) root password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#setting-the-root-user-and-password-on-first-run + ## + rootPassword: rootPasswordMongo + ## MongoDB(®) custom users and databases + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#creating-a-user-and-database-on-first-run + ## @param auth.usernames List of custom users to be created during the initialization + ## @param auth.passwords List of passwords for the custom users set at `auth.usernames` + ## @param auth.databases List of custom databases to be created during the initialization + ## + usernames: [] + passwords: [] + databases: [] + ## @param auth.username DEPRECATED: use `auth.usernames` instead + ## @param auth.password DEPRECATED: use `auth.passwords` instead + ## @param auth.database DEPRECATED: use `auth.databases` instead + ## + username: "" + password: "" + database: "" + ## @param auth.replicaSetKey Key used for authentication in the replicaset (only when `architecture=replicaset`) + ## + replicaSetKey: "" + ## @param auth.existingSecret Existing secret with MongoDB(®) credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, `mongodb-replica-set-key`) + ## NOTE: When it's set the previous parameters are ignored. + ## + existingSecret: "" +tls: + ## @param tls.enabled Enable MongoDB(®) TLS support between nodes in the cluster as well as between mongo clients and nodes + ## + enabled: false + ## @param tls.autoGenerated Generate a custom CA and self-signed certificates + ## + autoGenerated: true + ## @param tls.existingSecret Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`) + ## NOTE: When it's set it will disable secret creation. 
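+  ## For illustration only, a hedged sketch of a values override that points the chart at a
+  ## pre-created CA secret (the secret name "my-mongodb-ca" is hypothetical and must already
+  ## contain the mongodb-ca-cert and mongodb-ca-key keys):
+  ## tls:
+  ##   enabled: true
+  ##   autoGenerated: false
+  ##   existingSecret: "my-mongodb-ca"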
+ ## + existingSecret: "" + ## Add Custom CA certificate + ## @param tls.caCert Custom CA certificated (base64 encoded) + ## @param tls.caKey CA certificate private key (base64 encoded) + ## + caCert: "" + caKey: "" + ## @param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. + ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. + ## + pemChainIncluded: false + standalone: + ## @param tls.standalone.existingSecret Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. + ## NOTE: When it's set it will disable certificate self-generation from existing CA. + ## + existingSecret: "" + replicaset: + ## @param tls.replicaset.existingSecrets Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. + ## existingSecrets: + ## - "mySecret-0" + ## - "mySecret-1" + ## NOTE: When it's set it will disable certificate self-generation from existing CA. + ## + existingSecrets: [] + hidden: + ## @param tls.hidden.existingSecrets Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. + ## existingSecrets: + ## - "mySecret-0" + ## - "mySecret-1" + ## NOTE: When it's set it will disable certificate self-generation from existing CA. + ## + existingSecrets: [] + arbiter: + ## @param tls.arbiter.existingSecret Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. + ## NOTE: When it's set it will disable certificate self-generation from existing CA. + ## + existingSecret: "" + ## Bitnami Nginx image + ## @param tls.image.registry Init container TLS certs setup image registry + ## @param tls.image.repository Init container TLS certs setup image repository + ## @param tls.image.tag Init container TLS certs setup image tag (immutable tags are recommended) + ## @param tls.image.digest Init container TLS certs setup image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param tls.image.pullPolicy Init container TLS certs setup image pull policy + ## @param tls.image.pullSecrets Init container TLS certs specify docker-registry secret names as an array + ## @param tls.extraDnsNames Add extra dns names to the CA, can solve x509 auth issue for pod clients + ## + image: + registry: docker.io + repository: bitnami/nginx + tag: 1.25.2-debian-11-r25 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## e.g: + ## extraDnsNames + ## "DNS.6": "$my_host" + ## "DNS.7": "$test" + ## + extraDnsNames: [] + ## @param tls.mode Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) + ## + mode: requireTLS + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. 
This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param tls.resources.limits Init container generate-tls-certs resource limits + ## @param tls.resources.requests Init container generate-tls-certs resource requests + ## + resources: + ## Example: + limits: + cpu: 250m + memory: 250Mi + requests: + cpu: 512m + memory: 512Mi + +## @param hostAliases Add deployment host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param replicaSetName Name of the replica set (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## +replicaSetName: rs0 +## @param replicaSetHostnames Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## Ignored when externalAccess.enabled=true +## +replicaSetHostnames: true +## @param enableIPv6 Switch to enable/disable IPv6 on MongoDB(®) +## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#enablingdisabling-ipv6 +## +enableIPv6: false +## @param directoryPerDB Switch to enable/disable DirectoryPerDB on MongoDB(®) +## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#enablingdisabling-directoryperdb +## +directoryPerDB: false +## MongoDB(®) System Log configuration +## ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#configuring-system-log-verbosity-level +## @param systemLogVerbosity MongoDB(®) system log verbosity level +## @param disableSystemLog Switch to enable/disable MongoDB(®) system log +## +systemLogVerbosity: 0 +disableSystemLog: false +## @param disableJavascript Switch to enable/disable MongoDB(®) server-side JavaScript execution +## ref: https://docs.mongodb.com/manual/core/server-side-javascript/ +## +disableJavascript: false +## @param enableJournal Switch to enable/disable MongoDB(®) Journaling +## ref: https://docs.mongodb.com/manual/reference/configuration-options/#mongodb-setting-storage.journal.enabled +## +enableJournal: true +## @param configuration MongoDB(®) configuration file to be used for Primary and Secondary nodes +## For documentation of all options, see: http://docs.mongodb.org/manual/reference/configuration-options/ +## Example: +## configuration: |- +## # where and how to store data. 
+## storage: +## dbPath: /bitnami/mongodb/data/db +## journal: +## enabled: true +## directoryPerDB: false +## # where to write logging data +## systemLog: +## destination: file +## quiet: false +## logAppend: true +## logRotate: reopen +## path: /opt/bitnami/mongodb/logs/mongodb.log +## verbosity: 0 +## # network interfaces +## net: +## port: 27017 +## unixDomainSocket: +## enabled: true +## pathPrefix: /opt/bitnami/mongodb/tmp +## ipv6: false +## bindIpAll: true +## # replica set options +## #replication: +## #replSetName: replicaset +## #enableMajorityReadConcern: true +## # process management options +## processManagement: +## fork: false +## pidFilePath: /opt/bitnami/mongodb/tmp/mongodb.pid +## # set parameter options +## setParameter: +## enableLocalhostAuthBypass: true +## # security options +## security: +## authorization: disabled +## #keyFile: /opt/bitnami/mongodb/conf/keyfile +## +configuration: "" +## @section replicaSetConfigurationSettings settings applied during runtime (not via configuration file) +## If enabled, these are applied by a script which is called within setup.sh +## for documentation see https://docs.mongodb.com/manual/reference/replica-configuration/#replica-set-configuration-fields +## @param replicaSetConfigurationSettings.enabled Enable MongoDB(®) Switch to enable/disable configuring MongoDB(®) run time rs.conf settings +## @param replicaSetConfigurationSettings.configuration run-time rs.conf settings +## +replicaSetConfigurationSettings: + enabled: false + configuration: {} +## chainingAllowed : false +## heartbeatTimeoutSecs : 10 +## heartbeatIntervalMillis : 2000 +## electionTimeoutMillis : 10000 +## catchUpTimeoutMillis : 30000 +## @param existingConfigmap Name of existing ConfigMap with MongoDB(®) configuration for Primary and Secondary nodes +## NOTE: When it's set the arbiter.configuration parameter is ignored +## +existingConfigmap: "" +## @param initdbScripts Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} +## @param initdbScriptsConfigMap Existing ConfigMap with custom initdb scripts +## +initdbScriptsConfigMap: "" +## Command and args for running the container (set to default if not set). Use array form +## @param command Override default container command (useful when using custom images) +## @param args Override default container args (useful when using custom images) +## +command: [] +args: [] +## @param extraFlags MongoDB(®) additional command line flags +## Example: +## extraFlags: +## - "--wiredTigerCacheSizeGB=2" +## +extraFlags: [] +## @param extraEnvVars Extra environment variables to add to MongoDB(®) pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## @section MongoDB(®) statefulset parameters +## + +## @param annotations Additional labels to be added to the MongoDB(®) statefulset. Evaluated as a template +## +annotations: {} +## @param labels Annotations to be added to the MongoDB(®) statefulset. 
Evaluated as a template +## +labels: {} +## @param replicaCount Number of MongoDB(®) nodes (only when `architecture=replicaset`) +## Ignored when mongodb.architecture=standalone +## +replicaCount: 2 +## @param updateStrategy.type Strategy to use to replace existing MongoDB(®) pods. When architecture=standalone and useStatefulSet=false, +## this parameter will be applied on a deployment object. In other case it will be applied on a statefulset object +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +## Example: +## updateStrategy: +## type: RollingUpdate +## rollingUpdate: +## maxSurge: 25% +## maxUnavailable: 25% +## +updateStrategy: + type: RollingUpdate +## @param podManagementPolicy Pod management policy for MongoDB(®) +## Should be initialized one by one when building the replicaset for the first time +## +podManagementPolicy: OrderedReady +## @param podAffinityPreset MongoDB(®) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset MongoDB(®) Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type MongoDB(®) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key MongoDB(®) Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values MongoDB(®) Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity MongoDB(®) Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector MongoDB(®) Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations MongoDB(®) Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints MongoDB(®) Spread Constraints for Pods +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] +## @param lifecycleHooks LifecycleHook for the MongoDB(®) container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds MongoDB(®) Termination Grace Period +## +terminationGracePeriodSeconds: "" +## @param podLabels MongoDB(®) pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations MongoDB(®) Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param priorityClassName Name of the existing priority class to be used by MongoDB(®) pod(s) +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param runtimeClassName Name of the runtime class to be used by MongoDB(®) pod(s) +## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ +## +runtimeClassName: "" +## MongoDB(®) pods' Security Context. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable MongoDB(®) pod(s)' Security Context +## @param podSecurityContext.fsGroup Group ID for the volumes of the MongoDB(®) pod(s) +## @param podSecurityContext.sysctls sysctl settings of the MongoDB(®) pod(s)' +## +podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] +## MongoDB(®) containers' Security Context (main and metrics container). 
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable MongoDB(®) container(s)' Security Context +## @param containerSecurityContext.runAsUser User ID for the MongoDB(®) container +## @param containerSecurityContext.runAsGroup Group ID for the MongoDB(®) container +## @param containerSecurityContext.runAsNonRoot Set MongoDB(®) container's Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate MongoDB(®) pod(s) privileges +## @param containerSecurityContext.seccompProfile.type Set MongoDB(®) container's Security Context seccompProfile type +## @param containerSecurityContext.capabilities.drop Set MongoDB(®) container's Security Context capabilities to drop +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL +## MongoDB(®) containers' resource requests and limits. +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for MongoDB(®) containers +## @param resources.requests The requested resources for MongoDB(®) containers +## +resources: + ## Example: + limits: + cpu: 2048m + memory: 2048Mi + requests: + cpu: 1024m + memory: 1024Mi +## @param containerPorts.mongodb MongoDB(®) container port +## +containerPorts: + mongodb: 27017 +## MongoDB(®) pods' liveness probe. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 +## MongoDB(®) pods' readiness probe. Evaluated as a template.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 +## Slow starting containers can be protected through startup probes +## Startup probes are available in Kubernetes version 1.16 and above +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 30 +## @param customLivenessProbe Override default liveness probe for MongoDB(®) containers +## Ignored when livenessProbe.enabled=true +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe for MongoDB(®) containers +## Ignored when readinessProbe.enabled=true +## +customReadinessProbe: {} +## @param customStartupProbe Override default startup probe for MongoDB(®) containers +## Ignored when startupProbe.enabled=true +## +customStartupProbe: {} +## @param initContainers Add additional init containers for the hidden node pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add additional sidecar containers for the MongoDB(®) pod(s) +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## This is an optional 'mongo-labeler' sidecar container that tracks replica-set for the primary mongodb pod +## and labels it dynamically with ' primary: "true" ' in order for an extra-deployed service to always expose +## and attach to the primary pod, this needs to be uncommented along with the suggested 'extraDeploy' example +## and the suggested rbac example for the pod to be allowed adding labels to mongo replica pods +## search 'mongo-labeler' through this file to find the sections that needs to be uncommented to make it work +## +## - name: mongo-labeler +## image: korenlev/k8s-mongo-labeler-sidecar +## imagePullPolicy: Always +## env: +## - name: LABEL_SELECTOR +## value: "app.kubernetes.io/component=mongodb,app.kubernetes.io/instance=mongodb,app.kubernetes.io/name=mongodb" +## - name: NAMESPACE +## value: "the-mongodb-namespace" +## - name: DEBUG +## value: "true" +## +sidecars: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the MongoDB(®) 
container(s) +## Examples: +## extraVolumeMounts: +## - name: extras +## mountPath: /usr/share/extras +## readOnly: true +## +extraVolumeMounts: [] +## @param extraVolumes Optionally specify extra list of additional volumes to the MongoDB(®) statefulset +## extraVolumes: +## - name: extras +## emptyDir: {} +## +extraVolumes: [] +## MongoDB(®) Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation for MongoDB(®) pod(s) + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of MongoDB(®) pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of MongoDB(®) pods that may be made unavailable after the eviction + ## + maxUnavailable: "" + +## @section Traffic exposure parameters +## + +## Service parameters +## +service: + ## @param service.nameOverride MongoDB(®) service name + ## + nameOverride: "" + ## @param service.type Kubernetes Service type (only for standalone architecture) + ## + type: ClusterIP + ## @param service.portName MongoDB(®) service port name (only for standalone architecture) + ## + portName: mongodb + ## @param service.ports.mongodb MongoDB(®) service port. + ## + ports: + mongodb: 27017 + ## @param service.nodePorts.mongodb Port to bind to for NodePort and LoadBalancer service types (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + mongodb: "" + ## @param service.clusterIP MongoDB(®) service cluster IP (only for standalone architecture) + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.externalIPs Specify the externalIP value ClusterIP service type (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + ## @param service.loadBalancerIP loadBalancerIP for MongoDB(®) Service (only for standalone architecture) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerClass loadBalancerClass for MongoDB(®) Service (only for standalone architecture) + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer (only for standalone architecture) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + loadBalancerSourceRanges: [] + ## @param service.allocateLoadBalancerNodePorts Wheter to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.annotations Provide any additional annotations that may be required + ## + annotations: {} + ## @param service.externalTrafficPolicy service external traffic policy (only for standalone architecture) + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param 
service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} +## External Access to MongoDB(®) nodes configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to MongoDB(®) nodes (only for replicaset architecture) + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs by querying the K8s API + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.digest Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.25.14-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param externalAccess.autoDiscovery.resources.limits Init container auto-discovery resource limits + ## @param externalAccess.autoDiscovery.resources.requests Init container auto-discovery resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## Parameters to configure a set of Pods that connect to an existing MongoDB(®) deployment that lies outside of Kubernetes. 
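+ ## A minimal sketch of such an override (the host below is a hypothetical placeholder, not a default): + ## externalMaster: + ## enabled: true + ## host: "mongodb.example.internal" + ## port: 27017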
+ ## @param externalAccess.externalMaster.enabled Use external master for bootstrapping + ## @param externalAccess.externalMaster.host External master host to bootstrap from + ## @param externalAccess.externalMaster.port Port for MongoDB(®) service external master host + ## + externalMaster: + enabled: false + host: "" + port: 27017 + ## Parameters to configure K8s service(s) used to externally access MongoDB(®) + ## A new service per broker will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. Allowed values: NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.service.portName MongoDB(®) port name used for external access when service type is LoadBalancer + ## + portName: "mongodb" + ## @param externalAccess.service.ports.mongodb MongoDB(®) port used for external access when service type is LoadBalancer + ## + ports: + mongodb: 27017 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for MongoDB(®) nodes + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerClass loadBalancerClass when service type is LoadBalancer + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.service.allocateLoadBalancerNodePorts Wheter to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param externalAccess.service.externalTrafficPolicy MongoDB(®) service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param externalAccess.service.nodePorts Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.service.domain Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort + ## If not specified, the container will try to get the kubernetes node external IP + ## e.g: + ## domain: mydomain.com + ## + domain: "" + ## @param externalAccess.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param externalAccess.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param externalAccess.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## External Access to MongoDB(®) Hidden nodes configuration + ## + hidden: + ## @param 
externalAccess.hidden.enabled Enable Kubernetes external cluster access to MongoDB(®) hidden nodes + ## + enabled: false + ## Parameters to configure K8s service(s) used to externally access MongoDB(®) + ## A new service per broker will be created + ## + service: + ## @param externalAccess.hidden.service.type Kubernetes Service type for external access. Allowed values: NodePort or LoadBalancer + ## + type: LoadBalancer + ## @param externalAccess.hidden.service.portName MongoDB(®) port name used for external access when service type is LoadBalancer + ## + portName: "mongodb" + ## @param externalAccess.hidden.service.ports.mongodb MongoDB(®) port used for external access when service type is LoadBalancer + ## + ports: + mongodb: 27017 + ## @param externalAccess.hidden.service.loadBalancerIPs Array of load balancer IPs for MongoDB(®) nodes + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.hidden.service.loadBalancerClass loadBalancerClass when service type is LoadBalancer + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + ## @param externalAccess.hidden.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.hidden.service.allocateLoadBalancerNodePorts Wheter to allocate node ports when service type is LoadBalancer + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + ## + allocateLoadBalancerNodePorts: true + ## @param externalAccess.hidden.service.externalTrafficPolicy MongoDB(®) service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Local + ## @param externalAccess.hidden.service.nodePorts Array of node ports used to configure MongoDB(®) advertised hostname when service type is NodePort. 
Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.hidden.service.domain Domain or external IP used to configure MongoDB(®) advertised hostname when service type is NodePort + ## If not specified, the container will try to get the kubernetes node external IP + ## e.g: + ## domain: mydomain.com + ## + domain: "" + ## @param externalAccess.hidden.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param externalAccess.hidden.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.hidden.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param externalAccess.hidden.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## @section Persistence parameters +## + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable MongoDB(®) data persistence using PVC + ## + enabled: true + ## @param persistence.medium Provide a medium for `emptyDir` volumes. + ## Requires persistence.enabled: false + ## + medium: "" + ## @param persistence.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## Ignored when mongodb.architecture=replicaset + ## + existingClaim: "" + ## @param persistence.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted + ## + resourcePolicy: "" + ## @param persistence.storageClass PVC Storage Class for MongoDB(®) data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param persistence.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for MongoDB(®) data volume + ## + size: 8Gi + ## @param persistence.annotations PVC annotations + ## + annotations: {} + ## @param persistence.mountPath Path to mount the volume at + ## MongoDB(®) images. + ## + mountPath: /bitnami/mongodb + ## @param persistence.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. + ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param persistence.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. 
when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param persistence.volumeClaimTemplates.requests Custom PVC requests attributes + ## Sometime cloud providers use additional requests attributes to provision custom storage instance + ## See https://cloud.ibm.com/docs/containers?topic=containers-file_storage#file_dynamic_statefulset + ## + requests: {} + ## @param persistence.volumeClaimTemplates.dataSource Add dataSource to the VolumeClaimTemplate + ## + dataSource: {} + +## @section Backup parameters +## This section implements a trivial logical dump cronjob of the database. +## This only comes with the consistency guarantees of the dump program. +## This is not a snapshot based roll forward/backward recovery backup. +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ +## +backup: + ## @param backup.enabled Enable the logical dump of the database "regularly" + ## + enabled: false + ## Fine tuning cronjob's config + ## + cronjob: + ## @param backup.cronjob.schedule Set the cronjob parameter schedule + ## + schedule: "@daily" + ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + ## + concurrencyPolicy: Allow + ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit + ## + failedJobsHistoryLimit: 1 + ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit + ## + successfulJobsHistoryLimit: 3 + ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds + ## + startingDeadlineSeconds: "" + ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished + ## + ttlSecondsAfterFinished: "" + ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy + ## + restartPolicy: OnFailure + ## backup container's Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param backup.cronjob.containerSecurityContext.runAsUser User ID for the backup container + ## @param backup.cronjob.containerSecurityContext.runAsGroup Group ID for the backup container + ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set backup container's Security Context runAsNonRoot + ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Is the container itself readonly + ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate backup pod(s) privileges + ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set backup container's Security Context seccompProfile type + ## @param backup.cronjob.containerSecurityContext.capabilities.drop Set backup container's Security Context capabilities to drop + ## + containerSecurityContext: + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param backup.cronjob.command Set backup container's command to run + ## + command: [] + ## @param backup.cronjob.labels Set the cronjob labels + ## + labels: {} + ## @param backup.cronjob.annotations Set the cronjob annotations + ## + annotations: {} + ## Backup container's + ## + storage: + ## @param backup.cronjob.storage.existingClaim Provide an existing 
`PersistentVolumeClaim` (only when `architecture=standalone`) + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: "" + ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted + ## + resourcePolicy: "" + ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param backup.cronjob.storage.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume + ## + size: 8Gi + ## @param backup.cronjob.storage.annotations PVC annotations + ## + annotations: {} + ## @param backup.cronjob.storage.mountPath Path to mount the volume at + ## + mountPath: /backup/mongodb + ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. + ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + +## @section RBAC parameters +## + +## ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for MongoDB(®) pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the mongodb.fullname template + ## + name: "" + ## @param serviceAccount.annotations Additional Service Account annotations + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether to create & use RBAC resources or not + ## binding MongoDB(®) ServiceAccount to a role + ## that allows MongoDB(®) pods querying the K8s API + ## this needs to be set to 'true' to enable the mongo-labeler sidecar primary mongodb discovery + ## + create: false + ## @param rbac.rules Custom rules to create following the role specification + ## The example below needs to be uncommented to use the 'mongo-labeler' sidecar for dynamic discovery of the primary mongodb pod: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## - watch + ## - update + ## + rules: [] +## PodSecurityPolicy configuration +## Be sure to also set rbac.create to true, otherwise Role and RoleBinding won't be created. 
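+## A minimal sketch of enabling both together in a values override (assuming a cluster older than v1.25 where PodSecurityPolicy still exists): +## rbac: +## create: true +## podSecurityPolicy: +## create: true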
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.allowPrivilegeEscalation Enable privilege escalation + ## Either use predefined policy with some adjustments or use `podSecurityPolicy.spec` + ## + allowPrivilegeEscalation: false + ## @param podSecurityPolicy.privileged Allow privileged + ## + privileged: false + ## @param podSecurityPolicy.spec Specify the full spec to use for Pod Security Policy + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## Defining a spec ignores the above values. + ## + spec: {} + ## Example: + ## allowPrivilegeEscalation: false + ## fsGroup: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## hostIPC: false + ## hostNetwork: false + ## hostPID: false + ## privileged: false + ## readOnlyRootFilesystem: false + ## requiredDropCapabilities: + ## - ALL + ## runAsUser: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## seLinux: + ## rule: 'RunAsAny' + ## supplementalGroups: + ## rule: 'MustRunAs' + ## ranges: + ## - min: 1001 + ## max: 1001 + ## volumes: + ## - 'configMap' + ## - 'secret' + ## - 'emptyDir' + ## - 'persistentVolumeClaim' + ## + +## @section Volume Permissions parameters +## +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r66 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. 
If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false and shmVolume.chmod.enabled=false + ## @param volumePermissions.securityContext.runAsUser User ID for the volumePermissions container + ## + securityContext: + runAsUser: 0 + +## @section Arbiter parameters +## + +arbiter: + ## @param arbiter.enabled Enable deploying the arbiter + ## https://docs.mongodb.com/manual/tutorial/add-replica-set-arbiter/ + ## + enabled: true + ## @param arbiter.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param arbiter.configuration Arbiter configuration file to be used + ## http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configuration: "" + ## @param arbiter.existingConfigmap Name of existing ConfigMap with Arbiter configuration + ## NOTE: When it's set the arbiter.configuration parameter is ignored + ## + existingConfigmap: "" + ## Command and args for running the container (set to default if not set). 
Use array form + ## @param arbiter.command Override default container command (useful when using custom images) + ## @param arbiter.args Override default container args (useful when using custom images) + ## + command: [] + args: [] + ## @param arbiter.extraFlags Arbiter additional command line flags + ## Example: + ## extraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + extraFlags: [] + ## @param arbiter.extraEnvVars Extra environment variables to add to Arbiter pods + ## E.g: + ## extraEnvVars: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + ## @param arbiter.extraEnvVarsCM Name of existing ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param arbiter.extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## @param arbiter.annotations Additional labels to be added to the Arbiter statefulset + ## + annotations: {} + ## @param arbiter.labels Annotations to be added to the Arbiter statefulset + ## + labels: {} + ## @param arbiter.topologySpreadConstraints MongoDB(®) Spread Constraints for arbiter Pods + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + ## @param arbiter.lifecycleHooks LifecycleHook for the Arbiter container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param arbiter.terminationGracePeriodSeconds Arbiter Termination Grace Period + ## + terminationGracePeriodSeconds: "" + ## @param arbiter.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: RollingUpdate + ## @param arbiter.podManagementPolicy Pod management policy for MongoDB(®) + ## Should be initialized one by one when building the replicaset for the first time + ## + podManagementPolicy: OrderedReady + ## @param arbiter.schedulerName Name of the scheduler (other than default) to dispatch pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param arbiter.podAffinityPreset Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param arbiter.podAntiAffinityPreset Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param arbiter.nodeAffinityPreset.type Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param arbiter.nodeAffinityPreset.key Arbiter Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param arbiter.nodeAffinityPreset.values Arbiter Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param arbiter.affinity Arbiter Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: arbiter.podAffinityPreset, arbiter.podAntiAffinityPreset, and arbiter.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param arbiter.nodeSelector Arbiter Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param arbiter.tolerations Arbiter Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param arbiter.podLabels Arbiter pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param arbiter.podAnnotations Arbiter Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param arbiter.priorityClassName Name of the existing priority class to be used by Arbiter pod(s) + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param arbiter.runtimeClassName Name of the runtime class to be used by Arbiter pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## MongoDB(®) Arbiter pods' Security Context. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param arbiter.podSecurityContext.enabled Enable Arbiter pod(s)' Security Context + ## @param arbiter.podSecurityContext.fsGroup Group ID for the volumes of the Arbiter pod(s) + ## @param arbiter.podSecurityContext.sysctls sysctl settings of the Arbiter pod(s)' + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + ## MongoDB(®) Arbiter containers' Security Context (only main container). + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param arbiter.containerSecurityContext.enabled Enable Arbiter container(s)' Security Context + ## @param arbiter.containerSecurityContext.runAsUser User ID for the Arbiter container + ## @param arbiter.containerSecurityContext.runAsGroup Group ID for the Arbiter container + ## @param arbiter.containerSecurityContext.runAsNonRoot Set Arbiter containers' Security Context runAsNonRoot + ## @param arbiter.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Arbiter pod(s) privileges + ## @param arbiter.containerSecurityContext.seccompProfile.type Set Arbiter container's Security Context seccompProfile type + ## @param arbiter.containerSecurityContext.capabilities.drop Set Arbiter container's Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## MongoDB(®) Arbiter containers' resource requests and limits. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. 
This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param arbiter.resources.limits The resources limits for Arbiter containers + ## @param arbiter.resources.requests The requested resources for Arbiter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## @param arbiter.containerPorts.mongodb MongoDB(®) arbiter container port + ## + containerPorts: + mongodb: 27017 + ## MongoDB(®) Arbiter pods' liveness probe. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.livenessProbe.enabled Enable livenessProbe + ## @param arbiter.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param arbiter.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param arbiter.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param arbiter.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param arbiter.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## MongoDB(®) Arbiter pods' readiness probe. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.readinessProbe.enabled Enable readinessProbe + ## @param arbiter.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param arbiter.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param arbiter.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param arbiter.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param arbiter.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## MongoDB(®) Arbiter pods' startup probe. Evaluated as a template. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param arbiter.startupProbe.enabled Enable startupProbe + ## @param arbiter.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param arbiter.startupProbe.periodSeconds Period seconds for startupProbe + ## @param arbiter.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param arbiter.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param arbiter.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param arbiter.customLivenessProbe Override default liveness probe for Arbiter containers + ## Ignored when arbiter.livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param arbiter.customReadinessProbe Override default readiness probe for Arbiter containers + ## Ignored when arbiter.readinessProbe.enabled=true + ## + customReadinessProbe: {} + ## @param arbiter.customStartupProbe Override default startup probe for Arbiter containers + ## Ignored when arbiter.startupProbe.enabled=true + ## + customStartupProbe: {} + ## @param arbiter.initContainers Add additional init containers for the Arbiter pod(s) + ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param arbiter.sidecars Add additional sidecar containers for the Arbiter pod(s) + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param arbiter.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Arbiter container(s) + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## + extraVolumeMounts: [] + ## @param arbiter.extraVolumes Optionally specify extra list of additional volumes to the Arbiter statefulset + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + ## + extraVolumes: [] + ## MongoDB(®) Arbiter Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param arbiter.pdb.create Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) + ## + create: false + ## @param arbiter.pdb.minAvailable Minimum number/percentage of Arbiter pods that should remain scheduled + ## + minAvailable: 1 + ## @param arbiter.pdb.maxUnavailable Maximum number/percentage of Arbiter pods that may be made unavailable + ## + maxUnavailable: "" + ## MongoDB(®) Arbiter service parameters + ## + service: + ## @param arbiter.service.nameOverride The arbiter service name + ## + nameOverride: "" + ## @param arbiter.service.ports.mongodb MongoDB(®) service port + ## + ports: + mongodb: 27017 + ## @param arbiter.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param arbiter.service.annotations Provide any additional annotations that may be required + ## + annotations: {} + ## Headless service properties + ## + headless: + ## @param arbiter.service.headless.annotations Annotations for the headless service. 
+ ## + annotations: {} + +## @section Hidden Node parameters +## + +hidden: + ## @param hidden.enabled Enable deploying the hidden nodes + ## https://docs.mongodb.com/manual/tutorial/configure-a-hidden-replica-set-member/ + ## + enabled: false + ## @param hidden.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param hidden.configuration Hidden node configuration file to be used + ## http://docs.mongodb.org/manual/reference/configuration-options/ + ## + configuration: "" + ## @param hidden.existingConfigmap Name of existing ConfigMap with Hidden node configuration + ## NOTE: When it's set the hidden.configuration parameter is ignored + ## + existingConfigmap: "" + ## Command and args for running the container (set to default if not set). Use array form + ## @param hidden.command Override default container command (useful when using custom images) + ## @param hidden.args Override default container args (useful when using custom images) + ## + command: [] + args: [] + ## @param hidden.extraFlags Hidden node additional command line flags + ## Example: + ## extraFlags: + ## - "--wiredTigerCacheSizeGB=2" + ## + extraFlags: [] + ## @param hidden.extraEnvVars Extra environment variables to add to Hidden node pods + ## E.g: + ## extraEnvVars: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + ## @param hidden.extraEnvVarsCM Name of existing ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param hidden.extraEnvVarsSecret Name of existing Secret containing extra env vars (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## @param hidden.annotations Additional labels to be added to thehidden node statefulset + ## + annotations: {} + ## @param hidden.labels Annotations to be added to the hidden node statefulset + ## + labels: {} + ## @param hidden.topologySpreadConstraints MongoDB(®) Spread Constraints for hidden Pods + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + ## @param hidden.lifecycleHooks LifecycleHook for the Hidden container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param hidden.replicaCount Number of hidden nodes (only when `architecture=replicaset`) + ## Ignored when mongodb.architecture=standalone + ## + replicaCount: 1 + ## @param hidden.terminationGracePeriodSeconds Hidden Termination Grace Period + ## + terminationGracePeriodSeconds: "" + ## @param hidden.updateStrategy.type Strategy that will be employed to update Pods in the StatefulSet + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: RollingUpdate + ## @param hidden.podManagementPolicy Pod management policy for hidden node + ## + podManagementPolicy: OrderedReady + ## @param hidden.schedulerName Name of the scheduler (other than default) to dispatch pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param hidden.podAffinityPreset Hidden node Pod affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param hidden.podAntiAffinityPreset Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Allowed values: soft, hard + ## + nodeAffinityPreset: + ## @param hidden.nodeAffinityPreset.type Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param hidden.nodeAffinityPreset.key Hidden Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param hidden.nodeAffinityPreset.values Hidden Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param hidden.affinity Hidden node Affinity for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param hidden.nodeSelector Hidden node Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param hidden.tolerations Hidden node Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param hidden.podLabels Hidden node pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param hidden.podAnnotations Hidden node Pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param hidden.priorityClassName Name of the existing priority class to be used by hidden node pod(s) + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param hidden.runtimeClassName Name of the runtime class to be used by hidden node pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## MongoDB(®) Hidden pods' Security Context. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param hidden.podSecurityContext.enabled Enable Hidden pod(s)' Security Context + ## @param hidden.podSecurityContext.fsGroup Group ID for the volumes of the Hidden pod(s) + ## @param hidden.podSecurityContext.sysctls sysctl settings of the Hidden pod(s)' + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings + ## Example: + ## sysctls: + ## - name: net.core.somaxconn + ## value: "10000" + ## + sysctls: [] + ## MongoDB(®) Hidden containers' Security Context (only main container). 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param hidden.containerSecurityContext.enabled Enable Hidden container(s)' Security Context + ## @param hidden.containerSecurityContext.runAsUser User ID for the Hidden container + ## @param hidden.containerSecurityContext.runAsGroup Group ID for the Hidden container + ## @param hidden.containerSecurityContext.runAsNonRoot Set Hidden containers' Security Context runAsNonRoot + ## @param hidden.containerSecurityContext.allowPrivilegeEscalation Set Hidden containers' Security Context allowPrivilegeEscalation + ## @param hidden.containerSecurityContext.seccompProfile.type Set Hidden container's Security Context seccompProfile type + ## @param hidden.containerSecurityContext.capabilities.drop Set Hidden container's Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## MongoDB(®) Hidden containers' resource requests and limits. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param hidden.resources.limits The resources limits for hidden node containers + ## @param hidden.resources.requests The requested resources for hidden node containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## @param hidden.containerPorts.mongodb MongoDB(®) hidden container port + ## + containerPorts: + mongodb: 27017 + ## MongoDB(®) Hidden pods' liveness probe. Evaluated as a template. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param hidden.livenessProbe.enabled Enable livenessProbe + ## @param hidden.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param hidden.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param hidden.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param hidden.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param hidden.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## MongoDB(®) Hidden pods' readiness probe. Evaluated as a template. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## @param hidden.readinessProbe.enabled Enable readinessProbe + ## @param hidden.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param hidden.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param hidden.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param hidden.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param hidden.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 20 + timeoutSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + ## Slow starting containers can be protected through startup probes + ## Startup probes are available in Kubernetes version 1.16 and above + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + ## @param hidden.startupProbe.enabled Enable startupProbe + ## @param hidden.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param hidden.startupProbe.periodSeconds Period seconds for startupProbe + ## @param hidden.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param hidden.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param hidden.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param hidden.customLivenessProbe Override default liveness probe for hidden node containers + ## Ignored when hidden.livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param hidden.customReadinessProbe Override default readiness probe for hidden node containers + ## Ignored when hidden.readinessProbe.enabled=true + ## + customReadinessProbe: {} + ## @param hidden.customStartupProbe Override default startup probe for MongoDB(®) containers + ## Ignored when hidden.startupProbe.enabled=true + ## + customStartupProbe: {} + ## @param hidden.initContainers Add init containers to the MongoDB(®) Hidden pods. 
+ ## Example: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param hidden.sidecars Add additional sidecar containers for the hidden node pod(s) + ## Example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param hidden.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the hidden node container(s) + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## + extraVolumeMounts: [] + ## @param hidden.extraVolumes Optionally specify extra list of additional volumes to the hidden node statefulset + ## extraVolumes: + ## - name: extras + ## emptyDir: {} + ## + extraVolumes: [] + ## MongoDB(®) Hidden Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + ## + pdb: + ## @param hidden.pdb.create Enable/disable a Pod Disruption Budget creation for hidden node pod(s) + ## + create: false + ## @param hidden.pdb.minAvailable Minimum number/percentage of hidden node pods that should remain scheduled + ## + minAvailable: 1 + ## @param hidden.pdb.maxUnavailable Maximum number/percentage of hidden node pods that may be made unavailable + ## + maxUnavailable: "" + ## Enable persistence using Persistent Volume Claims + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param hidden.persistence.enabled Enable hidden node data persistence using PVC + ## + enabled: true + ## @param hidden.persistence.medium Provide a medium for `emptyDir` volumes. + ## Requires hidden.persistence.enabled: false + ## + medium: "" + ## @param hidden.persistence.storageClass PVC Storage Class for hidden node data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param hidden.persistence.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param hidden.persistence.size PVC Storage Request for hidden node data volume + ## + size: 8Gi + ## @param hidden.persistence.annotations PVC annotations + ## + annotations: {} + ## @param hidden.persistence.mountPath The path the volume will be mounted at, useful when using different MongoDB(®) images. + ## + mountPath: /bitnami/mongodb + ## @param hidden.persistence.subPath The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param hidden.persistence.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. 
when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param hidden.persistence.volumeClaimTemplates.requests Custom PVC requests attributes + ## Sometimes cloud providers use additional requests attributes to provision custom storage instance + ## See https://cloud.ibm.com/docs/containers?topic=containers-file_storage#file_dynamic_statefulset + ## + requests: {} + ## @param hidden.persistence.volumeClaimTemplates.dataSource Set volumeClaimTemplate dataSource + ## + dataSource: {} + service: + ## @param hidden.service.portName MongoDB(®) service port name + ## + portName: "mongodb" + ## @param hidden.service.ports.mongodb MongoDB(®) service port + ## + ports: + mongodb: 27017 + ## @param hidden.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param hidden.service.annotations Provide any additional annotations that may be required + ## + annotations: {} + ## Headless service properties + ## + headless: + ## @param hidden.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + +## @section Metrics parameters +## + +metrics: + ## @param metrics.enabled Enable using a sidecar Prometheus exporter + ## + enabled: false + ## Bitnami MongoDB(®) Prometheus Exporter image + ## ref: https://hub.docker.com/r/bitnami/mongodb-exporter/tags/ + ## @param metrics.image.registry MongoDB(®) Prometheus exporter image registry + ## @param metrics.image.repository MongoDB(®) Prometheus exporter image repository + ## @param metrics.image.tag MongoDB(®) Prometheus exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest MongoDB(®) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy MongoDB(®) Prometheus exporter image pull policy + ## @param metrics.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/mongodb-exporter + tag: 0.39.0-debian-11-r100 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## @param metrics.username String with username for the metrics exporter + ## If undefined the root user will be used for the metrics exporter + ## + username: "" + ## @param metrics.password String with password for the metrics exporter + ## If undefined but metrics.username is defined, a random password will be generated + ## + password: "" + ## @param metrics.compatibleMode Enables old style mongodb-exporter metrics + compatibleMode: true + + collector: + ## @param metrics.collector.all Enable all collectors. 
Same as enabling all individual metrics + ## Enabling all metrics will cause significant CPU load on mongod + all: false + ## @param metrics.collector.diagnosticdata Boolean Enable collecting metrics from getDiagnosticData + diagnosticdata: true + ## @param metrics.collector.replicasetstatus Boolean Enable collecting metrics from replSetGetStatus + replicasetstatus: true + ## @param metrics.collector.dbstats Boolean Enable collecting metrics from dbStats + dbstats: false + ## @param metrics.collector.topmetrics Boolean Enable collecting metrics from top admin command + topmetrics: false + ## @param metrics.collector.indexstats Boolean Enable collecting metrics from $indexStats + indexstats: false + ## @param metrics.collector.collstats Boolean Enable collecting metrics from $collStats + collstats: false + ## @param metrics.collector.collstatsColls List of \<database\>.\<collection\> to get $collStats + collstatsColls: [] + ## @param metrics.collector.indexstatsColls List - List of \<database\>.\<collection\> to get $indexStats + indexstatsColls: [] + ## @param metrics.collector.collstatsLimit Number - Disable collstats, dbstats, topmetrics and indexstats collector if there are more than \<n\> collections. 0=No limit + collstatsLimit: 0 + + ## @param metrics.extraFlags String with extra flags to the metrics exporter + ## ref: https://github.com/percona/mongodb_exporter/blob/main/main.go + ## + extraFlags: "" + ## Command and args for running the container (set to default if not set). Use array form + ## @param metrics.command Override default container command (useful when using custom images) + ## @param metrics.args Override default container args (useful when using custom images) + ## + command: [] + args: [] + ## Metrics exporter container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param metrics.resources.limits The resources limits for Prometheus exporter containers + ## @param metrics.resources.requests The requested resources for Prometheus exporter containers + ## + resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + ## @param metrics.containerPort Port of the Prometheus metrics container + ## + containerPort: 9216 + ## Prometheus Exporter service configuration + ## + service: + ## @param metrics.service.annotations [object] Annotations for Prometheus Exporter pods. Evaluated as a template. 
+ ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## @param metrics.service.type Type of the Prometheus metrics service + ## + type: ClusterIP + ## @param metrics.service.ports.metrics Port of the Prometheus metrics service + ## + ports: + metrics: 9216 + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## Metrics exporter liveness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.livenessProbe.enabled Enable livenessProbe + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 15 + periodSeconds: 5 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + ## Metrics exporter readiness probe + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## @param metrics.readinessProbe.enabled Enable readinessProbe + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + ## Slow starting containers can be protected through startup probes + ## Startup probes are available in Kubernetes version 1.16 and above + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes + ## @param metrics.startupProbe.enabled Enable startupProbe + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 30 + ## @param metrics.customLivenessProbe Override default liveness probe for MongoDB(®) containers + ## Ignored when livenessProbe.enabled=true + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Override default readiness probe for MongoDB(®) containers + ## Ignored when readinessProbe.enabled=true + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Override default startup probe for MongoDB(®) containers + ## Ignored 
when startupProbe.enabled=true + ## + customStartupProbe: {} + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the metrics container(s) + ## Examples: + ## extraVolumeMounts: + ## - name: extras + ## mountPath: /usr/share/extras + ## readOnly: true + ## + extraVolumeMounts: [] + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace which Prometheus is running in + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.labels Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace Namespace where prometheusRules resource should be created + ## + namespace: "" + ## @param metrics.prometheusRule.rules Rules to be created, check values for an example + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#rulegroup + ## https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + ## + ## This is an example of a rule, you should add the below code block under the "rules" param, removing the brackets + ## rules: + ## - alert: HighRequestLatency + ## expr: job:request_latency_seconds:mean5m{job="myjob"} > 0.5 + ## for: 10m + ## labels: + ## severity: page + ## annotations: + ## summary: High request latency + ## + rules: [] diff --git a/install-bundler/install-prometheus-adapter/custom-rules.yaml b/install-bundler/install-prometheus-adapter/custom-rules.yaml new file mode 100644 index 00000000..7b50414f --- /dev/null +++ b/install-bundler/install-prometheus-adapter/custom-rules.yaml @@ -0,0 +1,16 @@ +rules: + custom: + - seriesQuery: '{__name__=~"process_cpu_seconds_total",app="chain-80001"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_total" + as: "80001-cpu-usage" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[30s])) by (<<.GroupBy>>)' + - seriesQuery: '{__name__=~"eth_sendUserOperation_requests", code="200", rpc_method="eth_sendUserOperation", app="chain-80001"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_requests" # Assumes the metric in Prometheus is 'eth_sendUserOperation_requests' + as: "80001-http-requests" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[30s])) by (<<.GroupBy>>)' diff --git a/install-bundler/install-prometheus-adapter/custom-values.sh b/install-bundler/install-prometheus-adapter/custom-values.sh new file mode 100644 index 00000000..05b74303 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/custom-values.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env zsh + +# Chain IDs are passed in as positional arguments + +CHAINS=("$@") +# declare -A chains +# chains=( ["chain-43113"]="3" ["chain-80001"]="2" ) +# Start the rules section of the YAML +echo "rules:" +echo " custom:" + +# Loop through each chain and generate the rules +for chainId in "${CHAINS[@]}"; do + # replicas="${chains[$chainId]}" # unused; left over from the commented-out associative-array approach above + + # Rule for process_cpu_seconds_total + cat <<-EOF + - seriesQuery: '{__name__=~"process_cpu_seconds_total",app="chain-${chainId}"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_total" + as: "${chainId}-cpu-usage" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[30s])) by (<<.GroupBy>>)' +EOF + + # Rule for eth_sendUserOperation_requests + cat <<-EOF + - seriesQuery: '{__name__=~"eth_sendUserOperation_requests", code="200", rpc_method="eth_sendUserOperation", app="chain-${chainId}"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_requests" # Assumes the metric in Prometheus is 'eth_sendUserOperation_requests' + as: "${chainId}-http-requests" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[30s])) by (<<.GroupBy>>)' +EOF +done \ 
No newline at end of file diff --git a/install-bundler/install-prometheus-adapter/install.sh b/install-bundler/install-prometheus-adapter/install.sh new file mode 100644 index 00000000..7867d8d5 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/install.sh @@ -0,0 +1,53 @@ +#!/bin/bash +RED='\033[0;31m' +NC='\033[0m' +GREEN='\033[0;32m' +SECONDS=0 +ENV=$1 +NAMESPACE=$2 +CHAINS_CFG_FILENAME=$3 +# shift 5 +# CHAINS=("$@") + +HELM_RELEASE="monitoring-adapter"; + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PARENT_DIR="$(dirname "$DIR")" + + +source "$PARENT_DIR/configs/chains/$CHAINS_CFG_FILENAME" + +array_names=$(declare -p | grep -Eo 'chain_[a-zA-Z0-9_]+') + +# Iterate over these names and extract the chainId values +declare -a CHAIN_IDS +for array_name in $array_names; do + eval "chain_id=\${$array_name[chainId]}" + CHAIN_IDS+=("$chain_id") +done + +echo "${CHAIN_IDS[@]}" +# custom-values.sh has a zsh shebang, so invoke it with zsh rather than sh +zsh "$DIR/custom-values.sh" "${CHAIN_IDS[@]}" > "$DIR/custom-rules.yaml" + +echo -e "\n${GREEN} ####### Deploying Prometheus adapter $HELM_RELEASE to $NAMESPACE ${NC} ####### \n"; + +PROMETHEUS_URL="http://prometheus-server.monitoring.svc.cluster.local" + +echo -e "\n${RED} The Prometheus adapter PROMETHEUS URL=<<$PROMETHEUS_URL>> \ + according to format <>, please change it if required ${NC}\n"; + +# Prompt user for confirmation and abort if the URL is not confirmed +read -p "Is the PROMETHEUS URL correct? (y/n) " -n 1 -r +echo +if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo -e "${RED}Aborting: update PROMETHEUS_URL in this script and re-run.${NC}" + exit 1 +fi + +helm upgrade --install --wait --timeout 120s $HELM_RELEASE "$DIR/prometheus-adapter/" \ + -n "$NAMESPACE" \ + -f "$DIR/prometheus-adapter/values.yaml" \ + -f "$DIR/custom-rules.yaml" \ + --set nameOverride=$HELM_RELEASE \ + --set prometheus.url=$PROMETHEUS_URL \ + --set prometheus.port=80 \ + --set replicas=2 + +echo -e "Prometheus-adapter Deployment completed in $SECONDS seconds"; + +## Example of the series the custom rules match: {rpc_method="eth_sendUserOperation", app="chain-137", code="200"} \ No newline at end of file diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/.helmignore b/install-bundler/install-prometheus-adapter/prometheus-adapter/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/Chart.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/Chart.yaml new file mode 100644 index 00000000..61dc83e3 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +appVersion: v0.11.1 +description: A Helm chart for k8s prometheus adapter +home: https://github.com/kubernetes-sigs/prometheus-adapter +keywords: +- hpa +- metrics +- prometheus +- adapter +maintainers: +- email: mattias.gees@jetstack.io + name: mattiasgees +- name: steven-sheehy +- email: hfernandez@mesosphere.com + name: hectorj2f +name: prometheus-adapter +sources: +- https://github.com/kubernetes/charts +- https://github.com/kubernetes-sigs/prometheus-adapter +version: 4.5.0 diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/README.md b/install-bundler/install-prometheus-adapter/prometheus-adapter/README.md new file mode 100644 index 00000000..d77bb0c9 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/README.md @@ -0,0 +1,160 @@ +# Prometheus Adapter + +Installs the [Prometheus Adapter](https://github.com/kubernetes-sigs/prometheus-adapter) for the Custom Metrics API. Custom metrics are used in Kubernetes by [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale workloads based upon your own metric pulled from an external metrics provider like Prometheus. This chart complements the [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server) chart that provides resource only metrics. + +## Prerequisites + +Kubernetes 1.14+ + +## Get Helm Repositories Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Helm Chart + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus-adapter +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Helm Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Helm Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 4.2.0 + +Readiness and liveness probes are now fully configurable through values `readinessProbe` and `livenessProbe`. The previous values have been kept as defaults. + +### To 4.0.0 + +Previously, security context of the container was set directly in the deployment template. This release makes it configurable through the new configuration variable `securityContext` whilst keeping the previously set values as defaults. Furthermore, previous variable `runAsUser` is now set in `securityContext` and is not used any longer. Please, use `securityContext.runAsUser` instead. In the same security context, `seccompProfile` has been enabled and set to type `RuntimeDefault`. 
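+
+As a rough illustration (not part of the upstream chart docs), a values override that previously set `runAsUser` at the top level would now place it under `securityContext`; the user ID below is only a placeholder, and the chart's `values.yaml` holds the full defaults:
+
+```yaml
+# Sketch of a post-4.0.0 values override: runAsUser moves under securityContext,
+# and seccompProfile defaults to RuntimeDefault as described above.
+securityContext:
+  runAsUser: 10001
+  seccompProfile:
+    type: RuntimeDefault
+```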
+ +### To 3.0.0 + +Due to a change in deployment labels, the upgrade requires `helm upgrade --force` in order to re-create the deployment. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/prometheus-adapter +``` + +### Prometheus Service Endpoint + +To use the chart, ensure the `prometheus.url` and `prometheus.port` are configured with the correct Prometheus service endpoint. If Prometheus is exposed under HTTPS the host's CA Bundle must be exposed to the container using `extraVolumes` and `extraVolumeMounts`. + +### Adapter Rules + +Additionally, the chart comes with a set of default rules out of the box but they may pull in too many metrics or not map them correctly for your needs. Therefore, it is recommended to populate `rules.custom` with a list of rules (see the [config document](https://github.com/kubernetes-sigs/prometheus-adapter/blob/master/docs/config.md) for the proper format). + +### Horizontal Pod Autoscaler Metrics + +Finally, to configure your Horizontal Pod Autoscaler to use the custom metric, see the custom metrics section of the [HPA walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics). + +The Prometheus Adapter can serve three different [metrics APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis): + +### Custom Metrics + +Enabling this option will cause custom metrics to be served at `/apis/custom.metrics.k8s.io/v1beta1`. Enabled by default when `rules.default` is true, but can be customized by populating `rules.custom`: + +```yaml +rules: + custom: + - seriesQuery: '{__name__=~"^some_metric_count$"}' + resources: + template: <<.Resource>> + name: + matches: "" + as: "my_custom_metric" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) +``` + +### External Metrics + +Enabling this option will cause external metrics to be served at `/apis/external.metrics.k8s.io/v1beta1`. Can be enabled by populating `rules.external`: + +```yaml +rules: + external: + - seriesQuery: '{__name__=~"^some_metric_count$"}' + resources: + template: <<.Resource>> + name: + matches: "" + as: "my_external_metric" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) +``` + +### Resource Metrics + +Enabling this option will cause resource metrics to be served at `/apis/metrics.k8s.io/v1beta1`. Resource metrics will allow pod CPU and Memory metrics to be used in [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) as well as the `kubectl top` command. 
Can be enabled by populating `rules.resource`: + +```yaml +rules: + resource: + cpu: + containerQuery: | + sum by (<<.GroupBy>>) ( + rate(container_cpu_usage_seconds_total{container!="",<<.LabelMatchers>>}[3m]) + ) + nodeQuery: | + sum by (<<.GroupBy>>) ( + rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal",<<.LabelMatchers>>}[3m]) + ) + resources: + overrides: + node: + resource: node + namespace: + resource: namespace + pod: + resource: pod + containerLabel: container + memory: + containerQuery: | + sum by (<<.GroupBy>>) ( + avg_over_time(container_memory_working_set_bytes{container!="",<<.LabelMatchers>>}[3m]) + ) + nodeQuery: | + sum by (<<.GroupBy>>) ( + avg_over_time(node_memory_MemTotal_bytes{<<.LabelMatchers>>}[3m]) + - + avg_over_time(node_memory_MemAvailable_bytes{<<.LabelMatchers>>}[3m]) + ) + resources: + overrides: + node: + resource: node + namespace: + resource: namespace + pod: + resource: pod + containerLabel: container + window: 3m +``` + +**NOTE:** Setting a value for `rules.resource` will also deploy the resource metrics API service, providing the same functionality as [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server). As such it is not possible to deploy them both in the same cluster. diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/ci/default-values.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/ci/default-values.yaml new file mode 100644 index 00000000..e69de29b diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/ci/external-rules-values.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/ci/external-rules-values.yaml new file mode 100644 index 00000000..2dafb562 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/ci/external-rules-values.yaml @@ -0,0 +1,9 @@ +rules: + external: + - seriesQuery: '{__name__=~"^some_metric_count$"}' + resources: + template: <<.Resource>> + name: + matches: "" + as: "my_custom_metric" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/NOTES.txt b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/NOTES.txt new file mode 100644 index 00000000..b7b9b993 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{ template "k8s-prometheus-adapter.fullname" . }} has been deployed. +In a few minutes you should be able to list metrics using the following command(s): +{{ if .Values.rules.resource }} + kubectl get --raw /apis/metrics.k8s.io/v1beta1 +{{- end }} + kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 +{{ if .Values.rules.external }} + kubectl get --raw /apis/external.metrics.k8s.io/v1beta1 +{{- end }} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/_helpers.tpl b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/_helpers.tpl new file mode 100644 index 00000000..178c00b6 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/_helpers.tpl @@ -0,0 +1,84 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-prometheus-adapter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "k8s-prometheus-adapter.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "k8s-prometheus-adapter.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8s-prometheus-adapter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate basic labels +*/}} +{{- define "k8s-prometheus-adapter.labels" }} +helm.sh/chart: {{ include "k8s-prometheus-adapter.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ template "k8s-prometheus-adapter.name" . }} +{{- include "k8s-prometheus-adapter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "k8s-prometheus-adapter.selectorLabels" }} +app.kubernetes.io/name: {{ include "k8s-prometheus-adapter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "k8s-prometheus-adapter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "k8s-prometheus-adapter.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* Get Policy API Version */}} +{{- define "k8s-prometheus-adapter.pdb.apiVersion" -}} +{{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} +{{- else -}} + {{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/certmanager.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/certmanager.yaml new file mode 100644 index 00000000..4e32c964 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/certmanager.yaml @@ -0,0 +1,76 @@ +{{- if .Values.certManager.enabled -}} +--- +# Create a selfsigned Issuer, in order to create a root CA certificate for +# signing webhook serving certificates +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . 
| indent 4 }} +spec: + selfSigned: {} +--- +# Generate a CA Certificate used to sign certificates for the webhook +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} +spec: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert + duration: {{ .Values.certManager.caCertDuration }} + issuerRef: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer + commonName: "ca.webhook.prometheus-adapter" + isCA: true +--- +# Create an Issuer that uses the above generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} +spec: + ca: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert +--- +# Finally, generate a serving certificate for the apiservices to use +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-cert + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} +spec: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }} + duration: {{ .Values.certManager.certDuration }} + issuerRef: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer + dnsNames: + - {{ template "k8s-prometheus-adapter.fullname" . }} + - {{ template "k8s-prometheus-adapter.fullname" . }}.{{ include "k8s-prometheus-adapter.namespace" . }} + - {{ template "k8s-prometheus-adapter.fullname" . }}.{{ include "k8s-prometheus-adapter.namespace" . }}.svc +{{- end -}} \ No newline at end of file diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml new file mode 100644 index 00000000..6701e6ba --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-system-auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . 
| quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml new file mode 100644 index 00000000..67efd2aa --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . | quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-resource-reader.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-resource-reader.yaml new file mode 100644 index 00000000..2c690a03 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/cluster-role-resource-reader.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + - services + - configmaps + verbs: + - get + - list + - watch +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/configmap.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/configmap.yaml new file mode 100644 index 00000000..17f415d9 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/configmap.yaml @@ -0,0 +1,97 @@ +{{- if not .Values.rules.existing -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . 
| indent 4 }} +data: + config.yaml: | +{{- if or .Values.rules.default .Values.rules.custom }} + rules: +{{- if .Values.rules.default }} + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: [] + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_seconds_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)$ + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_total$ + resources: + template: <<.Resource>> + name: + matches: "" + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_seconds_total + resources: + template: <<.Resource>> + name: + matches: ^(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: [] + resources: + template: <<.Resource>> + name: + matches: ^(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) +{{- end -}} +{{- if .Values.rules.custom }} +{{ toYaml .Values.rules.custom | indent 4 }} +{{- end -}} +{{- end -}} +{{- if .Values.rules.external }} + externalRules: +{{ toYaml .Values.rules.external | indent 4 }} +{{- end -}} +{{- if .Values.rules.resource }} + resourceRules: +{{ toYaml .Values.rules.resource | indent 6 }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-apiservice.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-apiservice.yaml new file mode 100644 index 00000000..8b7b4e55 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-apiservice.yaml @@ -0,0 +1,34 @@ +{{- if or .Values.rules.default .Values.rules.custom }} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if or .Values.certManager.enabled .Values.customAnnotations }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) 
| quote }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} +{{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: v1beta1.custom.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . | quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: custom.metrics.k8s.io + version: v1beta1 + {{- if not (or .Values.tls.enable .Values.certManager.enabled) }} + insecureSkipTLSVerify: true + {{- end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end }} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml new file mode 100644 index 00000000..0cc69208 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml @@ -0,0 +1,24 @@ +{{- /* +This if must be aligned with custom-metrics-cluster-role.yaml +as otherwise this binding will point to not existing role. +*/ -}} +{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . | quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role.yaml new file mode 100644 index 00000000..4aa15ffe --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/custom-metrics-cluster-role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources +rules: +- apiGroups: + - custom.metrics.k8s.io + resources: ["*"] + verbs: ["*"] +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/deployment.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/deployment.yaml new file mode 100644 index 00000000..8068180b --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/deployment.yaml @@ -0,0 +1,139 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + {{- if or .Values.customAnnotations .Values.deploymentAnnotations }} + annotations: + {{- with .Values.customAnnotations }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- with .Values.deploymentAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} +spec: + replicas: {{ .Values.replicas }} + strategy: {{ toYaml .Values.strategy | nindent 4 }} + selector: + matchLabels: + {{- include "k8s-prometheus-adapter.selectorLabels" . | indent 6 }} + template: + metadata: + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | trim | nindent 8 }} + {{- end }} + name: {{ template "k8s-prometheus-adapter.name" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.customAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + {{- if .Values.hostNetwork.enabled }} + hostNetwork: true + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end}} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.env }} + env: + {{- toYaml . | nindent 8 }} + {{- end }} + args: + - /adapter + - --secure-port={{ .Values.listenPort }} + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - --tls-cert-file=/var/run/serving-cert/tls.crt + - --tls-private-key-file=/var/run/serving-cert/tls.key + {{- end }} + - --cert-dir=/tmp/cert + - --prometheus-url={{ tpl .Values.prometheus.url . }}{{ if .Values.prometheus.port }}:{{ .Values.prometheus.port }}{{end}}{{ .Values.prometheus.path }} + - --metrics-relist-interval={{ .Values.metricsRelistInterval }} + - --v={{ .Values.logLevel }} + - --config=/etc/adapter/config.yaml + {{- if .Values.extraArguments }} + {{- toYaml .Values.extraArguments | trim | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.listenPort }} + name: https + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . 
| nindent 10 }} + {{- end }} + volumeMounts: + {{- if .Values.extraVolumeMounts }} + {{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }} + {{ end }} + - mountPath: /etc/adapter/ + name: config + readOnly: true + - mountPath: /tmp + name: tmp + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - mountPath: /var/run/serving-cert + name: volume-serving-cert + readOnly: true + {{- end }} + nodeSelector: + {{- toYaml .Values.nodeSelector | nindent 8 }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + topologySpreadConstraints: + {{- toYaml .Values.topologySpreadConstraints | nindent 8 }} + priorityClassName: {{ .Values.priorityClassName }} + {{- if .Values.podSecurityContext }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + tolerations: + {{- toYaml .Values.tolerations | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + volumes: + {{- if .Values.extraVolumes }} + {{ toYaml .Values.extraVolumes | trim | nindent 6 }} + {{ end }} + - name: config + configMap: + name: {{ .Values.rules.existing | default (include "k8s-prometheus-adapter.fullname" . ) }} + - name: tmp + emptyDir: {} + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - name: volume-serving-cert + secret: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }} + {{- end }} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-apiservice.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-apiservice.yaml new file mode 100644 index 00000000..21339af1 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-apiservice.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rules.external }} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if or .Values.certManager.enabled .Values.customAnnotations }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) | quote }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} +{{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: v1beta1.external.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . 
| quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: external.metrics.k8s.io + version: v1beta1 + {{- if not (or .Values.tls.enable .Values.certManager.enabled) }} + insecureSkipTLSVerify: true + {{- end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml new file mode 100644 index 00000000..05547bd3 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rules.external -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-external-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-external-metrics +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role.yaml new file mode 100644 index 00000000..212ea78b --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/external-metrics-cluster-role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.rbac.create .Values.rules.external -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-external-metrics +rules: +- apiGroups: + - "external.metrics.k8s.io" + resources: + - "*" + verbs: + - list + - get + - watch +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/pdb.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/pdb.yaml new file mode 100644 index 00000000..205761a9 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/pdb.yaml @@ -0,0 +1,23 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: {{ include "k8s-prometheus-adapter.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "k8s-prometheus-adapter.selectorLabels" . 
| indent 6 }} +{{- end }} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/psp.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/psp.yaml new file mode 100644 index 00000000..3f3f79e5 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/psp.yaml @@ -0,0 +1,66 @@ +{{- if and .Values.psp.create (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} +spec: + {{- if .Values.hostNetwork.enabled }} + hostNetwork: true + hostPorts: + - min: {{ .Values.listenPort }} + max: {{ .Values.listenPort }} + {{- end }} + fsGroup: + rule: RunAsAny + runAsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAs + ranges: + - min: 1024 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - emptyDir + - configMap +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-psp +rules: +- apiGroups: + - 'policy' + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "k8s-prometheus-adapter.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-psp +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . | quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-apiservice.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-apiservice.yaml new file mode 100644 index 00000000..0cc9fff6 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-apiservice.yaml @@ -0,0 +1,34 @@ +{{- if .Values.rules.resource}} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if or .Values.certManager.enabled .Values.customAnnotations }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" (include "k8s-prometheus-adapter.namespace" .) (include "k8s-prometheus-adapter.fullname" .) | quote }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} +{{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: v1beta1.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . 
| quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: metrics.k8s.io + version: v1beta1 + {{- if not (or .Values.tls.enable .Values.certManager.enabled) }} + insecureSkipTLSVerify: true + {{- end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml new file mode 100644 index 00000000..3c247e48 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rules.resource -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-metrics +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . | quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role.yaml new file mode 100644 index 00000000..73d89530 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/resource-metrics-cluster-role.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.rbac.create .Values.rules.resource -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-metrics +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/role-binding-auth-reader.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/role-binding-auth-reader.yaml new file mode 100644 index 00000000..d3c77c1c --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/role-binding-auth-reader.yaml @@ -0,0 +1,21 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.name" . }}-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . 
| quote }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/secret.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/secret.yaml new file mode 100644 index 00000000..3e7e8887 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/secret.yaml @@ -0,0 +1,17 @@ +{{- if .Values.tls.enable -}} +apiVersion: v1 +kind: Secret +metadata: + {{- if .Values.customAnnotations }} + annotations: + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} +type: kubernetes.io/tls +data: + tls.crt: {{ b64enc .Values.tls.certificate }} + tls.key: {{ b64enc .Values.tls.key }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/service.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/service.yaml new file mode 100644 index 00000000..bd673245 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/service.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Service +metadata: + {{- if or .Values.service.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ toYaml .Values.service.annotations | indent 4 }} + {{- end }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- end }} + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . }} +spec: +{{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} +{{- end }} + ports: + - port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + {{- include "k8s-prometheus-adapter.selectorLabels" . | indent 4 }} + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/serviceaccount.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/serviceaccount.yaml new file mode 100644 index 00000000..30a169ae --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "k8s-prometheus-adapter.labels" . | indent 4 }} + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ include "k8s-prometheus-adapter.namespace" . 
}} +{{- if or .Values.serviceAccount.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.serviceAccount.annotations }} + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} +{{- end }} +{{- end -}} diff --git a/install-bundler/install-prometheus-adapter/prometheus-adapter/values.yaml b/install-bundler/install-prometheus-adapter/prometheus-adapter/values.yaml new file mode 100644 index 00000000..d3135cd5 --- /dev/null +++ b/install-bundler/install-prometheus-adapter/prometheus-adapter/values.yaml @@ -0,0 +1,274 @@ +affinity: {} + +topologySpreadConstraints: [] + +image: + repository: registry.k8s.io/prometheus-adapter/prometheus-adapter + # if not set appVersion field from Chart.yaml is used + tag: "" + pullPolicy: IfNotPresent + +logLevel: 4 + +metricsRelistInterval: 1m + +listenPort: 6443 + +nodeSelector: {} + +priorityClassName: "" + +## Override the release namespace (for multi-namespace deployments in combined charts) +namespaceOverride: "" + +## Additional annotations to add to all resources +customAnnotations: {} + # role: custom-metrics + +## Additional labels to add to all resources +customLabels: {} + # monitoring: prometheus-adapter + +# Url to access prometheus +prometheus: + # Value is templated + url: http://prometheus.default.svc + port: 9090 + path: "" + +replicas: 2 + +# k8s 1.21 needs fsGroup to be set for non root deployments +# ref: https://github.com/kubernetes/kubernetes/issues/70679 +podSecurityContext: + fsGroup: 10001 + +# SecurityContext of the container +# ref. https://kubernetes.io/docs/tasks/configure-pod-container/security-context +securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["all"] + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + +rbac: + # Specifies whether RBAC resources should be created + create: true + +psp: + # Specifies whether PSP resources should be created + create: false + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: + # ServiceAccount annotations. 
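  # For GKE Workload Identity (a sketch only; the service-account e-mail below is
  # hypothetical, not something this chart defines), the annotation would look like:
  #   annotations:
  #     iam.gke.io/gcp-service-account: prometheus-adapter@my-project.iam.gserviceaccount.com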
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +# Custom DNS configuration to be added to prometheus-adapter pods +dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + +resources: + requests: + cpu: 200m + memory: 128Mi + limits: + cpu: 400m + memory: 400Mi + +# Configure liveness probe +# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Probe +livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 5 + +# Configure readiness probe +readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 5 + +rules: + default: true + + custom: [] + # - seriesQuery: '{__name__=~"^some_metric_count$"}' + # resources: + # template: <<.Resource>> + # name: + # matches: "" + # as: "my_custom_metric" + # metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + + # Mounts a configMap with pre-generated rules for use. Overrides the + # default, custom, external and resource entries + existing: + + external: [] + # - seriesQuery: '{__name__=~"^some_metric_count$"}' + # resources: + # template: <<.Resource>> + # name: + # matches: "" + # as: "my_external_metric" + # metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + + # resource: + # cpu: + # containerQuery: | + # sum by (<<.GroupBy>>) ( + # rate(container_cpu_usage_seconds_total{container!="",<<.LabelMatchers>>}[3m]) + # ) + # nodeQuery: | + # sum by (<<.GroupBy>>) ( + # rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal",<<.LabelMatchers>>}[3m]) + # ) + # resources: + # overrides: + # node: + # resource: node + # namespace: + # resource: namespace + # pod: + # resource: pod + # containerLabel: container + # memory: + # containerQuery: | + # sum by (<<.GroupBy>>) ( + # avg_over_time(container_memory_working_set_bytes{container!="",<<.LabelMatchers>>}[3m]) + # ) + # nodeQuery: | + # sum by (<<.GroupBy>>) ( + # avg_over_time(node_memory_MemTotal_bytes{<<.LabelMatchers>>}[3m]) + # - + # avg_over_time(node_memory_MemAvailable_bytes{<<.LabelMatchers>>}[3m]) + # ) + # resources: + # overrides: + # node: + # resource: node + # namespace: + # resource: namespace + # pod: + # resource: pod + # containerLabel: container + # window: 3m + +service: + annotations: {} + port: 443 + type: ClusterIP + # clusterIP: 1.2.3.4 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" +tls: + enable: false + ca: |- + # Public CA file that signed the APIService + key: |- + # Private key of the APIService + certificate: |- + # Public key of the APIService + +# Set environment variables from secrets, configmaps or by setting them as name/value +env: [] + # - name: TMP_DIR + # value: /tmp + # - name: PASSWORD + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: password + # optional: false + +# Any extra arguments +extraArguments: [] + # - --tls-private-key-file=/etc/tls/tls.key + # - --tls-cert-file=/etc/tls/tls.crt + +# Any extra volumes +extraVolumes: [] + # - name: example-name + # hostPath: + # path: /path/on/host + # type: DirectoryOrCreate + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + # type: File + +# Any extra volume mounts +extraVolumeMounts: [] + # - 
name: example-name + # mountPath: /path/in/container + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +tolerations: [] + +# Labels added to the pod +podLabels: {} + +# Annotations added to the pod +podAnnotations: {} + +# Annotations added to the deployment +deploymentAnnotations: {} + +hostNetwork: + # Specifies if prometheus-adapter should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. As an example, this is required + # if you use Weave network on EKS. See also dnsPolicy + enabled: false + +# When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet +# dnsPolicy: ClusterFirstWithHostNet + +# Deployment strategy type +strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + +podDisruptionBudget: + # Specifies if PodDisruptionBudget should be enabled + # When enabled, minAvailable or maxUnavailable should also be defined. + enabled: false + minAvailable: + maxUnavailable: 1 + +certManager: + enabled: false + caCertDuration: 43800h + certDuration: 8760h diff --git a/install-bundler/install-prometheus-adapter/reference-values.yaml b/install-bundler/install-prometheus-adapter/reference-values.yaml new file mode 100644 index 00000000..1e7774ba --- /dev/null +++ b/install-bundler/install-prometheus-adapter/reference-values.yaml @@ -0,0 +1,16 @@ +rules: + custom: + - seriesQuery: '{__name__=~"process_cpu_seconds_total",app="chain-137"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_total" + as: "137-cpu-usage" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)' + - seriesQuery: '{__name__=~"eth_sendUserOperation_requests", code="200", rpc_method="eth_sendUserOperation", app="chain-137"}' + resources: + template: <<.Resource>> + name: + matches: "^(.*)_requests" # Assumes the metric in Prometheus is 'eth_sendUserOperation_requests' + as: "137-http-requests" + metricsQuery: 'avg(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)' \ No newline at end of file diff --git a/install-bundler/install-prometheus/install.sh b/install-bundler/install-prometheus/install.sh new file mode 100644 index 00000000..f67d4e7a --- /dev/null +++ b/install-bundler/install-prometheus/install.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -e + +# A cluster should have only one Prometheus +NAMESPACE="monitoring" +HELM_RELEASE="prometheus" + + +# if ! kubectl get namespace $NAMESPACE &>/dev/null; then +# printf "$NAMESPACE doesn't exist, aborting ...\n" +# exit +# fi +printf "\n${GREEN} ####### Deploying Prometheus $HELM_RELEASE to $NAMESPACE ${NC} ####### \n"; +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + 
helm upgrade --install --wait --timeout 720s $HELM_RELEASE $DIR/prometheus/ \ -n $NAMESPACE \ -f $DIR/prometheus/values.yaml \ --set nameOverride=$HELM_RELEASE \ --set alertmanager.enabled=false \ --set kube-state-metrics.enabled=true \ --set prometheus-node-exporter.enabled=false \ --set prometheus-pushgateway.enabled=false +printf "Prometheus deployment completed"; \ No newline at end of file diff --git a/install-bundler/install-prometheus/prometheus/.helmignore b/install-bundler/install-prometheus/prometheus/.helmignore new file mode 100644 index 00000000..825c0077 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
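The custom rules in `reference-values.yaml` above surface `137-cpu-usage` and `137-http-requests` through the custom metrics API, so an HPA can scale a bundler workload on them. A minimal sketch is shown below; the Deployment name, namespace, and target value are assumptions for illustration only.

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: chain-137-hpa        # hypothetical name
  namespace: bundler         # hypothetical namespace
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: chain-137          # hypothetical Deployment whose pods expose the metric
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Pods
      pods:
        metric:
          name: 137-http-requests   # the "as:" name from reference-values.yaml
        target:
          type: AverageValue
          averageValue: "5"         # assumed per-pod eth_sendUserOperation rate
```

Once the adapter is running, `kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1"` should list these metric names.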
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +OWNERS diff --git a/install-bundler/install-prometheus/prometheus/Chart.lock b/install-bundler/install-prometheus/prometheus/Chart.lock new file mode 100644 index 00000000..830add59 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/Chart.lock @@ -0,0 +1,15 @@ +dependencies: +- name: alertmanager + repository: https://prometheus-community.github.io/helm-charts + version: 1.6.0 +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.11.0 +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.22.0 +- name: prometheus-pushgateway + repository: https://prometheus-community.github.io/helm-charts + version: 2.4.0 +digest: sha256:1aa107e6e124974678660600ac0260321e81887a64fb7c0fd85d34a0bbb95e7d +generated: "2023-08-26T22:57:53.836095+09:00" diff --git a/install-bundler/install-prometheus/prometheus/Chart.yaml b/install-bundler/install-prometheus/prometheus/Chart.yaml new file mode 100644 index 00000000..9fdbed98 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/Chart.yaml @@ -0,0 +1,53 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts + - name: Upstream Project + url: https://github.com/prometheus/prometheus +apiVersion: v2 +appVersion: v2.46.0 +dependencies: +- condition: alertmanager.enabled + name: alertmanager + repository: https://prometheus-community.github.io/helm-charts + version: 1.6.* +- condition: kube-state-metrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.11.* +- condition: prometheus-node-exporter.enabled + name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.22.* +- condition: prometheus-pushgateway.enabled + name: prometheus-pushgateway + repository: https://prometheus-community.github.io/helm-charts + version: 2.4.* +description: Prometheus is a monitoring system and time series database. 
+home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +keywords: +- monitoring +- prometheus +kubeVersion: '>=1.19.0-0' +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: naseem@transit.app + name: naseemkullah +- email: rootsandtrees@posteo.de + name: zeritti +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +type: application +version: 24.3.1 diff --git a/install-bundler/install-prometheus/prometheus/README.md b/install-bundler/install-prometheus/prometheus/README.md new file mode 100644 index 00000000..3246e371 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/README.md @@ -0,0 +1,376 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.7+ + +## Get Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repository](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +Starting with version 16.0, the Prometheus chart requires Helm 3.7+ in order to install successfully. Please check your `helm` release before installation. + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [alertmanager](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager) +- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) +- [prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) +- [prometheus-pushgateway](https://github.com/walker-tom/helm-charts/tree/main/charts/prometheus-pushgateway) + +To disable the dependency during installation, set `alertmanager.enabled`, `kube-state-metrics.enabled`, `prometheus-node-exporter.enabled` and `prometheus-pushgateway.enabled` to `false`. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Updating values.schema.json + +A [`values.schema.json`](https://helm.sh/docs/topics/charts/#schema-files) file has been added to validate chart values. 
When `values.yaml` file has a structure change (i.e. add a new field, change value type, etc.), modify `values.schema.json` file manually or run `helm schema-gen values.yaml > values.schema.json` to ensure the schema is aligned with the latest values. Refer to [helm plugin `helm-schema-gen`](https://github.com/karuppiah7890/helm-schema-gen) for plugin installation instructions. + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] prometheus-community/prometheus --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 24.0 + +Require Kubernetes 1.19+ + +Release 1.0.0 of the _alertmanager_ replaced [configmap-reload](https://github.com/jimmidyson/configmap-reload) with [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader). +Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader. Please, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments. + +### To 23.0 + +Release 5.0.0 of the _kube-state-metrics_ chart introduced a separation of the `image.repository` value in two distinct values: + +```console + image: + registry: registry.k8s.io + repository: kube-state-metrics/kube-state-metrics +``` + +If a custom values file or CLI flags set `kube-state.metrics.image.repository`, please, set the new values accordingly. + +If you are upgrading _prometheus-pushgateway_ with the chart and _prometheus-pushgateway_ has been deployed as a statefulset with a persistent volume, the statefulset must be deleted before upgrading the chart, e.g.: + +```bash +kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway -n monitoring --cascade=orphan +``` + +Users are advised to review changes in the corresponding chart releases before upgrading. + +### To 22.0 + +The `app.kubernetes.io/version` label has been removed from the pod selector. + +Therefore, you must delete the previous StatefulSet or Deployment before upgrading. Performing this operation will cause **Prometheus to stop functioning** until the upgrade is complete. + +```console +kubectl delete deploy,sts -l app.kubernetes.io/name=prometheus +``` + +### To 21.0 + +The Kubernetes labels have been updated to follow [Helm 3 label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). +Specifically, labels mapping is listed below: + +| OLD | NEW | +|--------------------|------------------------------| +|heritage | app.kubernetes.io/managed-by | +|chart | helm.sh/chart | +|[container version] | app.kubernetes.io/version | +|app | app.kubernetes.io/name | +|release | app.kubernetes.io/instance | + +Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment need to be deleted before upgrade. 
+ +If `runAsStatefulSet: false` (this is the default): + +```console +kubectl delete deploy -l app=prometheus +``` + +If `runAsStatefulSet: true`: + +```console +kubectl delete sts -l app=prometheus +``` + +After that do the actual upgrade: + +```console +helm upgrade -i prometheus prometheus-community/prometheus +``` + +### To 20.0 + +The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader). +Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments. + +### To 19.0 + +Prometheus has been updated to version v2.40.5. + +Prometheus-pushgateway was updated to version 2.0.0 which adapted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). +See the [upgrade docs of the prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway#to-200) to see whats to do, before you upgrade Prometheus! + +The condition in Chart.yaml to disable kube-state-metrics has been changed from `kubeStateMetrics.enabled` to `kube-state-metrics.enabled` + +The Docker image tag is used from appVersion field in Chart.yaml by default. + +Unused subchart configs has been removed and subchart config is now on the bottom of the config file. + +If Prometheus is used as deployment the updatestrategy has been changed to "Recreate" by default, so Helm updates work out of the box. + +`.Values.server.extraTemplates` & `.Values.server.extraObjects` has been removed in favour of `.Values.extraManifests`, which can do the same. + +`.Values.server.enabled` has been removed as it's useless now that all components are created by subcharts. + +All files in `templates/server` directory has been moved to `templates` directory. + +```bash +helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 19.0.0 +``` + +### To 18.0 + +Version 18.0.0 uses alertmanager service from the [alertmanager chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager). If you've made some config changes, please check the old `alertmanager` and the new `alertmanager` configuration section in values.yaml for differences. + +Note that the `configmapReload` section for `alertmanager` was moved out of dedicated section (`configmapReload.alertmanager`) to alertmanager embedded (`alertmanager.configmapReload`). + +Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: + +```bash +# In 17.x +kubectl scale deploy prometheus-server --replicas=0 +# Upgrade +helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 18.0.0 +``` + +### To 17.0 + +Version 17.0.0 uses pushgateway service from the [prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway). If you've made some config changes, please check the old `pushgateway` and the new `prometheus-pushgateway` configuration section in values.yaml for differences. 
+ +Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: + +```bash +# In 16.x +kubectl scale deploy prometheus-server --replicas=0 +# Upgrade +helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 17.0.0 +``` + +### To 16.0 + +Starting from version 16.0 embedded services (like alertmanager, node-exporter etc.) are moved out of Prometheus chart and the respecting charts from this repository are used as dependencies. Version 16.0.0 moves node-exporter service to [prometheus-node-exporter chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter). If you've made some config changes, please check the old `nodeExporter` and the new `prometheus-node-exporter` configuration section in values.yaml for differences. + +Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: + +```bash +# In 15.x +kubectl scale deploy prometheus-server --replicas=0 +# Upgrade +helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 16.0.0 +``` + +### To 15.0 + +Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes. + +Before you update please execute the following command, to be able to update kube-state-metrics: + +```bash +kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan +``` + +### To 9.0 + +Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. + +### To 5.0 + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ```yaml + prometheus.yml: + ... 
+ remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/prometheus +``` + +You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations. + +### Scraping Pod Metrics via Annotations + +This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). + +In order to get prometheus to scrape pods, you must add annotations to the pods as below: + +```yaml +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "8080" +``` + +You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes. + +### Sharing Alerts Between Services + +Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, + +```yaml +# values.yaml +# ... + +# service1-alert.yaml +serverFiles: + alerts: + service1: + - alert: anAlert + # ... + +# service2-alert.yaml +serverFiles: + alerts: + service2: + - alert: anAlert + # ... +``` + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml +``` + +### RBAC Configuration + +Roles and RoleBindings resources will be created automatically for `server` service. + +To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. + +> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. + +### ConfigMap Files + +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. 
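For example, a minimal sketch of an extra rules file declared under `serverFiles` (the file name follows the chart's defaults; the recording rule itself is illustrative):

```yaml
serverFiles:
  recording_rules.yml:
    groups:
      - name: example
        rules:
          - record: job:http_requests:rate5m
            expr: sum(rate(http_requests_total[5m])) by (job)
```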
+ +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/.helmignore b/install-bundler/install-prometheus/prometheus/charts/alertmanager/.helmignore new file mode 100644 index 00000000..7653e97e --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +unittests/ diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/Chart.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/Chart.yaml new file mode 100644 index 00000000..5225d1b3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: v0.26.0 +description: The Alertmanager handles alerts sent by client applications such as the + Prometheus server. 
+home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +keywords: +- monitoring +kubeVersion: '>=1.19.0-0' +maintainers: +- email: monotek23@gmail.com + name: monotek +- email: naseem@transit.app + name: naseemkullah +name: alertmanager +sources: +- https://github.com/prometheus/alertmanager +type: application +version: 1.6.0 diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/README.md b/install-bundler/install-prometheus/prometheus/charts/alertmanager/README.md new file mode 100644 index 00000000..d3f4df73 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/README.md @@ -0,0 +1,62 @@ +# Alertmanager + +As per [prometheus.io documentation](https://prometheus.io/docs/alerting/latest/alertmanager/): +> The Alertmanager handles alerts sent by client applications such as the +> Prometheus server. It takes care of deduplicating, grouping, and routing them +> to the correct receiver integration such as email, PagerDuty, or OpsGenie. It +> also takes care of silencing and inhibition of alerts. + +## Prerequisites + +Kubernetes 1.14+ + +## Get Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +helm install [RELEASE_NAME] prometheus-community/alertmanager +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 1.0 + +The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader). +Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extea command-line arguments. +The `networking.k8s.io/v1beta1` is no longer supported. use [`networking.k8s.io/v1`](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#ingressclass-v122). + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/alertmanager +``` diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/ci/config-reload-values.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/ci/config-reload-values.yaml new file mode 100644 index 00000000..cba5de8e --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/ci/config-reload-values.yaml @@ -0,0 +1,2 @@ +configmapReload: + enabled: true diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/NOTES.txt b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/NOTES.txt new file mode 100644 index 00000000..46ea5bee --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "alertmanager.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ include "alertmanager.namespace" . }} svc -w {{ include "alertmanager.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "alertmanager.namespace" . }} {{ include "alertmanager.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "alertmanager.namespace" . }} -l "app.kubernetes.io/name={{ include "alertmanager.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:{{ .Values.service.port }} to use your application" + kubectl --namespace {{ include "alertmanager.namespace" . }} port-forward $POD_NAME {{ .Values.service.port }}:80 +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/_helpers.tpl b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/_helpers.tpl new file mode 100644 index 00000000..827b6ee9 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/_helpers.tpl @@ -0,0 +1,92 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "alertmanager.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "alertmanager.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "alertmanager.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "alertmanager.labels" -}} +helm.sh/chart: {{ include "alertmanager.chart" . }} +{{ include "alertmanager.selectorLabels" . }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "alertmanager.selectorLabels" -}} +app.kubernetes.io/name: {{ include "alertmanager.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "alertmanager.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "alertmanager.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Define Ingress apiVersion +*/}} +{{- define "alertmanager.ingress.apiVersion" -}} +{{- printf "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Define Pdb apiVersion +*/}} +{{- define "alertmanager.pdb.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- printf "policy/v1" }} +{{- else }} +{{- printf "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Allow overriding alertmanager namespace +*/}} +{{- define "alertmanager.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/configmap.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/configmap.yaml new file mode 100644 index 00000000..9e5882dc --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/configmap.yaml @@ -0,0 +1,21 @@ +{{- if .Values.config.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.configAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +data: + alertmanager.yml: | + {{- $config := omit .Values.config "enabled" }} + {{- toYaml $config | default "{}" | nindent 4 }} + {{- range $key, $value := .Values.templates }} + {{ $key }}: |- + {{- $value | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/ingress.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/ingress.yaml new file mode 100644 index 00000000..02910330 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/ingress.yaml @@ -0,0 +1,42 @@ +{{- if .Values.ingress.enabled }} +{{- $fullName := include "alertmanager.fullname" . 
}} +{{- $svcPort := .Values.service.port }} +apiVersion: {{ include "alertmanager.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + ingressClassName: {{ .Values.ingress.className }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/pdb.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/pdb.yaml new file mode 100644 index 00000000..103e9ecd --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "alertmanager.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/serviceaccount.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/serviceaccount.yaml new file mode 100644 index 00000000..bc9ccaaf --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "alertmanager.serviceAccountName" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/services.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/services.yaml new file mode 100644 index 00000000..9637ae75 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/services.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . 
}} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := . }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alertmanager.fullname" . }}-headless + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + clusterIP: None + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - port: {{ .Values.service.clusterPort }} + targetPort: clusterpeer-tcp + protocol: TCP + name: cluster-tcp + - port: {{ .Values.service.clusterPort }} + targetPort: clusterpeer-udp + protocol: UDP + name: cluster-udp + {{- end }} + {{- with .Values.service.extraPorts }} + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + {{- include "alertmanager.selectorLabels" . | nindent 4 }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/statefulset.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/statefulset.yaml new file mode 100644 index 00000000..25d81a92 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/statefulset.yaml @@ -0,0 +1,247 @@ +{{- $svcClusterPort := .Values.service.clusterPort }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "alertmanager.fullname" . }} + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.statefulSet.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "alertmanager.selectorLabels" . | nindent 6 }} + serviceName: {{ include "alertmanager.fullname" . }}-headless + template: + metadata: + labels: + {{- include "alertmanager.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- if not .Values.configmapReload.enabled }} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "alertmanager.serviceAccountName" . }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.schedulerName }} + schedulerName: {{ . 
}} + {{- end }} + {{- if or .Values.podAntiAffinity .Values.affinity }} + affinity: + {{- end }} + {{- with .Values.affinity }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if eq .Values.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]} + {{- else if eq .Values.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app.kubernetes.io/name, operator: In, values: [{{ include "alertmanager.name" . }}]} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- with .Values.extraInitContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.enabled }} + - name: {{ .Chart.Name }}-{{ .Values.configmapReload.name }} + image: "{{ .Values.configmapReload.image.repository }}:{{ .Values.configmapReload.image.tag }}" + imagePullPolicy: "{{ .Values.configmapReload.image.pullPolicy }}" + {{- with .Values.configmapReload.extraEnv }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + {{- if and (hasKey .Values.configmapReload.extraArgs "config-file" | not) (hasKey .Values.configmapReload.extraArgs "watched-dir" | not) }} + - --watched-dir=/etc/alertmanager + {{- end }} + {{- if not (hasKey .Values.configmapReload.extraArgs "reload-url") }} + - --reload-url=http://127.0.0.1:9093/-/reload + {{- end }} + {{- range $key, $value := .Values.configmapReload.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + resources: + {{- toYaml .Values.configmapReload.resources | nindent 12 }} + {{- with .Values.configmapReload.containerPort }} + ports: + - containerPort: {{ . }} + {{- end }} + {{- with .Values.configmapReload.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: config + mountPath: /etc/alertmanager + {{- if .Values.configmapReload.extraVolumeMounts }} + {{- toYaml .Values.configmapReload.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- end }} + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{- if .Values.extraEnv }} + {{- toYaml .Values.extraEnv | nindent 12 }} + {{- end }} + {{- with .Values.command }} + command: + {{- toYaml . 
| nindent 12 }} + {{- end }} + args: + - --storage.path=/alertmanager + {{- if not (hasKey .Values.extraArgs "config.file") }} + - --config.file=/etc/alertmanager/alertmanager.yml + {{- end }} + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - --cluster.advertise-address=[$(POD_IP)]:{{ $svcClusterPort }} + - --cluster.listen-address=0.0.0.0:{{ $svcClusterPort }} + {{- end }} + {{- if gt (int .Values.replicaCount) 1}} + {{- $fullName := include "alertmanager.fullname" . }} + {{- range $i := until (int .Values.replicaCount) }} + - --cluster.peer={{ $fullName }}-{{ $i }}.{{ $fullName }}-headless:{{ $svcClusterPort }} + {{- end }} + {{- end }} + {{- if .Values.additionalPeers }} + {{- range $item := .Values.additionalPeers }} + - --cluster.peer={{ $item }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + ports: + - name: http + containerPort: 9093 + protocol: TCP + {{- if or (gt (int .Values.replicaCount) 1) (.Values.additionalPeers) }} + - name: clusterpeer-tcp + containerPort: {{ $svcClusterPort }} + protocol: TCP + - name: clusterpeer-udp + containerPort: {{ $svcClusterPort }} + protocol: UDP + {{- end }} + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 12 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 12 }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + {{- if .Values.config.enabled }} + - name: config + mountPath: /etc/alertmanager + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: /alertmanager + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.extraContainers }} + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.config.enabled }} + - name: config + configMap: + name: {{ include "alertmanager.fullname" . }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + {{- toYaml .Values.persistence.accessModes | nindent 10 }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: {{ .Values.persistence.storageClass }} + {{- end }} + {{- end }} + {{- else }} + - name: storage + emptyDir: {} + {{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/tests/test-connection.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/tests/test-connection.yaml new file mode 100644 index 00000000..410eba5b --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/templates/tests/test-connection.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "alertmanager.fullname" . }}-test-connection" + labels: + {{- include "alertmanager.labels" . | nindent 4 }} + {{- with .Values.testFramework.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + namespace: {{ include "alertmanager.namespace" . }} +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "alertmanager.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/alertmanager/values.yaml b/install-bundler/install-prometheus/prometheus/charts/alertmanager/values.yaml new file mode 100644 index 00000000..5c4ce585 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/alertmanager/values.yaml @@ -0,0 +1,301 @@ +# yaml-language-server: $schema=values.schema.json +# Default values for alertmanager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +# Number of old history to retain to allow rollback +# Default Kubernetes value is set to 10 +revisionHistoryLimit: 10 + +image: + repository: quay.io/prometheus/alertmanager + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +extraArgs: {} + +## Additional Alertmanager Secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +## namespaceOverride overrides the namespace which the resources will be deployed in +namespaceOverride: "" + +automountServiceAccountToken: true + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +# Sets priorityClassName in alertmanager pod +priorityClassName: "" + +# Sets schedulerName in alertmanager pod +schedulerName: "" + +podSecurityContext: + fsGroup: 65534 +dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 +hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "foo.local" + # - "bar.local" + # - ip: "10.1.2.3" + # hostnames: + # - "foo.remote" + # - "bar.remote" +securityContext: + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + +additionalPeers: [] + +## Additional InitContainers to initialize the pod +## +extraInitContainers: [] + +## Additional containers to add to the stateful set. This will allow to setup sidecarContainers like a proxy to integrate +## alertmanager with an external tool like teams that has not direct integration. +## +extraContainers: [] + +livenessProbe: + httpGet: + path: / + port: http + +readinessProbe: + httpGet: + path: / + port: http + +service: + annotations: {} + labels: {} + type: ClusterIP + port: 9093 + clusterPort: 9094 + loadBalancerIP: "" # Assign ext IP when Service type is LoadBalancer + loadBalancerSourceRanges: [] # Only allow access to loadBalancerIP from these IPs + # if you want to force a specific nodePort. 
Must be use with service.type=NodePort + # nodePort: + + # Optionally specify extra list of additional ports exposed on both services + extraPorts: [] + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: alertmanager.domain.com + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - alertmanager.domain.com + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 32Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node. +## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. +## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. +## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. +## +podAntiAffinity: "" + +## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. +## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone +## +podAntiAffinityTopologyKey: kubernetes.io/hostname + +## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: alertmanager + +statefulSet: + annotations: {} + +podAnnotations: {} +podLabels: {} + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 + +command: [] + +persistence: + enabled: true + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 50Mi + +configAnnotations: {} + ## For example if you want to provide private data from a secret vault + ## https://github.com/banzaicloud/bank-vaults/tree/main/charts/vault-secrets-webhook + ## P.s.: Add option `configMapMutation: true` for vault-secrets-webhook + # vault.security.banzaicloud.io/vault-role: "admin" + # vault.security.banzaicloud.io/vault-addr: "https://vault.vault.svc.cluster.local:8200" + # vault.security.banzaicloud.io/vault-skip-verify: "true" + # vault.security.banzaicloud.io/vault-path: "kubernetes" + ## Example for inject secret + # slack_api_url: '${vault:secret/data/slack-hook-alerts#URL}' + +config: + enabled: true + global: {} + # slack_api_url: '' + + templates: + - '/etc/alertmanager/*.tmpl' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + ## If false, the configmap-reload container will not be deployed + ## + enabled: false + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.66.0 + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + + extraArgs: {} + + ## Optionally specify extra list of additional volumeMounts + extraVolumeMounts: [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + + ## Optionally specify extra environment variables to add to alertmanager container + extraEnv: [] + # - name: FOO + # value: BAR + + securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsUser: 65534 + # runAsNonRoot: true + # runAsGroup: 65534 + +templates: {} +# alertmanager.tmpl: |- + +## Optionally specify extra list of additional volumeMounts +extraVolumeMounts: [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + +## Optionally specify extra list of additional volumes +extraVolumes: [] + # - name: extras + # emptyDir: {} + +## Optionally specify extra environment variables to add to alertmanager container +extraEnv: [] + # - name: FOO + # value: BAR + +testFramework: + enabled: false + annotations: + "helm.sh/hook": test-success + # "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/.helmignore b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/Chart.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/Chart.yaml
new file mode 100644
index 00000000..cdd4b6f2
--- /dev/null
+++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/Chart.yaml
@@ -0,0 +1,26 @@
+annotations:
+  artifacthub.io/license: Apache-2.0
+  artifacthub.io/links: |
+    - name: Chart Source
+      url: https://github.com/prometheus-community/helm-charts
+apiVersion: v2
+appVersion: 2.9.2
+description: Install kube-state-metrics to generate and expose cluster-level metrics
+home: https://github.com/kubernetes/kube-state-metrics/
+keywords:
+- metric
+- monitoring
+- prometheus
+- kubernetes
+maintainers:
+- email: tariq.ibrahim@mulesoft.com
+  name: tariq1890
+- email: manuel@rueg.eu
+  name: mrueg
+- email: david@0xdc.me
+  name: dotdc
+name: kube-state-metrics
+sources:
+- https://github.com/kubernetes/kube-state-metrics/
+type: application
+version: 5.11.0
diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/README.md b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/README.md
new file mode 100644
index 00000000..843be89e
--- /dev/null
+++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/README.md
@@ -0,0 +1,85 @@
+# kube-state-metrics Helm Chart
+
+Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics).
+
+## Get Repository Info
+
+```console
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+```
+
+_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
+
+
+## Install Chart
+
+```console
+helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags]
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics
+
+You can upgrade in-place:
+
+1. [get repository info](#get-repository-info)
+1. [upgrade](#upgrading-chart) your existing release name using the new chart repository
+
+## Upgrading to v3.0.0
+
+v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side.
+
+The upgraded chart now has the following changes:
+
+* Dropped support for helm v2 (helm v3 or later is required)
+* `collectors` key was renamed to `resources`
+* `namespace` key was renamed to `namespaces`
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing).
+To see all configurable options with detailed comments:
+
+```console
+helm show values prometheus-community/kube-state-metrics
+```
+
+### kube-rbac-proxy
+
+You can enable `kube-state-metrics` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy one RBAC proxy container per endpoint (metrics & telemetry).
+To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: kube-state-metrics-read
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/kube-state-metrics"]
+    verbs:
+      - get
+```
+
+See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details.
diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/NOTES.txt b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/NOTES.txt
new file mode 100644
index 00000000..3589c24e
--- /dev/null
+++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/NOTES.txt
@@ -0,0 +1,23 @@
+kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects.
+The exposed metrics can be found here:
+https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics
+
+The metrics are exported on the HTTP endpoint /metrics on the listening port.
+In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics
+
+They are served either as plaintext or protobuf depending on the Accept header.
+They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint.
+
+{{- if .Values.kubeRBACProxy.enabled}}
+
+kube-rbac-proxy endpoint protection is enabled:
+- Metrics endpoints are now HTTPS
+- Ensure that the client authenticates the requests (e.g. via service account) with the following role permissions:
+```
+rules:
+  - apiGroups: [ "" ]
+    resources: ["services/{{ template "kube-state-metrics.fullname" . }}"]
+    verbs:
+      - get
+```
+{{- end }}
diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/_helpers.tpl b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/_helpers.tpl
new file mode 100644
index 00000000..a4358c87
--- /dev/null
+++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/_helpers.tpl
@@ -0,0 +1,156 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kube-state-metrics.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kube-state-metrics.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate basic labels +*/}} +{{- define "kube-state-metrics.labels" }} +helm.sh/chart: {{ template "kube-state-metrics.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ template "kube-state-metrics.name" . }} +{{- include "kube-state-metrics.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kube-state-metrics.selectorLabels" }} +{{- if .Values.selectorOverride }} +{{ toYaml .Values.selectorOverride }} +{{- else }} +app.kubernetes.io/name: {{ include "kube-state-metrics.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for servicemonitor */}} +{{- define "servicemonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end -}} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "kube-state-metrics.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . 
}} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +The image to use for kube-state-metrics +*/}} +{{- define "kube-state-metrics.image" -}} +{{- if .Values.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +The image to use for kubeRBACProxy +*/}} +{{- define "kubeRBACProxy.image" -}} +{{- if .Values.kubeRBACProxy.image.sha }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) .Values.kubeRBACProxy.image.sha }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.kubeRBACProxy.image.registry .Values.kubeRBACProxy.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.kubeRBACProxy.image.tag) }} +{{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml new file mode 100644 index 00000000..025cd47a --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/ciliumnetworkpolicy.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "cilium") }} +apiVersion: cilium.io/v2 +kind: CiliumNetworkPolicy +metadata: + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +spec: + endpointSelector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} + egress: + {{- if and .Values.networkPolicy.cilium .Values.networkPolicy.cilium.kubeApiServerSelector }} + {{ toYaml .Values.networkPolicy.cilium.kubeApiServerSelector | nindent 6 }} + {{- else }} + - toEntities: + - kube-apiserver + {{- end }} + ingress: + - toPorts: + - ports: + - port: {{ .Values.service.port | quote }} + protocol: TCP + {{- if .Values.selfMonitor.enabled }} + - port: {{ .Values.selfMonitor.telemetryPort | default 8081 | quote }} + protocol: TCP + {{ end }} +{{ end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..cf9f628d --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml new file mode 100644 index 00000000..72986a60 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/crs-configmap.yaml @@ -0,0 +1,9 @@ +{{- if .Values.customResourceState.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-customresourcestate-config +data: + config.yaml: | + {{- toYaml .Values.customResourceState.config | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/deployment.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/deployment.yaml new file mode 100644 index 00000000..31aa6101 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,279 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- if .Values.annotations }} + annotations: +{{ toYaml .Values.annotations | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] + {{- end }} + template: + metadata: + labels: + {{- include "kube-state-metrics.labels" . 
| indent 8 }} + {{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + {{- $httpPort := ternary 9090 (.Values.service.port | default 8080) .Values.kubeRBACProxy.enabled}} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} + - name: {{ template "kube-state-metrics.name" . }} + {{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- end }} + args: + {{- if .Values.extraArgs }} + {{- .Values.extraArgs | toYaml | nindent 8 }} + {{- end }} + - --port={{ $httpPort }} + {{- if .Values.collectors }} + - --resources={{ .Values.collectors | join "," }} + {{- end }} + {{- if .Values.metricLabelsAllowlist }} + - --metric-labels-allowlist={{ .Values.metricLabelsAllowlist | join "," }} + {{- end }} + {{- if .Values.metricAnnotationsAllowList }} + - --metric-annotations-allowlist={{ .Values.metricAnnotationsAllowList | join "," }} + {{- end }} + {{- if .Values.metricAllowlist }} + - --metric-allowlist={{ .Values.metricAllowlist | join "," }} + {{- end }} + {{- if .Values.metricDenylist }} + - --metric-denylist={{ .Values.metricDenylist | join "," }} + {{- end }} + {{- $namespaces := list }} + {{- if .Values.namespaces }} + {{- range $ns := join "," .Values.namespaces | split "," }} + {{- $namespaces = append $namespaces (tpl $ns $) }} + {{- end }} + {{- end }} + {{- if .Values.releaseNamespace }} + {{- $namespaces = append $namespaces ( include "kube-state-metrics.namespace" . 
) }} + {{- end }} + {{- if $namespaces }} + - --namespaces={{ $namespaces | mustUniq | join "," }} + {{- end }} + {{- if .Values.namespacesDenylist }} + - --namespaces-denylist={{ tpl (.Values.namespacesDenylist | join ",") $ }} + {{- end }} + {{- if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) + {{- end }} + {{- if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - --telemetry-host=127.0.0.1 + - --telemetry-port={{ $telemetryPort }} + {{- else }} + {{- if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} + {{- end }} + {{- if .Values.selfMonitor.telemetryPort }} + - --telemetry-port={{ $telemetryPort }} + {{- end }} + {{- if .Values.customResourceState.enabled }} + - --custom-resource-state-config-file=/etc/customresourcestate/config.yaml + {{- end }} + {{- end }} + {{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumeMounts) }} + volumeMounts: + {{- if .Values.kubeconfig.enabled }} + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true + {{- end }} + {{- if .Values.customResourceState.enabled }} + - name: customresourcestate-config + mountPath: /etc/customresourcestate + readOnly: true + {{- end }} + {{- if .Values.volumeMounts }} +{{ toYaml .Values.volumeMounts | indent 8 }} + {{- end }} + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ include "kube-state-metrics.image" . }} + {{- if eq .Values.kubeRBACProxy.enabled false }} + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: "http" + {{- if .Values.selfMonitor.enabled }} + - containerPort: {{ $telemetryPort }} + name: "metrics" + {{- end }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: {{ $httpPort }} + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: {{ $httpPort }} + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-http + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + - --secure-listen-address=:{{ .Values.service.port | default 8080}} + - --upstream=http://127.0.0.1:{{ $httpPort }}/ + - --proxy-endpoints-port=8888 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + {{- with .Values.kubeRBACProxy.volumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . 
}} + ports: + - containerPort: {{ .Values.service.port | default 8080}} + name: "http" + - containerPort: 8888 + name: "http-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8888 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- if .Values.selfMonitor.enabled }} + - name: kube-rbac-proxy-telemetry + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 8 }} + {{- end }} + - --secure-listen-address=:{{ .Values.selfMonitor.telemetryPort | default 8081 }} + - --upstream=http://127.0.0.1:{{ $telemetryPort }}/ + - --proxy-endpoints-port=8889 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + {{- with .Values.kubeRBACProxy.volumeMounts }} + {{- toYaml . | nindent 10 }} + {{- end }} + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + image: {{ include "kubeRBACProxy.image" . }} + ports: + - containerPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + name: "metrics" + - containerPort: 8889 + name: "metrics-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8889 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: +{{ toYaml .Values.kubeRBACProxy.resources | indent 10 }} +{{- end }} +{{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: +{{ toYaml .Values.kubeRBACProxy.containerSecurityContext | indent 10 }} +{{- end }} + {{- end }} + {{- end }} +{{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if or (.Values.kubeconfig.enabled) (.Values.customResourceState.enabled) (.Values.volumes) (.Values.kubeRBACProxy.enabled) }} + volumes: + {{- if .Values.kubeconfig.enabled}} + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + {{- end }} + {{- if .Values.kubeRBACProxy.enabled}} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "kube-state-metrics.fullname" . }}-rbac-config + {{- end }} + {{- if .Values.customResourceState.enabled}} + - name: customresourcestate-config + configMap: + name: {{ template "kube-state-metrics.fullname" . 
}}-customresourcestate-config + {{- end }} + {{- if .Values.volumes }} +{{ toYaml .Values.volumes | indent 8 }} + {{- end }} + {{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml new file mode 100644 index 00000000..567f7bf3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml new file mode 100644 index 00000000..6af00845 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -0,0 +1,12 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml new file mode 100644 index 00000000..309b38ec --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.networkPolicy.enabled (eq .Values.networkPolicy.flavor "kubernetes") }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + {{- if .Values.annotations }} + annotations: + {{ toYaml .Values.annotations | nindent 4 }} + {{- end }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +spec: + {{- if .Values.networkPolicy.egress }} + ## Deny all egress by default + egress: + {{- toYaml .Values.networkPolicy.egress | nindent 4 }} + {{- end }} + ingress: + {{- if .Values.networkPolicy.ingress }} + {{- toYaml .Values.networkPolicy.ingress | nindent 4 }} + {{- else }} + ## Allow ingress on default ports by default + - ports: + - port: {{ .Values.service.port | default 8080 }} + protocol: TCP + {{- if .Values.selfMonitor.enabled }} + {{- $telemetryPort := ternary 9091 (.Values.selfMonitor.telemetryPort | default 8081) .Values.kubeRBACProxy.enabled}} + - port: {{ $telemetryPort }} + protocol: TCP + {{- end }} + {{- end }} + podSelector: + {{- if .Values.networkPolicy.podSelector }} + {{- toYaml .Values.networkPolicy.podSelector | nindent 4 }} + {{- else }} + matchLabels: + {{- include "kube-state-metrics.selectorLabels" . 
| indent 6 }} + {{- end }} + policyTypes: + - Ingress + - Egress +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/pdb.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/pdb.yaml new file mode 100644 index 00000000..3771b511 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget -}} +{{ if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" -}} +apiVersion: policy/v1 +{{- else -}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000..8905e113 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,39 @@ +{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100644 index 00000000..654e4a3d --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100644 index 00000000..5b62a18b --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml new file mode 100644 index 00000000..b0a511e9 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rbac-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-rbac-config + namespace: {{ template "kube-state-metrics.namespace" . }} +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "kube-state-metrics.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "kube-state-metrics.fullname" . }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/role.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/role.yaml new file mode 100644 index 00000000..d33687f2 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/role.yaml @@ -0,0 +1,212 @@ +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (ternary (join "," .Values.namespaces | split "," ) (list "") (eq $.Values.rbac.useClusterRole false)) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq $.Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq $.Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if has "certificatesigningrequests" $.Values.collectors }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if has "configmaps" $.Values.collectors }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if has "cronjobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "daemonsets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "deployments" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpoints" $.Values.collectors }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if has "endpointslices" $.Values.collectors }} +- apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: ["list", "watch"] +{{ end -}} +{{ if has "horizontalpodautoscalers" $.Values.collectors }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "ingresses" $.Values.collectors }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "jobs" $.Values.collectors }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if has "leases" $.Values.collectors }} +- apiGroups: ["coordination.k8s.io"] + resources: + - leases + verbs: ["list", "watch"] +{{ end -}} +{{ if has "limitranges" $.Values.collectors }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if has "mutatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "namespaces" $.Values.collectors }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if has "networkpolicies" $.Values.collectors }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if has "nodes" $.Values.collectors }} +- apiGroups: [""] + resources: + - nodes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumeclaims" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if has "persistentvolumes" $.Values.collectors }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if has "poddisruptionbudgets" $.Values.collectors }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "pods" $.Values.collectors }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicasets" $.Values.collectors }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "replicationcontrollers" $.Values.collectors }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if has "resourcequotas" $.Values.collectors }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if has "secrets" $.Values.collectors }} +- apiGroups: [""] + resources: + - secrets + verbs: 
["list", "watch"] +{{ end -}} +{{ if has "services" $.Values.collectors }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if has "statefulsets" $.Values.collectors }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if has "storageclasses" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if has "validatingwebhookconfigurations" $.Values.collectors }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if has "volumeattachments" $.Values.collectors }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{- if $.Values.kubeRBACProxy.enabled }} +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: ["create"] +- apiGroups: ["authorization.k8s.io"] + resources: + - subjectaccessreviews + verbs: ["create"] +{{- end }} +{{- if $.Values.customResourceState.enabled }} +- apiGroups: ["apiextensions.k8s.io"] + resources: + - customresourcedefinitions + verbs: ["list", "watch"] +{{- end }} +{{ if $.Values.rbac.extraRules }} +{{ toYaml $.Values.rbac.extraRules }} +{{ end }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml new file mode 100644 index 00000000..330651b7 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/rolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (join "," $.Values.namespaces) | split "," }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "kube-state-metrics.labels" $ | indent 4 }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/service.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/service.yaml new file mode 100644 index 00000000..6c486a66 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . 
| indent 4 }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port | default 8080}} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.port | default 8080}} + {{ if .Values.selfMonitor.enabled }} + - name: "metrics" + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + {{- if .Values.selfMonitor.telemetryNodePort }} + nodePort: {{ .Values.selfMonitor.telemetryNodePort }} + {{- end }} + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} +{{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} +{{- if .Values.autosharding.enabled }} + clusterIP: None +{{- else if .Values.service.clusterIP }} + clusterIP: "{{ .Values.service.clusterIP }}" +{{- end }} + selector: + {{- include "kube-state-metrics.selectorLabels" . | indent 4 }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100644 index 00000000..a7ff4dd3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: + {{- include "kube-state-metrics.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100644 index 00000000..f98b3f36 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,107 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.monitor.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + {{- with .Values.prometheus.monitor.targetLabels }} + targetLabels: + {{- toYaml . 
| trim | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | indent 2 }} + selector: + matchLabels: + {{- with .Values.prometheus.monitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "kube-state-metrics.selectorLabels" . | indent 6 }} + {{- end }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.scheme }} + scheme: {{ .Values.prometheus.monitor.scheme }} + {{- end }} + {{- if .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.prometheus.monitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.prometheus.monitor.bearerTokenFile }} + {{- end }} + {{- with .Values.prometheus.monitor.bearerTokenSecret }} + bearerTokenSecret: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.selfMonitor.enabled }} + - port: metrics + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} + {{- if .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ .Values.prometheus.monitor.proxyUrl}} + {{- end }} + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{- if .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.prometheus.monitor.metricRelabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml .Values.prometheus.monitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.scheme }} + scheme: {{ .Values.prometheus.monitor.scheme }} + {{- end }} + {{- if .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.prometheus.monitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.prometheus.monitor.bearerTokenFile }} + {{- end }} + {{- with .Values.prometheus.monitor.bearerTokenSecret }} + bearerTokenSecret: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100644 index 00000000..489de147 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100644 index 00000000..73b37a4f --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.serviceAccountName" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml new file mode 100644 index 00000000..f46305b5 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/templates/verticalpodautoscaler.yaml @@ -0,0 +1,44 @@ +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + {{- include "kube-state-metrics.labels" . | indent 4 }} +spec: + {{- with .Values.verticalPodAutoscaler.recommenders }} + recommenders: + {{- toYaml . | nindent 4 }} + {{- end }} + resourcePolicy: + containerPolicies: + - containerName: {{ template "kube-state-metrics.name" . }} + {{- with .Values.verticalPodAutoscaler.controlledResources }} + controlledResources: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.controlledValues }} + controlledValues: {{ .Values.verticalPodAutoscaler.controlledValues }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.maxAllowed }} + maxAllowed: + {{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }} + {{- end }} + {{- if .Values.verticalPodAutoscaler.minAllowed }} + minAllowed: + {{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }} + {{- end }} + targetRef: + apiVersion: apps/v1 + {{- if .Values.autosharding.enabled }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ template "kube-state-metrics.fullname" . }} + {{- with .Values.verticalPodAutoscaler.updatePolicy }} + updatePolicy: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/values.yaml b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/values.yaml new file mode 100644 index 00000000..5bc13c1c --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/kube-state-metrics/values.yaml @@ -0,0 +1,440 @@ +# Default values for kube-state-metrics. +prometheusScrape: true +image: + registry: registry.k8s.io + repository: kube-state-metrics/kube-state-metrics + # If unset use v + .Charts.appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + # + # Allow parent charts to override registry hostname + imageRegistry: "" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +# Number of old history to retain to allow rollback +# Default Kubernetes value is set to 10 +revisionHistoryLimit: 10 + +# List of additional cli arguments to configure kube-state-metrics +# for example: --enable-gzip-encoding, --log-file, etc. +# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md +extraArgs: [] + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + # Only allow access to the loadBalancerIP from these IPs + loadBalancerSourceRanges: [] + clusterIP: "" + annotations: {} + +## Additional labels to add to all resources +customLabels: {} + # app: kube-state-metrics + +## Override selector labels +selectorOverride: {} + +## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box +releaseLabel: false + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + + # Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here. 
+ # useExistingRole: your-existing-role + + # If set to false - Run without cluster-admin privileges - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to) + useClusterRole: true + + # Add permissions for CustomResources' apiGroups in Role/ClusterRole. Should be used in conjunction with Custom Resource State Metrics configuration + # Example: + # - apiGroups: ["monitoring.coreos.com"] + # resources: ["prometheuses"] + # verbs: ["list", "watch"] + extraRules: [] + +# Configure kube-rbac-proxy. When enabled, creates one kube-rbac-proxy container per exposed HTTP endpoint (metrics and telemetry if enabled). +# Requests are served through the same service but are then served over HTTPS. +kubeRBACProxy: + enabled: false + image: + registry: quay.io + repository: brancz/kube-rbac-proxy + tag: v0.14.0 + sha: "" + pullPolicy: IfNotPresent + + # List of additional cli arguments to configure kube-rbac-proxy + # for example: --tls-cipher-suites, --log-file, etc. + # all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage + extraArgs: [] + + ## Specify security settings for a Container + ## Allows overrides and additional options compared to (Pod) securityContext + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} + + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: + cpu: 200m + memory: 200Mi + requests: + cpu: 200m + memory: 200Mi + + ## volumeMounts enables mounting custom volumes in rbac-proxy containers + ## Useful for TLS certificates and keys + volumeMounts: [] + # - mountPath: /etc/tls + # name: kube-rbac-proxy-tls + # readOnly: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created; requires rbac.create to be true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # ServiceAccount annotations. + # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +prometheus: + monitor: + enabled: false + annotations: {} + additionalLabels: {} + namespace: "" + jobLabel: "" + targetLabels: [] + podTargetLabels: [] + interval: "" + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on the number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on the length of label names that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on the length of label values that will be accepted for a sample.
Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + scrapeTimeout: "" + proxyUrl: "" + selectorOverride: {} + honorLabels: false + metricRelabelings: [] + relabelings: [] + scheme: "" + ## File to read bearer token for scraping targets + bearerTokenFile: "" + ## Secret to mount to read bearer token for scraping targets. The secret needs + ## to be in the same namespace as the service monitor and accessible by the + ## Prometheus Operator + bearerTokenSecret: {} + # name: secret-name + # key: key-name + tlsConfig: {} + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +## Configure network policy for kube-state-metrics +networkPolicy: + enabled: false + # networkPolicy.flavor -- Flavor of the network policy to use. + # Can be: + # * kubernetes for networking.k8s.io/v1/NetworkPolicy + # * cilium for cilium.io/v2/CiliumNetworkPolicy + flavor: kubernetes + + ## Configure the cilium network policy kube-apiserver selector + # cilium: + # kubeApiServerSelector: + # - toEntities: + # - kube-apiserver + + # egress: + # - {} + # ingress: + # - {} + # podSelector: + # matchLabels: + # app.kubernetes.io/name: kube-state-metrics + +securityContext: + enabled: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +## Specify security settings for a Container +## Allows overrides and additional options compared to (Pod) securityContext +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +## Topology spread constraints for pod assignment +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + +# Annotations to be added to the deployment/statefulset +annotations: {} + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Comma-separated list of metrics to be exposed. +# This list comprises of exact metric names and/or regex patterns. +# The allowlist and denylist are mutually exclusive. +metricAllowlist: [] + +# Comma-separated list of metrics not to be enabled. +# This list comprises of exact metric names and/or regex patterns. 
+# The allowlist and denylist are mutually exclusive. +metricDenylist: [] + +# Comma-separated list of additional Kubernetes label keys that will be used in the resource's +# labels metric. By default the metric contains only name and namespace labels. +# To include additional labels, provide a list of resource names in their plural form and Kubernetes +# label keys you would like to allow for them (Example: '=namespaces=[k8s-label-1,k8s-label-n,...],pods=[app],...)'. +# A single '*' can be provided per resource instead to allow any labels, but that has +# severe performance implications (Example: '=pods=[*]'). +metricLabelsAllowlist: [] + # - namespaces=[k8s-label-1,k8s-label-n] + +# Comma-separated list of Kubernetes annotations keys that will be used in the resource' +# labels metric. By default the metric contains only name and namespace labels. +# To include additional annotations provide a list of resource names in their plural form and Kubernetes +# annotation keys you would like to allow for them (Example: '=namespaces=[kubernetes.io/team,...],pods=[kubernetes.io/team],...)'. +# A single '*' can be provided per resource instead to allow any annotations, but that has +# severe performance implications (Example: '=pods=[*]'). +metricAnnotationsAllowList: [] + # - pods=[k8s-annotation-1,k8s-annotation-n] + +# Available collectors for kube-state-metrics. +# By default, all available resources are enabled, comment out to disable. +collectors: + - certificatesigningrequests + - configmaps + - cronjobs + - daemonsets + - deployments + - endpoints + - horizontalpodautoscalers + - ingresses + - jobs + - leases + - limitranges + - mutatingwebhookconfigurations + - namespaces + - networkpolicies + - nodes + - persistentvolumeclaims + - persistentvolumes + - poddisruptionbudgets + - pods + - replicasets + - replicationcontrollers + - resourcequotas + - secrets + - services + - statefulsets + - storageclasses + - validatingwebhookconfigurations + - volumeattachments + +# Enabling kubeconfig will pass the --kubeconfig argument to the container +kubeconfig: + enabled: false + # base64 encoded kube-config file + secret: + +# Enabling support for customResourceState, will create a configMap including your config that will be read from kube-state-metrics +customResourceState: + enabled: false + # Add (Cluster)Role permissions to list/watch the customResources defined in the config to rbac.extraRules + config: {} + +# Enable only the release namespace for collecting resources. By default all namespaces are collected. +# If releaseNamespace and namespaces are both set a merged list will be collected. +releaseNamespace: false + +# Comma-separated list(string) or yaml list of namespaces to be enabled for collecting resources. By default all namespaces are collected. +namespaces: "" + +# Comma-separated list of namespaces not to be enabled. If namespaces and namespaces-denylist are both set, +# only namespaces that are excluded in namespaces-denylist will be used. +namespacesDenylist: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: + cpu: 100m + memory: 64Mi + requests: + cpu: 10m + memory: 32Mi + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. +## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overridden +# If you set telemetryNodePort, you must also set service.type to NodePort +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 + # telemetryNodePort: 0 + +# Enable vertical pod autoscaler support for kube-state-metrics +verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: [] + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. + # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + # updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". + # updateMode: Auto + +# volumeMounts are used to add custom volume mounts to deployment. +# See example below +volumeMounts: [] +# - mountPath: /etc/config +# name: config-volume + +# volumes are used to add custom volumes to deployment +# See example below +volumes: [] +# - configMap: +# name: cm-for-volume +# name: config-volume + +# Extra manifests to deploy as an array +extraManifests: [] + # - apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/.helmignore b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/Chart.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/Chart.yaml new file mode 100644 index 00000000..add8d6cc --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/Chart.yaml @@ -0,0 +1,25 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: 1.6.0 +description: A Helm chart for prometheus node-exporter +home: https://github.com/prometheus/node_exporter/ +keywords: +- node-exporter +- prometheus +- exporter +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rootsandtrees@posteo.de + name: zeritti +name: prometheus-node-exporter +sources: +- https://github.com/prometheus/node_exporter/ +type: application +version: 4.22.0 diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/README.md b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/README.md new file mode 100644 index 00000000..5dbfa328 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/README.md @@ -0,0 +1,100 @@ +# Prometheus `Node Exporter` + +Prometheus exporter for hardware and OS metrics exposed by *NIX kernels, written in Go with pluggable metric collectors. + +This chart bootstraps a prometheus [`Node Exporter`](http://github.com/prometheus/node_exporter) daemonset on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Get Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus-node-exporter +``` + +_See [configuration](#configuring) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### 4.16 to 4.17+ + +`containerSecurityContext.readOnlyRootFilesystem` is set to `true` by default. + +### 3.x to 4.x + +Starting from version 4.0.0, the `node exporter` chart is using the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). Therefore you have to delete the daemonset before you upgrade. 
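+ +The old DaemonSet has to be removed because its label selector changes and DaemonSet selectors are immutable in Kubernetes. For reference, the selector labels applied from 4.0.0 onward are the ones defined in this chart's `templates/_helpers.tpl`, shown below with the default chart name and a placeholder release name: + +```yaml +app.kubernetes.io/name: prometheus-node-exporter +app.kubernetes.io/instance: <release-name> +``` + +Delete the old DaemonSet, then upgrade (or reinstall):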
+ +```console +kubectl delete daemonset -l app=prometheus-node-exporter +helm upgrade -i prometheus-node-exporter prometheus-community/prometheus-node-exporter +``` + +If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmonitor), please ensure to upgrade their `selector` fields accordingly to the new labels. + +### From 2.x to 3.x + +Change the following: + +```yaml +hostRootFsMount: true +``` + +to: + +```yaml +hostRootFsMount: + enabled: true + mountPropagation: HostToContainer +``` + +## Configuring + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/prometheus-node-exporter +``` + +### kube-rbac-proxy + +You can enable `prometheus-node-exporter` endpoint protection using `kube-rbac-proxy`. By setting `kubeRBACProxy.enabled: true`, this chart will deploy a RBAC proxy container protecting the node-exporter endpoint. +To authorize access, authenticate your requests (via a `ServiceAccount` for example) with a `ClusterRole` attached such as: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus-node-exporter-read +rules: + - apiGroups: [ "" ] + resources: ["services/node-exporter-prometheus-node-exporter"] + verbs: + - get +``` + +See [kube-rbac-proxy examples](https://github.com/brancz/kube-rbac-proxy/tree/master/examples/resource-attributes) for more details. diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/ci/port-values.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/ci/port-values.yaml new file mode 100644 index 00000000..dbfb4b67 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/ci/port-values.yaml @@ -0,0 +1,3 @@ +service: + targetPort: 9102 + port: 9102 diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt new file mode 100644 index 00000000..db8584de --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/NOTES.txt @@ -0,0 +1,29 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-node-exporter.namespace" . }} {{ template "prometheus-node-exporter.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-node-exporter.namespace" . }} -l "app.kubernetes.io/name={{ template "prometheus-node-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9100 to use your application" + kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100 +{{- end }} + +{{- if .Values.kubeRBACProxy.enabled}} + +kube-rbac-proxy endpoint protection is enabled: +- Metrics endpoints are now served over HTTPS +- Ensure that the client authenticates its requests (e.g. via a service account) with the following role permissions: +``` +rules: + - apiGroups: [ "" ] + resources: ["services/{{ template "prometheus-node-exporter.fullname" . }}"] + verbs: + - get +``` +{{- end }} \ No newline at end of file diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl new file mode 100644 index 00000000..84552fe4 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/_helpers.tpl @@ -0,0 +1,185 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus-node-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "prometheus-node-exporter.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus-node-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "prometheus-node-exporter.labels" -}} +helm.sh/chart: {{ include "prometheus-node-exporter.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/component: metrics +app.kubernetes.io/part-of: {{ include "prometheus-node-exporter.name" . }} +{{ include "prometheus-node-exporter.selectorLabels" . }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end }} +{{- with .Values.podLabels }} +{{ toYaml . }} +{{- end }} +{{- if .Values.releaseLabel }} +release: {{ .Release.Name }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "prometheus-node-exporter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "prometheus-node-exporter.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus-node-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "prometheus-node-exporter.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +The image to use +*/}} +{{- define "prometheus-node-exporter.image" -}} +{{- if .Values.image.sha }} +{{- fail "image.sha forbidden. Use image.digest instead" }} +{{- else if .Values.image.digest }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s@%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }} +{{- else }} +{{- printf "%s/%s:%s@%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) .Values.image.digest }} +{{- end }} +{{- else }} +{{- if .Values.global.imageRegistry }} +{{- printf "%s/%s:%s" .Values.global.imageRegistry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- else }} +{{- printf "%s/%s:%s" .Values.image.registry .Values.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.image.tag) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "prometheus-node-exporter.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Create the namespace name of the service monitor +*/}} +{{- define "prometheus-node-exporter.monitor-namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- if .Values.prometheus.monitor.namespace }} +{{- .Values.prometheus.monitor.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for servicemonitor */}} +{{- define "servicemonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . }} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "Values" .Values "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "prometheus-node-exporter.imagePullSecrets" -}} +{{- range (concat .Values.global.imagePullSecrets .imagePullSecrets) }} + {{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml . | trim }} + {{- else }} +- name: {{ . }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Create the namespace name of the pod monitor +*/}} +{{- define "prometheus-node-exporter.podmonitor-namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- if .Values.prometheus.podMonitor.namespace }} +{{- .Values.prometheus.podMonitor.namespace }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Sets default scrape limits for podmonitor */}} +{{- define "podmonitor.scrapeLimits" -}} +{{- with .sampleLimit }} +sampleLimit: {{ . }} +{{- end }} +{{- with .targetLimit }} +targetLimit: {{ . 
}} +{{- end }} +{{- with .labelLimit }} +labelLimit: {{ . }} +{{- end }} +{{- with .labelNameLengthLimit }} +labelNameLengthLimit: {{ . }} +{{- end }} +{{- with .labelValueLengthLimit }} +labelValueLengthLimit: {{ . }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml new file mode 100644 index 00000000..1fd91150 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +rules: + {{- if $.Values.kubeRBACProxy.enabled }} + - apiGroups: [ "authentication.k8s.io" ] + resources: + - tokenreviews + verbs: [ "create" ] + - apiGroups: [ "authorization.k8s.io" ] + resources: + - subjectaccessreviews + verbs: [ "create" ] + {{- end }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..653305ad --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.kubeRBACProxy.enabled true) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + name: {{ template "prometheus-node-exporter.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml new file mode 100644 index 00000000..a5116a89 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -0,0 +1,285 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.daemonsetAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . 
| nindent 4 }} + {{- end }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 8 }} + spec: + automountServiceAccountToken: {{ ternary true false (or .Values.serviceAccount.automountServiceAccountToken .Values.kubeRBACProxy.enabled) }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.extraInitContainers }} + initContainers: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "prometheus-node-exporter.serviceAccountName" . }} + containers: + {{- $servicePort := ternary 8100 .Values.service.port .Values.kubeRBACProxy.enabled }} + - name: node-exporter + image: {{ include "prometheus-node-exporter.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.hostRootFsMount.enabled }} + - --path.rootfs=/host/root + {{- if semverCompare ">=1.4.0" (default .Chart.AppVersion .Values.image.tag) }} + - --path.udev.data=/host/root/run/udev/data + {{- end }} + {{- end }} + - --web.listen-address=[$(HOST_IP)]:{{ $servicePort }} + {{- with .Values.extraArgs }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + env: + - name: HOST_IP + {{- if .Values.kubeRBACProxy.enabled }} + value: 127.0.0.1 + {{- else if .Values.service.listenOnAllInterfaces }} + value: 0.0.0.0 + {{- else }} + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- end }} + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if eq .Values.kubeRBACProxy.enabled false }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + {{- end }} + livenessProbe: + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + httpGet: + {{- if .Values.kubeRBACProxy.enabled }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.livenessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: / + port: {{ $servicePort }} + scheme: {{ upper .Values.livenessProbe.httpGet.scheme }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + httpGet: + {{- if .Values.kubeRBACProxy.enabled }} + host: 127.0.0.1 + {{- end }} + httpHeaders: + {{- range $_, $header := .Values.readinessProbe.httpGet.httpHeaders }} + - name: {{ $header.name }} + value: {{ $header.value }} + {{- end }} + path: / + port: {{ $servicePort }} + scheme: {{ upper .Values.readinessProbe.httpGet.scheme }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + {{- with .Values.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.hostRootFsMount.enabled }} + - name: root + mountPath: /host/root + {{- with .Values.hostRootFsMount.mountPropagation }} + mountPropagation: {{ . }} + {{- end }} + readOnly: true + {{- end }} + {{- range $_, $mount := .Values.extraHostVolumeMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- with $mount.mountPropagation }} + mountPropagation: {{ . }} + {{- end }} + {{- end }} + {{- range $_, $mount := .Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: true + {{- end }} + {{- range $_, $mount := .Values.configmaps }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + {{- end }} + {{- range $_, $mount := .Values.secrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + {{- with .Values.sidecars }} + {{- toYaml . | nindent 8 }} + {{- if or $.Values.sidecarVolumeMount $.Values.sidecarHostVolumeMounts }} + volumeMounts: + {{- range $_, $mount := $.Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- end }} + {{- range $_, $mount := $.Values.sidecarHostVolumeMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- if $mount.mountPropagation }} + mountPropagation: {{ $mount.mountPropagation }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy + args: + {{- if .Values.kubeRBACProxy.extraArgs }} + {{- .Values.kubeRBACProxy.extraArgs | toYaml | nindent 12 }} + {{- end }} + - --secure-listen-address=:{{ .Values.service.port}} + - --upstream=http://127.0.0.1:{{ $servicePort }}/ + - --proxy-endpoints-port=8888 + - --config-file=/etc/kube-rbac-proxy-config/config-file.yaml + volumeMounts: + - name: kube-rbac-proxy-config + mountPath: /etc/kube-rbac-proxy-config + imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }} + {{- if .Values.kubeRBACProxy.image.sha }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}@sha256:{{ .Values.kubeRBACProxy.image.sha }}" + {{- else }} + image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}" + {{- end }} + ports: + - containerPort: {{ .Values.service.port}} + name: "http" + - containerPort: 8888 + name: "http-healthz" + readinessProbe: + httpGet: + scheme: HTTPS + port: 8888 + path: healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + {{- if .Values.kubeRBACProxy.resources }} + resources: + {{ toYaml .Values.kubeRBACProxy.resources | nindent 12 }} + {{- end }} + {{- if .Values.kubeRBACProxy.containerSecurityContext }} + securityContext: + {{ toYaml .Values.kubeRBACProxy.containerSecurityContext | nindent 12 }} + {{- end }} + {{- end }} + {{- if or .Values.imagePullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.imagePullSecrets) | indent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostPID: {{ .Values.hostPID }} + {{- with .Values.affinity }} + affinity: + {{- toYaml 
. | nindent 8 }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.hostRootFsMount.enabled }} + - name: root + hostPath: + path: / + {{- end }} + {{- range $_, $mount := .Values.extraHostVolumeMounts }} + - name: {{ $mount.name }} + hostPath: + path: {{ $mount.hostPath }} + {{- end }} + {{- range $_, $mount := .Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + emptyDir: + medium: Memory + {{- end }} + {{- range $_, $mount := .Values.sidecarHostVolumeMounts }} + - name: {{ $mount.name }} + hostPath: + path: {{ $mount.hostPath }} + {{- end }} + {{- range $_, $mount := .Values.configmaps }} + - name: {{ $mount.name }} + configMap: + name: {{ $mount.name }} + {{- end }} + {{- range $_, $mount := .Values.secrets }} + - name: {{ $mount.name }} + secret: + secretName: {{ $mount.name }} + {{- end }} + {{- if .Values.kubeRBACProxy.enabled }} + - name: kube-rbac-proxy-config + configMap: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config + {{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml new file mode 100644 index 00000000..45eeb8d9 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/endpoints.yaml @@ -0,0 +1,18 @@ +{{- if .Values.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +subsets: + - addresses: + {{- range .Values.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: 9100 + protocol: TCP +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml new file mode 100644 index 00000000..567f7bf3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml new file mode 100644 index 00000000..82572272 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/networkpolicy.yaml @@ -0,0 +1,23 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + ingress: + - ports: + - port: {{ .Values.service.port }} + policyTypes: + - Egress + - Ingress + podSelector: + matchLabels: + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml new file mode 100644 index 00000000..f88da6a3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/podmonitor.yaml @@ -0,0 +1,91 @@ +{{- if .Values.prometheus.podMonitor.enabled }} +apiVersion: {{ .Values.prometheus.podMonitor.apiVersion | default "monitoring.coreos.com/v1" }} +kind: PodMonitor +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.podmonitor-namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.prometheus.podMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.podMonitor.jobLabel }} + {{- include "podmonitor.scrapeLimits" .Values.prometheus.podMonitor | nindent 2 }} + selector: + matchLabels: + {{- with .Values.prometheus.podMonitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "prometheus-node-exporter.namespace" . }} + {{- with .Values.prometheus.podMonitor.attachMetadata }} + attachMetadata: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + podMetricsEndpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.prometheus.podMonitor.scheme }} + scheme: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.path }} + path: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.bearerTokenSecret }} + bearerTokenSecret: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.authorization }} + authorization: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.oauth2 }} + oauth2: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.proxyUrl }} + proxyUrl: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorTimestamps }} + honorTimestamps: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.honorLabels }} + honorLabels: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.prometheus.podMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.podMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . 
| nindent 8 }} + {{- end }} + enableHttp2: {{ default false .Values.prometheus.podMonitor.enableHttp2 }} + filterRunning: {{ default true .Values.prometheus.podMonitor.filterRunning }} + followRedirects: {{ default false .Values.prometheus.podMonitor.followRedirects }} + {{- with .Values.prometheus.podMonitor.params }} + params: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml new file mode 100644 index 00000000..89573172 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp-{{ include "prometheus-node-exporter.fullname" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ include "prometheus-node-exporter.fullname" . }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml new file mode 100644 index 00000000..33337017 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: psp-{{ include "prometheus-node-exporter.fullname" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ include "prometheus-node-exporter.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp.yaml new file mode 100644 index 00000000..4896c84d --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/psp.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.create .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.rbac.pspAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + privileged: false + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + - 'hostPath' + hostNetwork: true + hostIPC: false + hostPID: true + hostPorts: + - min: 0 + max: 65535 + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Allow adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Allow adding the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml new file mode 100644 index 00000000..814e1103 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/rbac-configmap.yaml @@ -0,0 +1,16 @@ +{{- if .Values.kubeRBACProxy.enabled}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config + namespace: {{ include "prometheus-node-exporter.namespace" . }} +data: + config-file.yaml: |+ + authorization: + resourceAttributes: + namespace: {{ template "prometheus-node-exporter.namespace" . }} + apiVersion: v1 + resource: services + subresource: {{ template "prometheus-node-exporter.fullname" . }} + name: {{ template "prometheus-node-exporter.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/service.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/service.yaml new file mode 100644 index 00000000..068a6bc7 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/service.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" $ | nindent 4 }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if .Values.service.ipDualStack.enabled }} + ipFamilies: {{ toYaml .Values.service.ipDualStack.ipFamilies | nindent 4 }} + ipFamilyPolicy: {{ .Values.service.ipDualStack.ipFamilyPolicy }} +{{- end }} + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: {{ .Values.service.portName }} + selector: + {{- include "prometheus-node-exporter.selectorLabels" . 
| nindent 4 }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml new file mode 100644 index 00000000..5c3348c0 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.rbac.create .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.serviceAccount.imagePullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "prometheus-node-exporter.imagePullSecrets" (dict "Values" .Values "imagePullSecrets" .Values.serviceAccount.imagePullSecrets) | indent 2 }} +{{- end }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml new file mode 100644 index 00000000..0d7a42ea --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: {{ .Values.prometheus.monitor.apiVersion | default "monitoring.coreos.com/v1" }} +kind: ServiceMonitor +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.monitor-namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} + {{- with .Values.prometheus.monitor.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ default "app.kubernetes.io/name" .Values.prometheus.monitor.jobLabel }} + {{- include "servicemonitor.scrapeLimits" .Values.prometheus.monitor | nindent 2 }} + {{- with .Values.prometheus.monitor.podTargetLabels }} + podTargetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- with .Values.prometheus.monitor.selectorOverride }} + {{- toYaml . | nindent 6 }} + {{- else }} + {{- include "prometheus-node-exporter.selectorLabels" . | nindent 6 }} + {{- end }} + {{- with .Values.prometheus.monitor.attachMetadata }} + attachMetadata: + {{- toYaml . | nindent 4 }} + {{- end }} + endpoints: + - port: {{ .Values.service.portName }} + scheme: {{ .Values.prometheus.monitor.scheme }} + {{- with .Values.prometheus.monitor.basicAuth }} + basicAuth: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.proxyUrl }} + proxyUrl: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + {{- with .Values.prometheus.monitor.relabelings }} + relabelings: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.prometheus.monitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml new file mode 100644 index 00000000..2c2705f8 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/templates/verticalpodautoscaler.yaml @@ -0,0 +1,40 @@ +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.verticalPodAutoscaler.enabled) }} +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: {{ include "prometheus-node-exporter.fullname" . }} + namespace: {{ include "prometheus-node-exporter.namespace" . }} + labels: + {{- include "prometheus-node-exporter.labels" . | nindent 4 }} +spec: + {{- with .Values.verticalPodAutoscaler.recommenders }} + recommenders: + {{- toYaml . | nindent 4 }} + {{- end }} + resourcePolicy: + containerPolicies: + - containerName: node-exporter + {{- with .Values.verticalPodAutoscaler.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.controlledValues }} + controlledValues: {{ . }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.verticalPodAutoscaler.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: apps/v1 + kind: DaemonSet + name: {{ include "prometheus-node-exporter.fullname" . }} + {{- with .Values.verticalPodAutoscaler.updatePolicy }} + updatePolicy: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/values.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/values.yaml new file mode 100644 index 00000000..4125fc36 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-node-exporter/values.yaml @@ -0,0 +1,478 @@ +# Default values for prometheus-node-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + registry: quay.io + repository: prometheus/node-exporter + # Overrides the image tag whose default is {{ printf "v%s" .Chart.AppVersion }} + tag: "" + pullPolicy: IfNotPresent + digest: "" + +imagePullSecrets: [] +# - name: "image-pull-secret" +nameOverride: "" +fullnameOverride: "" + +# Number of old history to retain to allow rollback +# Default Kubernetes value is set to 10 +revisionHistoryLimit: 10 + +global: + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + # + # Allow parent charts to override registry hostname + imageRegistry: "" + +# Configure kube-rbac-proxy. When enabled, creates a kube-rbac-proxy to protect the node-exporter http endpoint. +# The requests are served through the same service but requests are HTTPS. 
+kubeRBACProxy: + enabled: false + image: + registry: quay.io + repository: brancz/kube-rbac-proxy + tag: v0.14.0 + sha: "" + pullPolicy: IfNotPresent + + # List of additional cli arguments to configure kube-rbac-prxy + # for example: --tls-cipher-suites, --log-file, etc. + # all the possible args can be found here: https://github.com/brancz/kube-rbac-proxy#usage + extraArgs: [] + + ## Specify security settings for a Container + ## Allows overrides and additional options compared to (Pod) securityContext + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + containerSecurityContext: {} + + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +service: + type: ClusterIP + port: 9100 + targetPort: 9100 + nodePort: + portName: metrics + listenOnAllInterfaces: true + annotations: + prometheus.io/scrape: "true" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + +# Set a NetworkPolicy with: +# ingress only on service.port +# no egress permitted +networkPolicy: + enabled: false + +# Additional environment variables that will be passed to the daemonset +env: {} +## env: +## VARIABLE: value + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + + jobLabel: "" + + # List of pod labels to add to node exporter metrics + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + podTargetLabels: [] + + scheme: http + basicAuth: {} + bearerTokenFile: + tlsConfig: {} + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Override serviceMonitor selector + ## + selectorOverride: {} + + ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + ## + attachMetadata: + node: false + + relabelings: [] + metricRelabelings: [] + interval: "" + scrapeTimeout: 10s + ## prometheus.monitor.apiVersion ApiVersion for the serviceMonitor Resource(defaults to "monitoring.coreos.com/v1") + apiVersion: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + # PodMonitor defines monitoring for a set of pods. + # ref. 
https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PodMonitor + # Using a PodMonitor may be preferred in some environments where there is very large number + # of Node Exporter endpoints (1000+) behind a single service. + # The PodMonitor is disabled by default. When switching from ServiceMonitor to PodMonitor, + # the time series resulting from the configuration through PodMonitor may have different labels. + # For instance, there will not be the service label any longer which might + # affect PromQL queries selecting that label. + podMonitor: + enabled: false + # Namespace in which to deploy the pod monitor. Defaults to the release namespace. + namespace: "" + # Additional labels, e.g. setting a label for pod monitor selector as set in prometheus + additionalLabels: {} + # release: kube-prometheus-stack + # PodTargetLabels transfers labels of the Kubernetes Pod onto the target. + podTargetLabels: [] + # apiVersion defaults to monitoring.coreos.com/v1. + apiVersion: "" + # Override pod selector to select pod objects. + selectorOverride: {} + # Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + attachMetadata: + node: false + # The label to use to retrieve the job name from. Defaults to label app.kubernetes.io/name. + jobLabel: "" + + # Scheme/protocol to use for scraping. + scheme: "http" + # Path to scrape metrics at. + path: "/metrics" + + # BasicAuth allow an endpoint to authenticate over basic authentication. + # More info: https://prometheus.io/docs/operating/configuration/#endpoint + basicAuth: {} + # Secret to mount to read bearer token for scraping targets. + # The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator. + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#secretkeyselector-v1-core + bearerTokenSecret: {} + # TLS configuration to use when scraping the endpoint. + tlsConfig: {} + # Authorization section for this endpoint. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.SafeAuthorization + authorization: {} + # OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.OAuth2 + oauth2: {} + + # ProxyURL eg http://proxyserver:2195. Directs scrapes through proxy to this endpoint. + proxyUrl: "" + # Interval at which endpoints should be scraped. If not specified Prometheus’ global scrape interval is used. + interval: "" + # Timeout after which the scrape is ended. If not specified, the Prometheus global scrape interval is used. + scrapeTimeout: "" + # HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + honorTimestamps: true + # HonorLabels chooses the metric’s labels on collisions with target labels. + honorLabels: true + # Whether to enable HTTP2. Default false. + enableHttp2: "" + # Drop pods that are not running. (Failed, Succeeded). + # Enabled by default. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase + filterRunning: "" + # FollowRedirects configures whether scrape requests follow HTTP 3xx redirects. Default false. + followRedirects: "" + # Optional HTTP URL parameters + params: {} + + # RelabelConfigs to apply to samples before scraping. 
Prometheus Operator automatically adds + # relabelings for a few standard Kubernetes fields. The original scrape job’s name + # is available via the __tmp_prometheus_job_name label. + # More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + relabelings: [] + # MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + + # SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + sampleLimit: 0 + # TargetLimit defines a limit on the number of scraped targets that will be accepted. + targetLimit: 0 + # Per-scrape limit on number of labels that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. + labelLimit: 0 + # Per-scrape limit on length of labels name that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. + labelNameLengthLimit: 0 + # Per-scrape limit on length of labels value that will be accepted for a sample. + # Only valid in Prometheus versions 2.27.0 and newer. + labelValueLengthLimit: 0 + +## Customize the updateStrategy if set +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + imagePullSecrets: [] + automountServiceAccountToken: false + +securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + +containerSecurityContext: + readOnlyRootFilesystem: true + # capabilities: + # add: + # - SYS_TIME + +rbac: + ## If true, create & use RBAC resources + ## + create: true + ## If true, create & use Pod Security Policy resources + ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + pspEnabled: true + pspAnnotations: {} + +# for deployments that have node_exporter deployed outside of the cluster, list +# their addresses here +endpoints: [] + +# Expose the service to the host network +hostNetwork: true + +# Share the host process ID namespace +hostPID: true + +# Mount the node's root file system (/) at /host/root in the container +hostRootFsMount: + enabled: true + # Defines how new mounts in existing mounts on the node or in the container + # are propagated to the container or node, respectively. Possible values are + # None, HostToContainer, and Bidirectional. If this field is omitted, then + # None is used. 
More information on: + # https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation + mountPropagation: HostToContainer + +## Assign a group of affinity scheduling rules +## +affinity: {} +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchFields: +# - key: metadata.name +# operator: In +# values: +# - target-host-name + +# Annotations to be added to node exporter pods +podAnnotations: + # Fix for very slow GKE cluster upgrades + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + +# Extra labels to be added to node exporter pods +podLabels: {} + +# Annotations to be added to node exporter daemonset +daemonsetAnnotations: {} + +## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box +releaseLabel: false + +# Custom DNS configuration to be added to prometheus-node-exporter pods +dnsConfig: {} +# nameservers: +# - 1.2.3.4 +# searches: +# - ns1.svc.cluster-domain.example +# - my.dns.search.suffix +# options: +# - name: ndots +# value: "2" +# - name: edns0 + +## Assign a nodeSelector if operating a hybrid cluster +## +nodeSelector: + kubernetes.io/os: linux + # kubernetes.io/arch: amd64 + +tolerations: + - effect: NoSchedule + operator: Exists + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +## Additional container arguments +## +extraArgs: [] +# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$ +# - --collector.textfile.directory=/run/prometheus + +## Additional mounts from the host to node-exporter container +## +extraHostVolumeMounts: [] +# - name: +# hostPath: +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +## Additional configmaps to be mounted. +## +configmaps: [] +# - name: +# mountPath: +secrets: [] +# - name: +# mountPath: +## Override the deployment namespace +## +namespaceOverride: "" + +## Additional containers for export metrics to text file +## +sidecars: [] +## - name: nvidia-dcgm-exporter +## image: nvidia/dcgm-exporter:1.4.3 + +## Volume for sidecar containers +## +sidecarVolumeMount: [] +## - name: collector-textfiles +## mountPath: /run/prometheus +## readOnly: false + +## Additional mounts from the host to sidecar containers +## +sidecarHostVolumeMounts: [] +# - name: +# hostPath: +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +## Additional InitContainers to initialize the pod +## +extraInitContainers: [] + +## Liveness probe +## +livenessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +## Readiness probe +## +readinessProbe: + failureThreshold: 3 + httpGet: + httpHeaders: [] + scheme: http + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + +# Enable vertical pod autoscaler support for prometheus-node-exporter +verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. 
+ # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + # updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". + # updateMode: Auto + +# Extra manifests to deploy as an array +extraManifests: [] + # - apiVersion: v1 + # kind: ConfigMap + # metadata: + # name: prometheus-extra + # data: + # extra-data: "value" diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/.helmignore b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/.helmignore new file mode 100644 index 00000000..e90c9f6d --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/.helmignore @@ -0,0 +1,24 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +# OWNERS file for Kubernetes +OWNERS \ No newline at end of file diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/Chart.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/Chart.yaml new file mode 100644 index 00000000..39b28023 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts +apiVersion: v2 +appVersion: v1.6.0 +description: A Helm chart for prometheus pushgateway +home: https://github.com/prometheus/pushgateway +keywords: +- pushgateway +- prometheus +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: christian.staude@staffbase.com + name: cstaud +- email: rootsandtrees@posteo.de + name: zeritti +name: prometheus-pushgateway +sources: +- https://github.com/prometheus/pushgateway +type: application +version: 2.4.0 diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/README.md b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/README.md new file mode 100644 index 00000000..1865d471 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/README.md @@ -0,0 +1,88 @@ +# Prometheus Pushgateway + +This chart bootstraps a prometheus [pushgateway](http://github.com/prometheus/pushgateway) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +An optional prometheus `ServiceMonitor` can be enabled, should you wish to use this gateway with a [Prometheus Operator](https://github.com/coreos/prometheus-operator). 
+ +## Get Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus-pushgateway +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 2.0.0 + +Chart API version has been upgraded to v2 so Helm 3 is needed from now on. + +Docker image tag is used from Chart.yaml appVersion field by default now. + +Version 2.0.0 also adapted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). Specifically, labels mapping is listed below: + +```console +OLD => NEW +---------------------------------------- +heritage => app.kubernetes.io/managed-by +chart => helm.sh/chart +[container version] => app.kubernetes.io/version +app => app.kubernetes.io/name +release => app.kubernetes.io/instance +``` + +Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment need to be deleted before upgrade. + +If `runAsStatefulSet: false` (this is the default): + +```console +kubectl delete deploy -l app=prometheus-pushgateway +``` + +If `runAsStatefulSet: true`: + +```console +kubectl delete sts -l app=prometheus-pushgateway +``` + +After that do the actual upgrade: + +```console +helm upgrade -i prometheus-pushgateway prometheus-community/prometheus-pushgateway +``` + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/prometheus-pushgateway +``` diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt new file mode 100644 index 00000000..0196e2b3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/NOTES.txt @@ -0,0 +1,19 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.hosts }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-pushgateway.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc -w {{ template "prometheus-pushgateway.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus-pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus-pushgateway.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9091 to use your application" + kubectl port-forward $POD_NAME 9091 +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl new file mode 100644 index 00000000..b56a2dad --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/_helpers.tpl @@ -0,0 +1,208 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus-pushgateway.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Namespace to set on the resources +*/}} +{{- define "prometheus-pushgateway.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "prometheus-pushgateway.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus-pushgateway.chart" -}} +{{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus-pushgateway.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "prometheus-pushgateway.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Create default labels +*/}} +{{- define "prometheus-pushgateway.defaultLabels" -}} +helm.sh/chart: {{ include "prometheus-pushgateway.chart" . }} +{{ include "prometheus-pushgateway.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.podLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "prometheus-pushgateway.selectorLabels" -}} +app.kubernetes.io/name: {{ include "prometheus-pushgateway.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "prometheus-pushgateway.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion }} +{{- print "extensions/v1beta1" }} +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion }} +{{- print "networking.k8s.io/v1" }} +{{- end }} +{{- end }} + +{{/* +Define PDB apiVersion +*/}} +{{- define "prometheus-pushgateway.pdb.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Define Ingress apiVersion +*/}} +{{- define "prometheus-pushgateway.ingress.apiVersion" -}} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} +{{- print "networking.k8s.io/v1" }} +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Returns pod spec +*/}} +{{- define "prometheus-pushgateway.podSpec" -}} +serviceAccountName: {{ include "prometheus-pushgateway.serviceAccountName" . }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . | quote }} +{{- end }} +{{- with .Values.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.extraInitContainers }} +initContainers: + {{- toYaml . | nindent 2 }} +{{- end }} +containers: + {{- with .Values.extraContainers }} + {{- toYaml . | nindent 2 }} + {{- end }} + - name: pushgateway + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.extraVars }} + env: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.extraArgs }} + args: + {{- toYaml . | nindent 6 }} + {{- end }} + ports: + - name: metrics + containerPort: 9091 + protocol: TCP + {{- if .Values.liveness.enabled }} + livenessProbe: + {{- toYaml .Values.liveness.probe | nindent 6 }} + {{- end }} + {{- if .Values.readiness.enabled }} + readinessProbe: + {{- toYaml .Values.readiness.probe | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.persistentVolume.mountPath }}" + subPath: "{{ .Values.persistentVolume.subPath }}" + {{- with .Values.extraVolumeMounts }} + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + {{- $storageVolumeAsPVCTemplate := and .Values.runAsStatefulSet .Values.persistentVolume.enabled -}} + {{- if not $storageVolumeAsPVCTemplate }} + - name: storage-volume + {{- if .Values.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistentVolume.existingClaim }}{{ .Values.persistentVolume.existingClaim }}{{- else }}{{ include "prometheus-pushgateway.fullname" . 
}}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 2 }} + {{- else if $storageVolumeAsPVCTemplate }} + [] + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml new file mode 100644 index 00000000..557ca6f0 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/deployment.yaml @@ -0,0 +1,28 @@ +{{- if not .Values.runAsStatefulSet }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + {{- with .Values.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }} + spec: + {{- include "prometheus-pushgateway.podSpec" . | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml new file mode 100644 index 00000000..237ac4a1 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/ingress.yaml @@ -0,0 +1,50 @@ +{{- if .Values.ingress.enabled }} +{{- $serviceName := include "prometheus-pushgateway.fullname" . }} +{{- $servicePort := .Values.service.port }} +{{- $ingressPath := .Values.ingress.path }} +{{- $ingressClassName := .Values.ingress.className }} +{{- $ingressPathType := .Values.ingress.pathType }} +{{- $extraPaths := .Values.ingress.extraPaths }} +apiVersion: {{ include "prometheus-pushgateway.ingress.apiVersion" . }} +kind: Ingress +metadata: + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} + ingressClassName: {{ $ingressClassName }} + {{- end }} + rules: + {{- range $host := .Values.ingress.hosts }} + - host: {{ $host }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml new file mode 100644 index 00000000..d3b8019e --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/networkpolicy.yaml @@ -0,0 +1,26 @@ +{{- if .Values.networkPolicy }} +apiVersion: {{ include "prometheus-pushgateway.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- if .Values.networkPolicy.customSelectors }} + name: ingress-allow-customselector-{{ template "prometheus-pushgateway.name" . }} + {{- else if .Values.networkPolicy.allowAll }} + name: ingress-allow-all-{{ template "prometheus-pushgateway.name" . }} + {{- else -}} + {{- fail "One of `allowAll` or `customSelectors` must be specified." }} + {{- end }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + podSelector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- with .Values.networkPolicy.customSelectors }} + from: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml new file mode 100644 index 00000000..6051133c --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "prometheus-pushgateway.pdb.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml new file mode 100644 index 00000000..d2a85f42 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/pushgateway-pvc.yaml @@ -0,0 +1,29 @@ +{{- if and (not .Values.runAsStatefulSet) .Values.persistentVolume.enabled (not .Values.persistentVolume.existingClaim) }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- with .Values.persistentVolume.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.persistentVolumeLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . 
}} +spec: + accessModes: + {{- toYaml .Values.persistentVolume.accessModes | nindent 4 }} + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/service.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/service.yaml new file mode 100644 index 00000000..7a3562cd --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/service.yaml @@ -0,0 +1,41 @@ +{{- $stsNoHeadlessSvcTypes := list "LoadBalancer" "NodePort" -}} +apiVersion: v1 +kind: Service +metadata: + {{- with .Values.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.serviceLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{ else if and .Values.runAsStatefulSet (not (has .Values.service.type $stsNoHeadlessSvcTypes)) }} + clusterIP: None # Headless service + {{- end }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + {{- if and (eq .Values.service.type "NodePort") .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + protocol: TCP + name: http + selector: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 4 }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml new file mode 100644 index 00000000..ab5e2452 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- with .Values.serviceAccountLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.serviceAccountName" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . 
}} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml new file mode 100644 index 00000000..5e3f75a1 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/servicemonitor.yaml @@ -0,0 +1,51 @@ +{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + {{- if .Values.serviceMonitor.additionalLabels }} + {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + name: {{ include "prometheus-pushgateway.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- else }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} + {{- end }} +spec: + endpoints: + - port: http + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scheme }} + scheme: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.bearerTokenFile }} + bearerTokenFile: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .| nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + path: {{ .Values.serviceMonitor.telemetryPath }} + honorLabels: {{ .Values.serviceMonitor.honorLabels }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- tpl (toYaml . | nindent 6) $ }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "prometheus-pushgateway.namespace" . }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml new file mode 100644 index 00000000..8d486a30 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/templates/statefulset.yaml @@ -0,0 +1,49 @@ +{{- if .Values.runAsStatefulSet }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 4 }} + name: {{ include "prometheus-pushgateway.fullname" . }} + namespace: {{ template "prometheus-pushgateway.namespace" . }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "prometheus-pushgateway.fullname" . }} + selector: + matchLabels: + {{- include "prometheus-pushgateway.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . | nindent 8 }} + spec: + {{- include "prometheus-pushgateway.podSpec" . | nindent 6 }} + {{- if .Values.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + {{- with .Values.persistentVolume.annotations }} + annotations: + {{- toYaml . | nindent 10 }} + {{- end }} + labels: + {{- include "prometheus-pushgateway.defaultLabels" . 
| nindent 10 }} + name: storage-volume + spec: + accessModes: + {{ toYaml .Values.persistentVolume.accessModes }} + {{- if .Values.persistentVolume.storageClass }} + {{- if (eq "-" .Values.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistentVolume.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.persistentVolume.size }}" + {{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/values.yaml b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/values.yaml new file mode 100644 index 00000000..02e5c0bf --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/charts/prometheus-pushgateway/values.yaml @@ -0,0 +1,330 @@ +# Default values for prometheus-pushgateway. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Provide a name in place of prometheus-pushgateway for `app:` labels +nameOverride: "" + +# Provide a name to substitute for the full names of resources +fullnameOverride: "" + +# Provide a namespace to substitude for the namespace on resources +namespaceOverride: "" + +image: + repository: quay.io/prometheus/pushgateway + # if not set appVersion field from Chart.yaml is used + tag: "" + pullPolicy: IfNotPresent + +# Optional pod imagePullSecrets +imagePullSecrets: [] + +service: + type: ClusterIP + port: 9091 + targetPort: 9091 + # nodePort: 32100 + + # Optional - Can be used for headless if value is "None" + clusterIP: "" + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + +# Optional pod annotations +podAnnotations: {} + +# Optional pod labels +podLabels: {} + +# Optional service annotations +serviceAnnotations: {} + +# Optional service labels +serviceLabels: {} + +# Optional serviceAccount labels +serviceAccountLabels: {} + +# Optional persistentVolume labels +persistentVolumeLabels: {} + +# Optional additional environment variables +extraVars: [] + +## Additional pushgateway container arguments +## +## example: +## extraArgs: +## - --persistence.file=/data/pushgateway.data +## - --persistence.interval=5m +extraArgs: [] + +## Additional InitContainers to initialize the pod +## +extraInitContainers: [] + +# Optional additional containers (sidecar) +extraContainers: [] + # - name: oAuth2-proxy + # args: + # - -https-address=:9092 + # - -upstream=http://localhost:9091 + # - -skip-auth-regex=^/metrics + # - -openshift-delegate-urls={"/":{"group":"monitoring.coreos.com","resource":"prometheuses","verb":"get"}} + # image: openshift/oauth-proxy:v1.1.0 + # ports: + # - containerPort: 9092 + # name: proxy + # resources: + # limits: + # memory: 16Mi + # requests: + # memory: 4Mi + # cpu: 20m + # volumeMounts: + # - mountPath: /etc/prometheus/secrets/pushgateway-tls + # name: secret-pushgateway-tls + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + +liveness: + enabled: true + probe: + httpGet: + path: /-/healthy + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + +readiness: + enabled: true + probe: + httpGet: + path: /-/ready + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Configure ingress resource that allow you to access the +## pushgateway installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Enable Ingress. + ## + enabled: false + # AWS ALB requires path of /* + className: "" + path: / + pathType: ImplementationSpecific + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Annotations. + ## + # annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Hostnames. + ## Must be provided if Ingress is enabled. + ## + # hosts: + # - pushgateway.domain.com + + ## TLS configuration. + ## Secrets must be manually created in the namespace. + ## + # tls: + # - secretName: pushgateway-tls + # hosts: + # - pushgateway.domain.com + +tolerations: [] + # - effect: NoSchedule + # operator: Exists + +## Node labels for pushgateway pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +replicaCount: 1 + +## When running more than one replica alongside with persistence, different volumes are needed +## per replica, since sharing a `persistence.file` across replicas does not keep metrics synced. +## For this purpose, you can enable the `runAsStatefulSet` to deploy the pushgateway as a +## StatefulSet instead of as a Deployment. +runAsStatefulSet: false + +## Security context to be added to push-gateway pods +## +securityContext: + fsGroup: 65534 + runAsUser: 65534 + runAsNonRoot: true + +## Security context to be added to push-gateway containers +## Having a separate variable as securityContext differs for pods and containers. +containerSecurityContext: {} +# allowPrivilegeEscalation: false +# readOnlyRootFilesystem: true +# runAsUser: 65534 +# runAsNonRoot: true + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Topology spread constraints for pods +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +topologySpreadConstraints: [] + +# Enable this if you're using https://github.com/coreos/prometheus-operator +serviceMonitor: + enabled: false + namespace: monitoring + + # telemetryPath: HTTP resource path from which to fetch metrics. + # Telemetry path, default /metrics, has to be prefixed accordingly if pushgateway sets a route prefix at start-up. + # + telemetryPath: "/metrics" + + # Fallback to the prometheus default unless specified + # interval: 10s + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + # scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. 
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + # tlsConfig: {} + + # bearerTokenFile: + # Fallback to the prometheus default unless specified + # scrapeTimeout: 30s + + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + # Retain the job and instance labels of the metrics pushed to the Pushgateway + # [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape) + honorLabels: true + + ## Metric relabel configs to apply to samples before ingestion. + ## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Relabel configs to apply to samples before ingestion. + ## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +# The values to set in the PodDisruptionBudget spec (minAvailable/maxUnavailable) +# If not set then a PodDisruptionBudget will not be created +podDisruptionBudget: {} + +priorityClassName: + +# Deployment Strategy type +strategy: + type: Recreate + +persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + +extraVolumes: [] + # - name: extra + # emptyDir: {} +extraVolumeMounts: [] + # - name: extra + # mountPath: /usr/share/extras + # readOnly: true + +# Configuration for clusters with restrictive network policies in place: +# - allowAll allows access to the PushGateway from any namespace +# - customSelector is a list of pod/namespaceSelectors to allow access from +# These options are mutually exclusive and the latter will take precedence. 
+networkPolicy: {} + # allowAll: true + # customSelectors: + # - namespaceSelector: + # matchLabels: + # type: admin + # - podSelector: + # matchLabels: + # app: myapp diff --git a/install-bundler/install-prometheus/prometheus/templates/NOTES.txt b/install-bundler/install-prometheus/prometheus/templates/NOTES.txt new file mode 100644 index 00000000..fc03c2a5 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/NOTES.txt @@ -0,0 +1,113 @@ +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} + + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.port }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" .Subcharts.alertmanager }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistence.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if (index .Values "prometheus-node-exporter" "enabled") }} +################################################################################# +###### WARNING: Pod Security Policy has been disabled by default since ##### +###### it deprecated after k8s 1.25+. use ##### +###### (index .Values "prometheus-node-exporter" "rbac" ##### +###### . "pspEnabled") with (index .Values ##### +###### "prometheus-node-exporter" "rbac" "pspAnnotations") ##### +###### in case you still need it. ##### +################################################################################# +{{- end }} + +{{ if (index .Values "prometheus-pushgateway" "enabled") }} +The Prometheus PushGateway can be accessed via port {{ index .Values "prometheus-pushgateway" "service" "port" }} on the following DNS name from within your cluster: +{{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if (index .Values "prometheus-pushgateway" "ingress" "enabled") -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range (index .Values "prometheus-pushgateway" "ingress" "hosts") }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- $pushgateway_svc_type := index .Values "prometheus-pushgateway" "service" "type" -}} +{{- if contains "NodePort" $pushgateway_svc_type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" $pushgateway_svc_type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ index .Values "prometheus-pushgateway" "service" "port" }} +{{- else if contains "ClusterIP" $pushgateway_svc_type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "prometheus.name" (index .Subcharts "prometheus-pushgateway") }},component=pushgateway" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/install-bundler/install-prometheus/prometheus/templates/_helpers.tpl b/install-bundler/install-prometheus/prometheus/templates/_helpers.tpl new file mode 100644 index 00000000..0f4c6318 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/_helpers.tpl @@ -0,0 +1,213 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create labels for prometheus +*/}} +{{- define "prometheus.common.matchLabels" -}} +app.kubernetes.io/name: {{ include "prometheus.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.metaLabels" -}} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +helm.sh/chart: {{ include "prometheus.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: {{ include "prometheus.name" . }} +{{- with .Values.commonMetaLabels}} +{{ toYaml . }} +{{- end }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +app.kubernetes.io/component: {{ .Values.server.name }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified ClusterRole name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.clusterRoleName" -}} +{{- if .Values.server.clusterRoleNameOverride -}} +{{ .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" }} +{{- else -}} +{{ include "prometheus.server.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name for communicating with the user via NOTES.txt +*/}} +{{- define "prometheus.alertmanager.fullname" -}} +{{- template "alertmanager.fullname" .Subcharts.alertmanager -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "prometheus.podDisruptionBudget.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "policy/v1" }} +{{- print "policy/v1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} +{{/* +Return if ingress supports pathType. 
+*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} + {{- default .Release.Namespace .Values.forceNamespace -}} +{{- end }} + +{{/* +Define template prometheus.namespaces producing a list of namespaces to monitor +*/}} +{{- define "prometheus.namespaces" -}} +{{- $namespaces := list }} +{{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName }} + {{- if .Values.server.namespaces -}} + {{- range $ns := join "," .Values.server.namespaces | split "," }} + {{- $namespaces = append $namespaces (tpl $ns $) }} + {{- end -}} + {{- end -}} + {{- if .Values.server.releaseNamespace -}} + {{- $namespaces = append $namespaces (include "prometheus.namespace" .) }} + {{- end -}} +{{- end -}} +{{ mustToJson $namespaces }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/clusterrole.yaml b/install-bundler/install-prometheus/prometheus/templates/clusterrole.yaml new file mode 100644 index 00000000..1ca509e3 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/clusterrole.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ include "prometheus.clusterRoleName" . }} +rules: +{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/clusterrolebinding.yaml b/install-bundler/install-prometheus/prometheus/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..28f4bda7 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ include "prometheus.clusterRoleName" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . 
}} + namespace: {{ include "prometheus.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "prometheus.clusterRoleName" . }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/cm.yaml b/install-bundler/install-prometheus/prometheus/templates/cm.yaml new file mode 100644 index 00000000..a702b527 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/cm.yaml @@ -0,0 +1,95 @@ +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.extraConfigmapLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +data: + allow-snippet-annotations: "false" +{{- $root := . -}} +{{- range $key, $value := .Values.ruleFiles }} + {{ $key }}: {{- toYaml $value | indent 2 }} +{{- end }} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- if or $root.Values.server.tsdb $root.Values.server.exemplars }} + storage: +{{- if $root.Values.server.tsdb }} + tsdb: +{{ $root.Values.server.tsdb | toYaml | indent 8 }} +{{- end }} +{{- if $root.Values.server.exemplars }} + exemplars: +{{ $root.Values.server.exemplars | toYaml | indent 8 }} +{{- end }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + regex: {{ $root.Release.Name }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] + regex: {{ default "alertmanager" $root.Values.alertmanager.nameOverride | trunc 63 | trimSuffix "-" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- 
end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/deploy.yaml b/install-bundler/install-prometheus/prometheus/templates/deploy.yaml new file mode 100644 index 00000000..cf243ea0 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/deploy.yaml @@ -0,0 +1,353 @@ +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + {{- if .Values.configmapReload.prometheus.image.digest }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" + {{- else }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + {{- end }} + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --watched-dir=/etc/config + {{- $default_url := "http://127.0.0.1:9090/-/reload" }} + {{- with .Values.server.prefixURL }} + {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} + {{- end }} + - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --watched-dir={{ . 
}} + {{- end }} + {{- with .Values.configmapReload.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- with .Values.configmapReload.prometheus.extraVolumeMounts }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + {{- if .Values.server.image.digest }} + image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" + {{- else }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion}}" + {{- end }} + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- with .Values.server.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.portName }} + name: {{ .Values.server.portName }} + {{- end }} + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . 
| nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.server.hostNetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + {{- else }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + {{- with .Values.server.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . 
| nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/extra-manifests.yaml b/install-bundler/install-prometheus/prometheus/templates/extra-manifests.yaml new file mode 100644 index 00000000..2b21b710 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl . $ }} +{{ end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/headless-svc.yaml b/install-bundler/install-prometheus/prometheus/templates/headless-svc.yaml new file mode 100644 index 00000000..df9db991 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/headless-svc.yaml @@ -0,0 +1,35 @@ +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless + namespace: {{ include "prometheus.namespace" . 
}} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/ingress.yaml b/install-bundler/install-prometheus/prometheus/templates/ingress.yaml new file mode 100644 index 00000000..fc2468d8 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/network-policy.yaml b/install-bundler/install-prometheus/prometheus/templates/network-policy.yaml new file mode 100644 index 00000000..3254ffc0 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/network-policy.yaml @@ -0,0 +1,16 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/pdb.yaml b/install-bundler/install-prometheus/prometheus/templates/pdb.yaml new file mode 100644 index 00000000..7ffe6730 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/pdb.yaml @@ -0,0 +1,15 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +{{- $pdbSpec := omit .Values.server.podDisruptionBudget "enabled" }} +apiVersion: {{ template "prometheus.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + {{- toYaml $pdbSpec | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/psp.yaml b/install-bundler/install-prometheus/prometheus/templates/psp.yaml new file mode 100644 index 00000000..5776e254 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/psp.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.podSecurityPolicy.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/pvc.yaml b/install-bundler/install-prometheus/prometheus/templates/pvc.yaml new file mode 100644 index 00000000..25dfe021 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/pvc.yaml @@ -0,0 +1,40 @@ +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- if .Values.server.persistentVolume.selector }} + selector: + {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- if .Values.server.persistentVolume.volumeName }} + volumeName: "{{ .Values.server.persistentVolume.volumeName }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/rolebinding.yaml b/install-bundler/install-prometheus/prometheus/templates/rolebinding.yaml new file mode 100644 index 00000000..721b3881 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- range include "prometheus.namespaces" . | fromJsonArray }} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} + namespace: {{ include "prometheus.namespace" $ }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/service.yaml b/install-bundler/install-prometheus/prometheus/templates/service.yaml new file mode 100644 index 00000000..1aa384eb --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/service.yaml @@ -0,0 +1,60 @@ +{{- if .Values.server.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/templates/serviceaccount.yaml b/install-bundler/install-prometheus/prometheus/templates/serviceaccount.yaml new file mode 100644 index 00000000..273aa7ee --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ include "prometheus.namespace" . }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/sts.yaml b/install-bundler/install-prometheus/prometheus/templates/sts.yaml new file mode 100644 index 00000000..811190da --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/sts.yaml @@ -0,0 +1,375 @@ +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + {{- if semverCompare ">= 1.27.x" (include "prometheus.kubeVersion" .) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsDelete }} + whenScaled: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsScale }} + {{- end }} + serviceName: {{ template "prometheus.server.fullname" . 
}}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + {{- if .Values.configmapReload.prometheus.image.digest }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" + {{- else }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + {{- end }} + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --watched-dir=/etc/config + {{- $default_url := "http://127.0.0.1:9090/-/reload" }} + {{- with .Values.server.prefixURL }} + {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} + {{- end }} + - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --watched-dir={{ . }} + {{- end }} + {{- with .Values.configmapReload.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + {{- if .Values.server.image.digest }} + image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" + {{- else }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- with .Values.server.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.portName }} + name: {{ .Values.server.portName }} + {{- end }} + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . 
| nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: {{ ternary .Values.server.persistentVolume.statefulSetNameOverride "storage-volume" (and .Values.server.persistentVolume.enabled (not (empty .Values.server.persistentVolume.statefulSetNameOverride))) }} + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + {{- with .Values.server.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ .Values.server.persistentVolume.statefulSetNameOverride | default "storage-volume" }} + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + {{- if .Values.server.persistentVolume.labels }} + labels: +{{ toYaml .Values.server.persistentVolume.labels | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/install-bundler/install-prometheus/prometheus/templates/vpa.yaml b/install-bundler/install-prometheus/prometheus/templates/vpa.yaml new file mode 100644 index 00000000..cd07ad8b --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/templates/vpa.yaml @@ -0,0 +1,26 @@ +{{- if .Values.server.verticalAutoscaler.enabled -}} +{{- if .Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler" }} +apiVersion: autoscaling.k8s.io/v1 +{{- else }} +apiVersion: autoscaling.k8s.io/v1beta2 +{{- end }} +kind: VerticalPodAutoscaler +metadata: + name: {{ template "prometheus.server.fullname" . 
}}-vpa + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} diff --git a/install-bundler/install-prometheus/prometheus/values.yaml b/install-bundler/install-prometheus/prometheus/values.yaml new file mode 100644 index 00000000..356ade22 --- /dev/null +++ b/install-bundler/install-prometheus/prometheus/values.yaml @@ -0,0 +1,1199 @@ +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: [] +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + server: + create: true + name: "" + annotations: {} + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + ## URL for configmap-reload to use for reloads + ## + reloadUrl: "" + + ## env sets environment variables to pass to the container. Can be set as name/value pairs, + ## read from secrets or configmaps. + env: [] + # - name: SOMEVAR + # value: somevalue + # - name: PASSWORD + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: password + # optional: false + + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.67.0 + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). + digest: "" + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + ## Additional configmap-reload volume mounts + ## + extraVolumeMounts: [] + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + ## Security context to be added to configmap-reload container + containerSecurityContext: {} + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +server: + ## Prometheus server container name + ## + name: server + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. 
+ ## + # useExistingClusterRoleName: nameofclusterrole + + ## If set it will override prometheus.server.fullname value for ClusterRole and ClusterRoleBinding + ## + clusterRoleNameOverride: "" + + # Enable only the release namespace for monitoring. By default all namespaces are monitored. + # If releaseNamespace and namespaces are both set a merged list will be monitored. + releaseNamespace: false + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might looks like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + # if not set appVersion field from Chart.yaml is used + tag: "" + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). + digest: "" + pullPolicy: IfNotPresent + + ## Prometheus server command + ## + command: [] + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + # List of flags to override default parameters, e.g: + # - --enable-feature=agent + # - --storage.agent.retention.max-time=30m + defaultFlagsOverride: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. 
+ - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 7s + ## How long until a scrape request times out + ## + scrape_timeout: 4s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb + ## + tsdb: {} + # out_of_order_time_window: 0s + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#exemplars + ## Must be enabled via --enable-feature=exemplar-storage + ## + exemplars: {} + # max_exemplars: 100000 + + ## Custom HTTP headers for Liveness/Readiness/Startup Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + # - name: "Authorization" + # value: "Bearer ABCDEabcde12345" + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## Extra labels for Prometheus server ConfigMap (ConfigMap that holds serverFiles) + extraConfigmapLabels: {} + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + strategy: + type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## Pod topology spread constraints + ## ref. https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + # minAvailable: 1 + ## unhealthyPodEvictionPolicy is available since 1.27.0 (beta) + ## https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy + # unhealthyPodEvictionPolicy: IfHealthyBudget + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## If set it will override the name of the created persistent volume claim + ## generated by the stateful set. 
+ ## + statefulSetNameOverride: "" + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume labels + ## + labels: {} + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 200Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + ## Persistent Volume Name + ## Useful if Persistent Volumes have been provisioned in advance and you want to use a specific one + ## + # volumeName: "" + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Number of old history to retain to allow rollback + ## Default Kubernetes value is set to 10 + ## + revisionHistoryLimit: 10 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Statefulset's persistent volume claim retention policy + ## pvcDeleteOnStsDelete and pvcDeleteOnStsScale determine whether + ## statefulset's PVCs are deleted (true) or retained (false) on scaling down + ## and deleting statefulset, respectively. Requires 1.27.0+. + ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + pvcDeleteOnStsDelete: false + pvcDeleteOnStsScale: false + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + tcpSocketProbeEnabled: false + probeScheme: HTTP + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + startupProbe: + enabled: false + periodSeconds: 5 + failureThreshold: 30 + timeoutSeconds: 10 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + requests: {} + limits: {} + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + # When hostNetwork is enabled, this will set to ClusterFirstWithHostNet automatically + dnsPolicy: ClusterFirst + + # Use hostPort + # hostPort: 9090 + + # Use portName + portName: "" + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + ## Security context to be added to server container + ## + containerSecurityContext: {} + + service: + ## If false, no Service will be created for the Prometheus server + ## + enabled: true + + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + 
gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. + statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) +ruleFiles: {} + +## Prometheus server ConfigMap entries +## +serverFiles: + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Metric relabel configs to apply to samples before ingestion. + # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + # metric_relabel_configs: + # - action: labeldrop + # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + honor_labels: true + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, + # except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Example Scrape config for pods which should be scraped slower. 
An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: "" + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: {} + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. 
+ ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: "" + +# Extra manifests to deploy as an array +extraManifests: [] + # - | + # apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" + +# Configuration of subcharts defined in Chart.yaml + +## alertmanager sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager +## +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + persistence: + size: 2Gi + + podSecurityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +kube-state-metrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## promtheus-node-exporter sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter +## +prometheus-node-exporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + rbac: + pspEnabled: false + + containerSecurityContext: + allowPrivilegeEscalation: false + +## pprometheus-pushgateway sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway +## +prometheus-pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + # Optional service annotations + serviceAnnotations: + prometheus.io/probe: pushgateway diff --git a/install-bundler/install-rabbitmq/custom-values.yaml b/install-bundler/install-rabbitmq/custom-values.yaml new file mode 100644 index 00000000..c64db027 --- /dev/null +++ b/install-bundler/install-rabbitmq/custom-values.yaml @@ -0,0 +1,58 @@ + +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + username: user + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + password: "" + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +##https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/v3.12.0/rabbitmq_delayed_message_exchange-3.12.0.ez +communityPlugins: "https://github.com/rabbitmq/rabbitmq-delayed-message-exchange/releases/download/v3.12.0/rabbitmq_delayed_message_exchange-3.12.0.ez" +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap rabbitmq_delayed_message_exchange" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.addressType Switch clustering mode. 
Either `ip` or `hostname`
+  ##
+  addressType: hostname
+  ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created
+  ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance
+  ##
+  rebalance: true
+  ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order).
+  ## forceBoot executes 'rabbitmqctl force_boot' to force boot a cluster that shut down unexpectedly in an unknown order
+  ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot
+  ##
+  forceBoot: true
+  ## @param clustering.partitionHandling Switch Partition Handling Strategy. Either `autoheal` or `pause-minority` or `pause-if-all-down` or `ignore`
+  ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling
+  ##
+  partitionHandling: autoheal
+
+resources:
+  ## Example (requests must not exceed limits):
+  limits:
+    cpu: 512m
+    memory: 750Mi
+  requests:
+    cpu: 250m
+    memory: 250Mi
+
+replicaCount: 3
diff --git a/install-bundler/install-rabbitmq/install.sh b/install-bundler/install-rabbitmq/install.sh
new file mode 100755
index 00000000..578b153e
--- /dev/null
+++ b/install-bundler/install-rabbitmq/install.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+set -e
+
+NAMESPACE=$1
+REPLICA_COUNT=$2
+# Check if the required arguments are passed
+if [ "$#" -ne 2 ]; then
+  echo "Usage: $0 <namespace> <replica-count> for RabbitMQ"
+  exit 1
+fi
+
+HELM_RELEASE="rabbitmq"
+
+export HELM_EXPERIMENTAL_OCI=1
+
+printf "\n${GREEN} ####### Deploying $HELM_RELEASE to $NAMESPACE ${NC}####### \n";
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/rabbitmq/" \
+  -f "$DIR/rabbitmq/values.yaml" \
+  -f "$DIR/custom-values.yaml" \
+  -n "$NAMESPACE" \
+  --set auth.username=RMQUsername \
+  --set auth.password=RMQpassword \
+  --set auth.erlangCookie=secretcookie \
+  --set persistence.storageClass="" \
+  --set metrics.enabled=true \
+  --set metrics.serviceMonitor.enabled=false \
+  --set replicaCount="$REPLICA_COUNT" \
+  --set clustering.rebalance=true \
+  --set clustering.forceBoot=true \
+  --set nameOverride=$HELM_RELEASE
+printf "[RABBITMQ] deployment completed\n";
+echo "To connect to RabbitMQ:"
+echo "To test connections:"
+echo "kubectl run curl-test --namespace $NAMESPACE --image=busybox --rm -it -- /bin/sh"
+echo "curl -i -u RMQUsername:RMQpassword http://rabbitmq.$NAMESPACE.svc.cluster.local:15672/api/overview"
\ No newline at end of file
diff --git a/install-bundler/install-rabbitmq/rabbitmq/.helmignore b/install-bundler/install-rabbitmq/rabbitmq/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/install-bundler/install-rabbitmq/rabbitmq/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/install-bundler/install-rabbitmq/rabbitmq/Chart.lock b/install-bundler/install-rabbitmq/rabbitmq/Chart.lock new file mode 100644 index 00000000..8bbc74d6 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.13.3 +digest: sha256:9a971689db0c66ea95ac2e911c05014c2b96c6077c991131ff84f2982f88fb83 +generated: "2023-10-18T02:39:00.37546044Z" diff --git a/install-bundler/install-rabbitmq/rabbitmq/Chart.yaml b/install-bundler/install-rabbitmq/rabbitmq/Chart.yaml new file mode 100644 index 00000000..0ffbe53c --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + category: Infrastructure + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r90 + - name: rabbitmq + image: docker.io/bitnami/rabbitmq:3.12.7-debian-11-r0 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 3.12.7 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: RabbitMQ is an open source general-purpose message broker that is designed + for consistent, highly-available messaging scenarios (both synchronous and asynchronous). +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +keywords: +- rabbitmq +- message queue +- AMQP +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: rabbitmq +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq +version: 12.3.0 diff --git a/install-bundler/install-rabbitmq/rabbitmq/README.md b/install-bundler/install-rabbitmq/rabbitmq/README.md new file mode 100644 index 00000000..804cddef --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/README.md @@ -0,0 +1,747 @@ + + +# RabbitMQ packaged by Bitnami + +RabbitMQ is an open source general-purpose message broker that is designed for consistent, highly-available messaging scenarios (both synchronous and asynchronous). + +[Overview of RabbitMQ](https://www.rabbitmq.com) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use RabbitMQ in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. 
+ +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### RabbitMQ Image parameters + +| Name | Description | Value | +| ------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | RabbitMQ image registry | `REGISTRY_NAME` | +| `image.repository` | RabbitMQ image repository | `REPOSITORY_NAME/rabbitmq` | +| `image.digest` | RabbitMQ image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | RabbitMQ image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | + +### Common parameters + +| Name | Description | Value | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| `nameOverride` | String to partially override rabbitmq.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `servicenameOverride` | String to partially override headless service name | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| `hostAliases` | Deployment pod host aliases | `[]` | +| `dnsPolicy` | DNS Policy for pod | `""` | +| `dnsConfig` | DNS Configuration pod | `{}` | +| `auth.username` | RabbitMQ application username | `user` | +| `auth.password` | RabbitMQ application password | `""` | +| `auth.securePassword` | Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. | `true` | +| `auth.existingPasswordSecret` | Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) | `""` | +| `auth.enableLoopbackUser` | If enabled, the user `auth.username` can only connect from localhost | `false` | +| `auth.erlangCookie` | Erlang cookie to determine whether different nodes are allowed to communicate with each other | `""` | +| `auth.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) | `""` | +| `auth.tls.enabled` | Enable TLS support on RabbitMQ | `false` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `auth.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `auth.tls.sslOptionsVerify` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| `verify_peer` | +| `auth.tls.sslOptionsPassword.enabled` | Enable usage of password for private Key | `false` | +| `auth.tls.sslOptionsPassword.existingSecret` | Name of existing Secret containing the sslOptionsPassword | `""` | +| `auth.tls.sslOptionsPassword.key` | Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret | `""` | +| `auth.tls.sslOptionsPassword.password` | Use this string as Password. If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored | `""` | +| `auth.tls.caCertificate` | Certificate Authority (CA) bundle content | `""` | +| `auth.tls.serverCertificate` | Server certificate content | `""` | +| `auth.tls.serverKey` | Server private key content | `""` | +| `auth.tls.existingSecret` | Existing secret with certificate content to RabbitMQ credentials | `""` | +| `auth.tls.existingSecretFullChain` | Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. | `false` | +| `auth.tls.overrideCaCertificate` | Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. | `""` | +| `logs` | Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable | `-` | +| `ulimitNofiles` | RabbitMQ Max File Descriptors | `65536` | +| `maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `""` | +| `onlineSchedulers` | RabbitMQ online scheduler threads | `""` | +| `memoryHighWatermark.enabled` | Enable configuring Memory high watermark on RabbitMQ | `false` | +| `memoryHighWatermark.type` | Memory high watermark type. Either `absolute` or `relative` | `relative` | +| `memoryHighWatermark.value` | Memory high watermark value | `0.4` | +| `plugins` | List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `communityPlugins` | List of Community plugins (URLs) to be downloaded during container initialization | `""` | +| `extraPlugins` | Extra plugins to enable (single string containing a space-separated list) | `rabbitmq_auth_backend_ldap` | +| `clustering.enabled` | Enable RabbitMQ clustering | `true` | +| `clustering.addressType` | Switch clustering mode. Either `ip` or `hostname` | `hostname` | +| `clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `clustering.forceBoot` | Force boot of an unexpectedly shut down cluster (in an unexpected order). | `false` | +| `clustering.partitionHandling` | Switch Partition Handling Strategy. 
Either `autoheal` or `pause_minority` or `pause_if_all_down` or `ignore` | `autoheal` | +| `loadDefinition.enabled` | Enable loading a RabbitMQ definitions file to configure RabbitMQ | `false` | +| `loadDefinition.file` | Name of the definitions file | `/app/load_definition.json` | +| `loadDefinition.existingSecret` | Existing secret with the load definitions file | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `lifecycleHooks` | Overwrite livecycle for the RabbitMQ container(s) to automate configuration before or after startup | `{}` | +| `terminationGracePeriodSeconds` | Default duration in seconds k8s waits for container to exit before sending kill signal. | `120` | +| `extraEnvVars` | Extra environment variables to add to RabbitMQ pods | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra environment variables | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra environment variables (in case of sensitive data) | `""` | +| `containerPorts.amqp` | | `5672` | +| `containerPorts.amqpTls` | | `5671` | +| `containerPorts.dist` | | `25672` | +| `containerPorts.manager` | | `15672` | +| `containerPorts.epmd` | | `4369` | +| `containerPorts.metrics` | | `9419` | +| `initScripts` | Dictionary of init scripts. Evaluated as a template. | `{}` | +| `initScriptsCM` | ConfigMap with the init scripts. Evaluated as a template. | `""` | +| `initScriptsSecret` | Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. | `""` | +| `extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `[]` | +| `configuration` | RabbitMQ Configuration file content: required cluster configuration | `""` | +| `tcpListenOptions.backlog` | Maximum size of the unaccepted TCP connections queue | `128` | +| `tcpListenOptions.nodelay` | When set to true, deactivates Nagle's algorithm. Default is true. Highly recommended for most users. | `true` | +| `tcpListenOptions.linger.lingerOn` | Enable Server socket lingering | `true` | +| `tcpListenOptions.linger.timeout` | Server Socket lingering timeout | `0` | +| `tcpListenOptions.keepalive` | When set to true, enables TCP keepalives | `false` | +| `configurationExistingSecret` | Existing secret with the configuration to use as rabbitmq.conf. | `""` | +| `extraConfiguration` | Configuration file content: extra configuration to be appended to RabbitMQ configuration | `""` | +| `extraConfigurationExistingSecret` | Existing secret with the extra configuration to append to `configuration`. | `""` | +| `advancedConfiguration` | Configuration file content: advanced configuration | `""` | +| `advancedConfigurationExistingSecret` | Existing secret with the advanced configuration file (must contain a key `advanced.config`). | `""` | +| `featureFlags` | that controls what features are considered to be enabled or available on all cluster nodes. | `""` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.uri` | LDAP connection string. | `""` | +| `ldap.servers` | List of LDAP servers hostnames. This is valid only if ldap.uri is not set | `[]` | +| `ldap.port` | LDAP servers port. This is valid only if ldap.uri is not set | `""` | +| `ldap.userDnPattern` | Pattern used to translate the provided username into a value to be used for the LDAP bind. 
| `""` | +| `ldap.binddn` | DN of the account used to search in the LDAP server. | `""` | +| `ldap.bindpw` | Password for binddn account. | `""` | +| `ldap.basedn` | Base DN path where binddn account will search for the users. | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.uidField` | Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration | `""` | +| `ldap.authorisationEnabled` | Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings | `false` | +| `ldap.tls.enabled` | Enabled TLS configuration. | `false` | +| `ldap.tls.startTls` | Use STARTTLS instead of LDAPS. | `false` | +| `ldap.tls.skipVerify` | Skip any SSL verification (hostanames or certificates) | `false` | +| `ldap.tls.verify` | Verify connection. Valid values are 'verify_peer' or 'verify_none' | `verify_peer` | +| `ldap.tls.certificatesMountPath` | Where LDAP certifcates are mounted. | `/opt/bitnami/rabbitmq/ldap/certs` | +| `ldap.tls.certificatesSecret` | Secret with LDAP certificates. | `""` | +| `ldap.tls.CAFilename` | CA certificate filename. Should match with the CA entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certFilename` | Client certificate filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `ldap.tls.certKeyFilename` | Client Key filename to authenticate against the LDAP server. Should match with certificate the entry key in the ldap.tls.certificatesSecret. | `""` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes . | `[]` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | +| `extraSecretsPrependReleaseName` | Set this flag to true if extraSecrets should be created with prepended. | `false` | + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ---------------- | +| `replicaCount` | Number of RabbitMQ replicas to deploy | `1` | +| `schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `podLabels` | RabbitMQ Pod labels. Evaluated as a template | `{}` | +| `podAnnotations` | RabbitMQ Pod annotations. Evaluated as a template | `{}` | +| `updateStrategy.type` | Update strategy type for RabbitMQ statefulset | `RollingUpdate` | +| `statefulsetLabels` | RabbitMQ statefulset labels. Evaluated as a template | `{}` | +| `statefulsetAnnotations` | RabbitMQ statefulset annotations. Evaluated as a template | `{}` | +| `priorityClassName` | Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand | `""` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment. Evaluated as a template | `{}` | +| `nodeSelector` | Node labels for pod assignment. Evaluated as a template | `{}` | +| `tolerations` | Tolerations for pod assignment. Evaluated as a template | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podSecurityContext.enabled` | Enable RabbitMQ pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set RabbitMQ pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled RabbitMQ containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set RabbitMQ containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set RabbitMQ container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's privilege escalation | `false` | +| `containerSecurityContext.capabilities.drop` | Set container's Security Context runAsNonRoot | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `resources.limits` | The resources limits for RabbitMQ containers | `{}` | +| `resources.requests` | The requested resources for RabbitMQ containers | `{}` | +| `livenessProbe.enabled` | Enable livenessProbe | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `120` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `20` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `20` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `20` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `customStartupProbe` | Define a custom startup probe | `{}` | +| `initContainers` | Add init containers to the RabbitMQ pod | `[]` | +| `sidecars` | Add sidecar containers to the RabbitMQ pod | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | 
`1` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### RBAC parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------ | ------ | +| `serviceAccount.create` | Enable creation of ServiceAccount for RabbitMQ pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | `""` | +| `serviceAccount.automountServiceAccountToken` | Auto-mount the service account token in the pod | `true` | +| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | +| `rbac.create` | Whether RBAC rules should be created | `true` | + +### Persistence parameters + +| Name | Description | Value | +| --------------------------- | ------------------------------------------------ | -------------------------- | +| `persistence.enabled` | Enable RabbitMQ data persistence using PVC | `true` | +| `persistence.storageClass` | PVC Storage Class for RabbitMQ data volume | `""` | +| `persistence.selector` | Selector to match an existing Persistent Volume | `{}` | +| `persistence.accessModes` | PVC Access Modes for RabbitMQ data volume | `["ReadWriteOnce"]` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaims | `""` | +| `persistence.mountPath` | The path the volume will be mounted at | `/bitnami/rabbitmq/mnesia` | +| `persistence.subPath` | The subdirectory of the volume to mount to | `""` | +| `persistence.size` | PVC Storage Request for RabbitMQ data volume | `8Gi` | +| `persistence.annotations` | Persistence annotations. Evaluated as a template | `{}` | +| `persistence.labels` | Persistence labels. Evaluated as a template | `{}` | + +### Exposure parameters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.portEnabled` | Amqp port. Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. 
| `true` | +| `service.distPortEnabled` | Erlang distribution server port | `true` | +| `service.managerPortEnabled` | RabbitMQ Manager port | `true` | +| `service.epmdPortEnabled` | RabbitMQ EPMD Discovery service port | `true` | +| `service.ports.amqp` | Amqp service port | `5672` | +| `service.ports.amqpTls` | Amqp TLS service port | `5671` | +| `service.ports.dist` | Erlang distribution service port | `25672` | +| `service.ports.manager` | RabbitMQ Manager service port | `15672` | +| `service.ports.metrics` | RabbitMQ Prometheues metrics service port | `9419` | +| `service.ports.epmd` | EPMD Discovery service port | `4369` | +| `service.portNames.amqp` | Amqp service port name | `amqp` | +| `service.portNames.amqpTls` | Amqp TLS service port name | `amqp-tls` | +| `service.portNames.dist` | Erlang distribution service port name | `dist` | +| `service.portNames.manager` | RabbitMQ Manager service port name | `http-stats` | +| `service.portNames.metrics` | RabbitMQ Prometheues metrics service port name | `metrics` | +| `service.portNames.epmd` | EPMD Discovery service port name | `epmd` | +| `service.nodePorts.amqp` | Node port for Ampq | `""` | +| `service.nodePorts.amqpTls` | Node port for Ampq TLS | `""` | +| `service.nodePorts.dist` | Node port for Erlang distribution | `""` | +| `service.nodePorts.manager` | Node port for RabbitMQ Manager | `""` | +| `service.nodePorts.epmd` | Node port for EPMD Discovery | `""` | +| `service.nodePorts.metrics` | Node port for RabbitMQ Prometheues metrics | `""` | +| `service.extraPorts` | Extra ports to expose in the service | `[]` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is `LoadBalancer` | `[]` | +| `service.externalIPs` | Set the ExternalIPs | `[]` | +| `service.externalTrafficPolicy` | Enable client source IP preservation | `Cluster` | +| `service.loadBalancerIP` | Set the LoadBalancerIP | `""` | +| `service.clusterIP` | Kubernetes service Cluster IP | `""` | +| `service.labels` | Service labels. Evaluated as a template | `{}` | +| `service.annotations` | Service annotations. Evaluated as a template | `{}` | +| `service.annotationsHeadless` | Headless Service annotations. Evaluated as a template | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.path` | Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. | `/` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.hostname` | Default host for the ingress resource | `rabbitmq.local` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the hostname defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Set this to true in order to create a TLS secret for this ingress record | `false` | +| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. 
| `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraRules` | The list of additional rules to be added to this ingress record. Evaluated as a template | `[]` | +| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.existingSecret` | It is you own the certificate as secret. | `""` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. | `[]` | + +### Metrics Parameters + +| Name | Description | Value | +| ------------------------------------------ | -------------------------------------------------------------------------------------- | --------------------- | +| `metrics.enabled` | Enable exposing RabbitMQ metrics to be gathered by Prometheus | `false` | +| `metrics.plugins` | Plugins to enable Prometheus metrics in RabbitMQ | `rabbitmq_prometheus` | +| `metrics.podAnnotations` | Annotations for enabling prometheus to access the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Specify the namespace in which the serviceMonitor resource will be created | `""` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.targetLabels` | Used to keep given service's labels in target | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Used to keep given pod's labels in target | `{}` | +| `metrics.serviceMonitor.path` | Define the path used by ServiceMonitor to scrap metrics | `""` | +| `metrics.serviceMonitor.params` | Define the HTTP URL parameters used by ServiceMonitor | `{}` | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `{}` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.annotations` | Extra annotations for the ServiceMonitor | `{}` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | `""` | +| `metrics.prometheusRule.rules` | List of rules, used as template by Helm. 
| `[]` | + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + +The above parameters map to the env variables defined in [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq). For more information please refer to the [bitnami/rabbitmq](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.username=admin,auth.password=secretpassword,auth.erlangCookie=secretcookie \ + oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. 
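+
+As an illustration only (the values below are placeholders, not chart defaults), a minimal override file built from the parameters documented above might look like this:
+
+```yaml
+# values.yaml -- minimal override sketch; replace the credentials with your own
+auth:
+  username: admin
+  password: secretpassword
+  erlangCookie: secretcookie
+replicaCount: 3
+clustering:
+  enabled: true
+  forceBoot: true
+metrics:
+  enabled: true
+persistence:
+  size: 8Gi
+```
+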
+> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Set pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +### Scale horizontally + +To horizontally scale this chart once it has been deployed, two options are available: + +- Use the `kubectl scale` command. +- Upgrade the chart modifying the `replicaCount` parameter. + +> NOTE: It is mandatory to specify the password and Erlang cookie that was set the first time the chart was installed when upgrading the chart. + +When scaling down the solution, unnecessary RabbitMQ nodes are automatically stopped, but they are not removed from the cluster. You need to manually remove them by running the `rabbitmqctl forget_cluster_node` command. + +Refer to the chart documentation for [more information on scaling the Rabbit cluster horizontally](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/scale-deployment/). + +### Enable TLS support + +To enable TLS support, first generate the certificates as described in the [RabbitMQ documentation for SSL certificate generation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). + +Once the certificates are generated, you have two alternatives: + +- Create a secret with the certificates and associate the secret when deploying the chart +- Include the certificates in the *values.yaml* file when deploying the chart + +Set the *auth.tls.failIfNoPeerCert* parameter to *false* to allow a TLS connection if the client fails to provide a certificate. + +Set the *auth.tls.sslOptionsVerify* to *verify_peer* to force a node to perform peer verification. When set to *verify_none*, peer verification will be disabled and certificate exchange won't be performed. + +Refer to the chart documentation for [more information and examples of enabling TLS and using Let's Encrypt certificates](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/enable-tls-ingress/). + +### Load custom definitions + +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](https://www.rabbitmq.com/management.html#load-definitions). + +Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. 
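+
+As a sketch (the secret name `load-definition` and the local file path are illustrative, not mandated by the chart), such a secret could be created from a local definitions file and then referenced through the `loadDefinition.existingSecret` parameter:
+
+```console
+kubectl create secret generic load-definition \
+  --from-file=load_definition.json=./load_definition.json \
+  --namespace default
+```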
+ +Next, specify the `load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. Any load definitions specified will be available within in the container at `/app`. + +> NOTE: Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. These secrets can also be templated to use supplied chart values. + +Refer to the chart documentation for [more information and configuration examples of loading custom definitions](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/load-files/). + +### Configure LDAP support + +LDAP support can be enabled in the chart by specifying the `ldap.*` parameters while creating a release. Refer to the chart documentation for [more information and a configuration example](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/configure-ldap/). + +### Configure memory high watermark + +It is possible to configure a memory high watermark on RabbitMQ to define [memory thresholds](https://www.rabbitmq.com/memory.html#threshold) using the `memoryHighWatermark.*` parameters. To do so, you have two alternatives: + +- Set an absolute limit of RAM to be used on each RabbitMQ node, as shown in the configuration example below: + +```text +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="absolute" +memoryHighWatermark.value="512MB" +``` + +- Set a relative limit of RAM to be used on each RabbitMQ node. To enable this feature, define the memory limits at pod level too. An example configuration is shown below: + +```text +memoryHighWatermark.enabled="true" +memoryHighWatermark.type="relative" +memoryHighWatermark.value="0.4" +resources.limits.memory="2Gi" +``` + +### Add extra environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `.extraEnvVarsCM` or the `extraEnvVarsSecret` properties. + +### Use plugins + +The Bitnami Docker RabbitMQ image ships a set of plugins by default. By default, this chart enables `rabbitmq_management` and `rabbitmq_peer_discovery_k8s` since they are required for RabbitMQ to work on K8s. + +To enable extra plugins, set the `extraPlugins` parameter with the list of plugins you want to enable. In addition to this, the `communityPlugins` parameter can be used to specify a list of URLs (separated by spaces) for custom plugins for RabbitMQ. + +Refer to the chart documentation for [more information on using RabbitMQ plugins](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/configuration/use-plugins/). + +### Advanced logging + +In case you want to configure RabbitMQ logging set `logs` value to false and set the log config in extraConfiguration following the [official documentation](https://www.rabbitmq.com/logging.html#log-file-location). 
+ +An example: + +```yaml +logs: false # custom logging +extraConfiguration: | + log.default.level = warning + log.file = false + log.console = true + log.console.level = warning + log.console.formatter = json +``` + +### Recover the cluster from complete shutdown + +> IMPORTANT: Some of these procedures can lead to data loss. Always make a backup beforehand. + +The RabbitMQ cluster is able to support multiple node failures but, in a situation in which all the nodes are brought down at the same time, the cluster might not be able to self-recover. + +This happens if the pod management policy of the statefulset is not `Parallel` and the last pod to be running wasn't the first pod of the statefulset. If that happens, update the pod management policy to recover a healthy state: + +```console +$ kubectl delete statefulset STATEFULSET_NAME --cascade=false +helm upgrade RELEASE_NAME oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +For a faster resyncronization of the nodes, you can temporarily disable the readiness probe by setting `readinessProbe.enabled=false`. Bear in mind that the pods will be exposed before they are actually ready to process requests. + +If the steps above don't bring the cluster to a healthy state, it could be possible that none of the RabbitMQ nodes think they were the last node to be up during the shutdown. In those cases, you can force the boot of the nodes by specifying the `clustering.forceBoot=true` parameter (which will execute [`rabbitmqctl force_boot`](https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot) in each pod): + +```console +helm upgrade RELEASE_NAME oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq \ + --set podManagementPolicy=Parallel \ + --set clustering.forceBoot=true \ + --set replicaCount=NUMBER_OF_REPLICAS \ + --set auth.password=PASSWORD \ + --set auth.erlangCookie=ERLANG_COOKIE +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +More information: [Clustering Guide: Restarting](https://www.rabbitmq.com/clustering.html#restarting). + +### Known issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. + +The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. 
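+
+For dynamically provisioned volumes, the documented `persistence.*` parameters can be tuned at install time. For example (the storage class name below is only a placeholder and must exist in your cluster):
+
+```console
+helm install my-release \
+  --set persistence.enabled=true \
+  --set persistence.storageClass=standard \
+  --set persistence.size=16Gi \
+  oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq
+```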
+ +### Use existing PersistentVolumeClaims + +1. Create the PersistentVolume +2. Create the PersistentVolumeClaim +3. Install the chart + +```console +helm install my-release --set persistence.existingClaim=PVC_NAME oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. + +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +### Configure the default user/vhost + +If you want to create default user/vhost and set the default permission. you can use `extraConfiguration`: + +```yaml +auth: + username: default-user +extraConfiguration: |- + default_vhost = default-vhost + default_permissions.configure = .* + default_permissions.read = .* + default_permissions.write = .* +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +It's necessary to set the `auth.password` and `auth.erlangCookie` parameters when upgrading for readiness/liveness probes to work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and the cookie, and run the command below to upgrade your chart: + +```console +helm upgrade my-release oci://REGISTRY_NAME/REPOSITORY_NAME/rabbitmq --set auth.password=[PASSWORD] --set auth.erlangCookie=[RABBITMQ_ERLANG_COOKIE] +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +| Note: you need to substitute the placeholders [PASSWORD] and [RABBITMQ_ERLANG_COOKIE] with the values obtained in the installation notes. + +### To 11.0.0 + +This major version changes the default RabbitMQ image from 3.10.x to 3.11.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.10 to 3.11. + +### To 10.0.0 + +This major version changes the default RabbitMQ image from 3.9.x to 3.10.x. Follow the [official instructions](https://www.rabbitmq.com/upgrade.html) to upgrade from 3.9 to 3.10. + +### To 9.0.0 + +This major release renames several values in this chart and adds missing features, in order to be aligned with the rest of the assets in the Bitnami charts repository. + + .dist + .manager + .metrics + .epmd + +- `service.port` has been renamed as `service.ports.amqp`. +- `service.portName` has been renamed as `service.portNames.amqp`. 
+- `service.nodePort`has been renamed as `service.nodePorts.amqp`. +- `service.tlsPort` has been renamed as `service.ports.amqpTls`. +- `service.tlsPortName` has been renamed as `service.portNames.amqpTls`. +- `service.tlsNodePort` has been renamed as `service.nodePorts.amqpTls`. +- `service.epmdPortName` has been renamed as `service.portNames.epmd`. +- `service.epmdNodePort` has been renamed as `service.nodePorts.epmd`. +- `service.distPort` has been renamed as `service.ports.dist`. +- `service.distPortName` has been renamed as `service.portNames.dist`. +- `service.distNodePort` has been renamed as `service.nodePorts.dist`. +- `service.managerPort` has been renamed as `service.ports.manager`. +- `service.managerPortName` has been renamed as `service.portNames.manager`. +- `service.managerNodePort` has been renamed as `service.nodePorts.manager`. +- `service.metricsPort` has been renamed as `service.ports.metrics`. +- `service.metricsPortName` has been renamed as `service.portNames.metrics`. +- `service.metricsNodePort` has been renamed as `service.nodePorts.metrics`. +- `persistence.volumes` has been removed, as it duplicates the parameter `extraVolumes`. +- `ingress.certManager` has been removed. +- `metrics.serviceMonitor.relabellings` has been replaced with `metrics.serviceMonitor.relabelings`, and it sets the field `relabelings` instead of `metricRelabelings`. +- `metrics.serviceMonitor.additionalLabels` has been renamed as `metrics.serviceMonitor.labels` +- `updateStrategyType` has been removed, use the field `updateStrategy` instead, which is interpreted as a template. +- The content of `podSecurityContext` and `containerSecurityContext` have been modified. +- The behavior of VolumePermissions has been modified to not change ownership of '.snapshot' and 'lost+found' +- Introduced the values `ContainerPorts.*`, separating the service and container ports configuration. + +### To 8.21.0 + +This new version of the chart bumps the RabbitMQ version to `3.9.1`. It is considered a minor release, and no breaking changes are expected. Additionally, RabbitMQ `3.9.X` nodes can run alongside `3.8.X` nodes. + +See the [Upgrading guide](https://www.rabbitmq.com/upgrade.html) and the [RabbitMQ change log](https://www.rabbitmq.com/changelog.html) for further documentation. + +### To 8.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/administration/upgrade-helm3/). + +### To 7.0.0 + +- Several parameters were renamed or disappeared in favor of new ones on this major version: + - `replicas` is renamed to `replicaCount`. + - `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`. + - Authentication parameters were reorganized under the `auth.*` parameter: + - `rabbitmq.username`, `rabbitmq.password`, and `rabbitmq.erlangCookie` are now `auth.username`, `auth.password`, and `auth.erlangCookie` respectively. + - `rabbitmq.tls.*` parameters are now under `auth.tls.*`. + - Parameters prefixed with `rabbitmq.` were renamed removing the prefix. E.g. `rabbitmq.configuration` -> renamed to `configuration`. 
+ - `rabbitmq.rabbitmqClusterNodeName` is deprecated. + - `rabbitmq.setUlimitNofiles` is deprecated. + - `forceBoot.enabled` is renamed to `clustering.forceBoot`. + - `loadDefinition.secretName` is renamed to `loadDefinition.existingSecret`. + - `metics.port` is remamed to `service.metricsPort`. + - `service.extraContainerPorts` is renamed to `extraContainerPorts`. + - `service.nodeTlsPort` is renamed to `service.tlsNodePort`. + - `podDisruptionBudget` is deprecated in favor of `pdb.create`, `pdb.minAvailable`, and `pdb.maxUnavailable`. + - `rbacEnabled` -> deprecated in favor of `rbac.create`. + - New parameters: `serviceAccount.create`, and `serviceAccount.name`. + - New parameters: `memoryHighWatermark.enabled`, `memoryHighWatermark.type`, and `memoryHighWatermark.value`. +- Chart labels and Ingress configuration were adapted to follow the Helm charts best practices. +- Initialization logic now relies on the container. +- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. +- The layout of the persistent volumes has changed (if using persistence). Action is required if preserving data through the upgrade is desired: + - The data has moved from `mnesia/` within the persistent volume to the root of the persistent volume + - The `config/` and `schema/` directories within the persistent volume are no longer used + - An init container can be used to move and clean up the peristent volumes. An example can be found [here](https://github.com/bitnami/charts/issues/10913#issuecomment-1169619513). + - Alternately the value `persistence.subPath` can be overridden to be `mnesia` so that the directory layout is consistent with what it was previously. + - Note however that this will leave the unused `config/` and `schema/` directories within the peristent volume forever. + +Consequences: + +- Backwards compatibility is not guaranteed. +- Compatibility with non Bitnami images is not guaranteed anymore. + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. The following example assumes that the release name is rabbitmq: + +```console +kubectl delete statefulset rabbitmq --cascade=false +``` + +## Bitnami Kubernetes Documentation + +Bitnami Kubernetes documentation is available at [https://docs.bitnami.com/](https://docs.bitnami.com/). 
You can find there the following resources: + +- [Documentation for RabbitMQ Helm chart](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq/) +- [Get Started with Kubernetes guides](https://docs.bitnami.com/kubernetes/) +- [Bitnami Helm charts documentation](https://docs.bitnami.com/kubernetes/apps/) +- [Kubernetes FAQs](https://docs.bitnami.com/kubernetes/faq/) +- [Kubernetes Developer guides](https://docs.bitnami.com/tutorials/) + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/.helmignore b/install-bundler/install-rabbitmq/rabbitmq/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/Chart.yaml b/install-bundler/install-rabbitmq/rabbitmq/charts/common/Chart.yaml new file mode 100644 index 00000000..40cd22d7 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.13.3 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.13.3 diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/README.md b/install-bundler/install-rabbitmq/rabbitmq/charts/common/README.md new file mode 100644 index 00000000..80da4cc2 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. 
+ +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.8.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether to enable persistence. + example: true + +storageClass: + type: string + description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size of the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +...
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
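The helper templates vendored in the files that follow are consumed the same way as the helpers documented above. As an illustrative sketch only (the `podAntiAffinityPreset` and `podLabels` values and the indentation depth are assumptions, not part of this chart), a deployment template in a consuming chart could wire up the pod anti-affinity helper from the `_affinities.tpl` file below like this:

```yaml
# Illustrative pod spec fragment in a consuming chart's deployment template.
# "podAntiAffinityPreset" (e.g. "soft" or "hard") and "podLabels" are assumed values.
      affinity:
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" .Values.podLabels "context" $) | nindent 10 }}
```

The node affinity helper (`common.affinities.nodes`) follows the same pattern with a `key`/`values` pair, as shown in the usage comments inside the template file.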
diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_affinities.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..e85b1df4 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end 
-}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_capabilities.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..115674af --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_capabilities.tpl @@ -0,0 +1,229 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. 
+*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if PodSecurityPolicy is supported +*/}} +{{- define "common.capabilities.psp.supported" -}} +{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if AdmissionConfiguration is supported +*/}} +{{- define "common.capabilities.admissionConfiguration.supported" -}} +{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for AdmissionConfiguration. +*/}} +{{- define "common.capabilities.admissionConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiserver.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityConfiguration. +*/}} +{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "pod-security.admission.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_errors.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..07ded6f6 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_images.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_images.tpl new file mode 100644 index 00000000..1bcb779d --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_images.tpl @@ -0,0 +1,117 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end }} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . 
-}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ingores image revision/prerelease info & fallbacks to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_ingress.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..efa5b85c --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_labels.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..d90a6cdc --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_labels.tpl @@ -0,0 +1,46 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}} +{{- with .context.Chart.AppVersion -}} +{{- $_ := set $default "app.kubernetes.io/version" . -}} +{{- end -}} +{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . 
| quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_names.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_names.tpl new file mode 100644 index 00000000..a222924f --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_secrets.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a193c46b --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. 
+ - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. 
+ - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_storage.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..16405a0f --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_tplvalues.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..a8ed7637 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. 
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_utils.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..bfbddf05 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_utils.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376). 
+Usage: +{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }} +*/}} +{{- define "common.utils.checksumTemplate" -}} +{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}} +{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_warnings.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..66dffc1f --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_cassandra.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..eda9aada --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mariadb.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..17d83a2f --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mongodb.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..bbb445b8 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mysql.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..ca3953f8 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_postgresql.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..8c9aa570 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_redis.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..fc0d208d --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.validations.values.redis.passwords" -}}
+  {{- $enabled := include "common.redis.values.enabled" . -}}
+  {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}}
+  {{- $standarizedVersion := include "common.redis.values.standarized.version" . }}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardizations (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_validations.tpl b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 00000000..31ceda87
--- /dev/null
+++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,51 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+ +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/charts/common/values.yaml b/install-bundler/install-rabbitmq/rabbitmq/charts/common/values.yaml new file mode 100644 index 00000000..9abe0e15 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. 
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/NOTES.txt b/install-bundler/install-rabbitmq/rabbitmq/templates/NOTES.txt
new file mode 100644
index 00000000..2d26bce9
--- /dev/null
+++ b/install-bundler/install-rabbitmq/rabbitmq/templates/NOTES.txt
@@ -0,0 +1,154 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- $servicePort := or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) | ternary .Values.service.ports.amqp .Values.service.ports.amqpTls -}}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash
+
+In order to replicate the container startup scripts execute this command:
+
+  /opt/bitnami/scripts/rabbitmq/entrypoint.sh /opt/bitnami/scripts/rabbitmq/run.sh
+
+{{- else }}
+
+Credentials:
+
+{{- if not .Values.loadDefinition.enabled }}
+  echo "Username : {{ .Values.auth.username }}"
+  echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretPasswordName" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 -d)"
+{{- end }}
+  echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "rabbitmq.secretErlangName" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 -d)"
+
+Note that the credentials are saved in persistent volume claims and will not be changed upon upgrade or reinstallation unless the persistent volume claim has been deleted. If this is not the first installation of this chart, the credentials may not be valid.
+This is applicable when no passwords are set and therefore the random password is autogenerated. In case of using a fixed password, you should specify it when upgrading.
+More information about the credentials may be found at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases.
+
+RabbitMQ can be accessed within the cluster on port {{ $servicePort }} at {{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+To access from outside the cluster, perform the following steps:
+
+{{- if .Values.ingress.enabled }}
+{{- if contains "NodePort" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the NodePort IP and ports:
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }})
+  echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+  echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+1. Create a port-forward to the AMQP port:
+
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }} &
+  echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+
+{{- end }}
+
+2. Access RabbitMQ using the obtained URL.
+
+To Access the RabbitMQ Management interface:
+
+1. Get the RabbitMQ Management URL and associate its hostname to your cluster external IP:
+
+  export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on other K8s clusters
+  echo "RabbitMQ Management: http{{ if .Values.ingress.tls }}s{{ end }}://{{ .Values.ingress.hostname }}/"
+  echo "$CLUSTER_IP {{ .Values.ingress.hostname }}" | sudo tee -a /etc/hosts
+
+2. Open a browser and access RabbitMQ Management using the obtained URL.
+
+{{- else }}
+{{- if contains "NodePort" .Values.service.type }}
+
+Obtain the NodePort IP and ports:
+
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='amqp')].nodePort}" services {{ include "common.names.fullname" . }})
+  export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[?(@.name=='http-stats')].nodePort}" services {{ include "common.names.fullname" . }})
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/"
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://$NODE_IP:$NODE_PORT_STATS/"
+
+{{- else if contains "LoadBalancer" .Values.service.type }}
+
+Obtain the LoadBalancer IP:
+
+NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+      Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "common.names.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}")
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://$SERVICE_IP:{{ $servicePort }}/"
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://$SERVICE_IP:{{ .Values.service.ports.manager }}/"
+
+{{- else if contains "ClusterIP" .Values.service.type }}
+
+To Access the RabbitMQ AMQP port:
+
+  echo "URL : amqp://127.0.0.1:{{ $servicePort }}/"
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ $servicePort }}:{{ $servicePort }}
+
+To Access the RabbitMQ Management interface:
+
+  echo "URL : http://127.0.0.1:{{ .Values.service.ports.manager }}/"
+  kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" .
}} {{ .Values.service.ports.manager }}:{{ .Values.service.ports.manager }} + +{{- end }} +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus URL by running: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "common.names.fullname" . }} {{ .Values.service.ports.metrics }}:{{ .Values.service.ports.metrics }} & + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.service.ports.metrics }}/metrics" + +Then, open the obtained URL in a browser. + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} + +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/_helpers.tpl b/install-bundler/install-rabbitmq/rabbitmq/templates/_helpers.tpl new file mode 100644 index 00000000..f6cacb88 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,269 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) }} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "rabbitmq.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.auth.existingPasswordSecret -}} + {{- printf "%s" (tpl .Values.auth.existingPasswordSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.auth.existingErlangSecret -}} + {{- printf "%s" (tpl .Values.auth.existingErlangSecret $) -}} + {{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the TLS secret. +*/}} +{{- define "rabbitmq.tlsSecretName" -}} + {{- if .Values.auth.tls.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.tls.existingSecret $) -}} + {{- else -}} + {{- printf "%s-certs" (include "common.names.fullname" .) 
-}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a TLS credentials secret object should be created
+*/}}
+{{- define "rabbitmq.createTlsSecret" -}}
+{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.existingSecret) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper RabbitMQ plugin list
+*/}}
+{{- define "rabbitmq.plugins" -}}
+{{- $plugins := .Values.plugins -}}
+{{- if .Values.extraPlugins -}}
+{{- $plugins = printf "%s %s" $plugins .Values.extraPlugins -}}
+{{- end -}}
+{{- if .Values.metrics.enabled -}}
+{{- $plugins = printf "%s %s" $plugins .Values.metrics.plugins -}}
+{{- end -}}
+{{- printf "%s" $plugins | replace " " ", " -}}
+{{- end -}}
+
+{{/*
+Return the number of bytes given a value
+following a base 2 or base 10 number system.
+Usage:
+{{ include "rabbitmq.toBytes" .Values.path.to.the.Value }}
+*/}}
+{{- define "rabbitmq.toBytes" -}}
+{{- $value := int (regexReplaceAll "([0-9]+).*" . "${1}") }}
+{{- $unit := regexReplaceAll "[0-9]+(.*)" . "${1}" }}
+{{- if eq $unit "Ki" }}
+    {{- mul $value 1024 }}
+{{- else if eq $unit "Mi" }}
+    {{- mul $value 1024 1024 }}
+{{- else if eq $unit "Gi" }}
+    {{- mul $value 1024 1024 1024 }}
+{{- else if eq $unit "Ti" }}
+    {{- mul $value 1024 1024 1024 1024 }}
+{{- else if eq $unit "Pi" }}
+    {{- mul $value 1024 1024 1024 1024 1024 }}
+{{- else if eq $unit "Ei" }}
+    {{- mul $value 1024 1024 1024 1024 1024 1024 }}
+{{- else if eq $unit "K" }}
+    {{- mul $value 1000 }}
+{{- else if eq $unit "M" }}
+    {{- mul $value 1000 1000 }}
+{{- else if eq $unit "G" }}
+    {{- mul $value 1000 1000 1000 }}
+{{- else if eq $unit "T" }}
+    {{- mul $value 1000 1000 1000 1000 }}
+{{- else if eq $unit "P" }}
+    {{- mul $value 1000 1000 1000 1000 1000 }}
+{{- else if eq $unit "E" }}
+    {{- mul $value 1000 1000 1000 1000 1000 1000 }}
+{{- end }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "rabbitmq.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.memoryHighWatermark" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.ingress.tls" .) -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.auth.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - LDAP support
+*/}}
+{{- define "rabbitmq.validateValues.ldap" -}}
+{{- if .Values.ldap.enabled }}
+{{- $serversListLength := len .Values.ldap.servers }}
+{{- $userDnPattern := coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }}
+{{- if or (and (not (gt $serversListLength 0)) (empty .Values.ldap.uri)) (and (not $userDnPattern) (not .Values.ldap.basedn)) }}
+rabbitmq: LDAP
+    Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.servers" or "ldap.uri" are mandatory
+    to configure the connection and "ldap.userDnPattern" or "ldap.basedn" are necessary to look up the users. Please provide them:
+    $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \
+      --set ldap.enabled=true \
+      --set ldap.servers[0]=my-ldap-server \
+      --set ldap.port="389" \
+      --set ldap.userDnPattern="cn=${username},dc=example,dc=org"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - Memory high watermark
+*/}}
+{{- define "rabbitmq.validateValues.memoryHighWatermark" -}}
+{{- if and (not (eq .Values.memoryHighWatermark.type "absolute")) (not (eq .Values.memoryHighWatermark.type "relative")) }}
+rabbitmq: memoryHighWatermark.type
+    Invalid Memory high watermark type. Valid values are "absolute" and
+    "relative". Please set a valid mode (--set memoryHighWatermark.type="xxxx")
+{{- else if and .Values.memoryHighWatermark.enabled (not .Values.resources.limits.memory) (eq .Values.memoryHighWatermark.type "relative") }}
+rabbitmq: memoryHighWatermark
+    You enabled configuring memory high watermark using a relative limit. However,
+    no memory limits were defined at POD level. Define your POD limits as shown below:
+
+    $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="relative" \
+      --set memoryHighWatermark.value="0.4" \
+      --set resources.limits.memory="2Gi"
+
+    Alternatively, use an absolute value for the memory high watermark:
+
+    $ helm install {{ .Release.Name }} oci://registry-1.docker.io/bitnamicharts/rabbitmq \
+      --set memoryHighWatermark.enabled=true \
+      --set memoryHighWatermark.type="absolute" \
+      --set memoryHighWatermark.value="512MB"
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - TLS configuration for Ingress
+*/}}
+{{- define "rabbitmq.validateValues.ingress.tls" -}}
+{{- if and .Values.ingress.enabled .Values.ingress.tls (not (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations ))) (not .Values.ingress.selfSigned) (not .Values.ingress.existingSecret) (empty .Values.ingress.extraTls) }}
+rabbitmq: ingress.tls
+    You enabled the TLS configuration for the default ingress hostname but
+    you did not enable any of the available mechanisms to create the TLS secret
+    to be used by the Ingress Controller.
+    Please use any of these alternatives:
+      - Use the `ingress.extraTls` and `ingress.secrets` parameters to provide your custom TLS certificates.
+      - Use the `ingress.existingSecret` to provide your custom TLS certificates.
+      - Rely on cert-manager to create it by setting the corresponding annotations
+      - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of RabbitMQ - Auth TLS enabled
+*/}}
+{{- define "rabbitmq.validateValues.auth.tls" -}}
+{{- if and .Values.auth.tls.enabled (not .Values.auth.tls.autoGenerated) (not .Values.auth.tls.existingSecret) (not .Values.auth.tls.caCertificate) (not .Values.auth.tls.serverCertificate) (not .Values.auth.tls.serverKey) }}
+rabbitmq: auth.tls
+    You enabled TLS for RabbitMQ but you did not enable any of the available mechanisms to create the TLS secret.
+    Please use any of these alternatives:
+      - Provide an existing secret containing the TLS certificates using `auth.tls.existingSecret`
+      - Provide the plain text certificates using `auth.tls.caCertificate`, `auth.tls.serverCertificate` and `auth.tls.serverKey`.
+      - Enable auto-generated certificates using `auth.tls.autoGenerated`.
+{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts volume name. +*/}} +{{- define "rabbitmq.initScripts" -}} +{{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Get the extraConfigurationExistingSecret secret. +*/}} +{{- define "rabbitmq.extraConfiguration" -}} +{{- if not (empty .Values.extraConfigurationExistingSecret) -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" .Values.extraConfigurationExistingSecret "Length" 10 "Key" "extraConfiguration") -}} +{{- else -}} + {{- tpl .Values.extraConfiguration . -}} +{{- end -}} +{{- end -}} + +{{/* +Get the TLS.sslOptions.Password secret. +*/}} +{{- define "rabbitmq.tlsSslOptionsPassword" -}} +{{- if not (empty .Values.auth.tls.sslOptionsPassword.password) -}} + {{- .Values.auth.tls.sslOptionsPassword.password -}} +{{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" .Values.auth.tls.sslOptionsPassword.existingSecret "Length" 10 "Key" .Values.auth.tls.sslOptionsPassword.key) -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/config-secret.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/config-secret.yaml new file mode 100644 index 00000000..28fa351f --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/config-secret.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or (empty .Values.configurationExistingSecret) .Values.advancedConfiguration }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-config" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if empty .Values.configurationExistingSecret }} + rabbitmq.conf: |- + {{- include "common.tplvalues.render" (dict "value" .Values.configuration "context" $) | b64enc | nindent 4 }} + {{- end }} + {{- if .Values.advancedConfiguration }} + advanced.config: |- + {{- include "common.tplvalues.render" (dict "value" .Values.advancedConfiguration "context" $) | b64enc | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/extra-list.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/extra-list.yaml new file mode 100644 index 00000000..2d35a580 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . 
"context" $) }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/ingress-tls-secrets.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/ingress-tls-secrets.yaml new file mode 100644 index 00000000..f38feba5 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/ingress-tls-secrets.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned (not .Values.ingress.existingSecret) }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "rabbitmq-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/ingress.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/ingress.yaml new file mode 100644 index 00000000..e9778147 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/ingress.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ include "common.tplvalues.render" ( dict "value" .Values.ingress.hostname "context" $ ) }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" .Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ include "common.tplvalues.render" ( dict "value" .name "context" $ ) }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" $.Values.service.portNames.manager "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.existingSecret)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned .Values.ingress.existingSecret) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ default (printf "%s-tls" .Values.ingress.hostname | trunc 63 | trimSuffix "-") .Values.ingress.existingSecret }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/init-configmap.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/init-configmap.yaml new file mode 100644 index 00000000..2e3ef38a --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/init-configmap.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.initScripts }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- include "common.tplvalues.render" ( dict "value" .Values.initScripts "context" $ ) | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/networkpolicy.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 00000000..b24cfa16 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.service.ports.epmd }} # EPMD + - port: {{ .Values.service.ports.amqp }} + - port: {{ .Values.service.ports.amqpTls }} + - port: {{ .Values.service.ports.dist }} + - port: {{ .Values.service.ports.manager }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ printf "%s-client" (include "common.names.fullname" .) }}: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 14 }} + {{- if .Values.networkPolicy.additionalRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.additionalRules "context" $) | nindent 8 }} + {{- end }} + {{- end }} + # Allow prometheus scrapes + - ports: + - port: {{ .Values.service.ports.metrics }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/pdb.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/pdb.yaml new file mode 100644 index 00000000..a02d4021 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/prometheusrule.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 00000000..d9c32b56 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace | quote}} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ template "common.names.name" $ }} + rules: {{- include "common.tplvalues.render" (dict "value" . "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/role.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/role.yaml new file mode 100644 index 00000000..3bbfadbf --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/role.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: Role +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/rolebinding.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 00000000..5bcdc301 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +metadata: + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "rabbitmq.serviceAccountName" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ printf "%s-endpoint-reader" (include "common.names.fullname" .) }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/secrets.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/secrets.yaml new file mode 100644 index 00000000..1aa2e6c3 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/secrets.yaml @@ -0,0 +1,64 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $host := printf "%s.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} +{{- $port := print .Values.service.ports.amqp }} +{{- $user := print .Values.auth.username }} +{{- $password := include "common.secrets.passwords.manage" (dict "secret" (include "rabbitmq.secretPasswordName" .) "key" "rabbitmq-password" "length" 16 "providedValues" (list "auth.password") "context" $) | trimAll "\"" | b64dec }} + +{{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (not .Values.auth.existingPasswordSecret ) }} + rabbitmq-password: {{ print $password | b64enc | quote }} + {{- end }} + {{- if (not .Values.auth.existingErlangSecret ) }} + rabbitmq-erlang-cookie: {{ include "common.secrets.passwords.manage" (dict "secret" (include "common.names.fullname" .) 
"key" "rabbitmq-erlang-cookie" "length" 32 "providedValues" (list "auth.erlangCookie") "context" $) }} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ ternary (printf "%s-%s" $.Release.Name $key) $key $.Values.extraSecretsPrependReleaseName }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: {{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 2 }} +{{- end }} +{{- if .Values.serviceBindings.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-svcbind + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/rabbitmq +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "rabbitmq" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + username: {{ print $user | b64enc | quote }} + password: {{ print $password | b64enc | quote }} + uri: {{ printf "amqp://%s:%s@%s:%s" $user $password $host $port | b64enc | quote }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/serviceaccount.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 00000000..d6f38510 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "rabbitmq.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +secrets: + - name: {{ include "common.names.fullname" . }} +{{- end }} + diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/servicemonitor.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 00000000..3c2cff9c --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,57 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . 
}} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.path }} + path: {{ .Values.metrics.serviceMonitor.path }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.params }} + params: {{ toYaml .Values.metrics.serviceMonitor.params | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.podTargetLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.targetLabels }} + targetLabels: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.targetLabels "context" $) | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/statefulset.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/statefulset.yaml new file mode 100644 index 00000000..ba02f772 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,459 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetLabels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.statefulsetAnnotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.statefulsetAnnotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 8 }} + {{- end }} + checksum/config: {{ include (print $.Template.BasePath "/config-secret.yaml") . | sha256sum }} + {{- if (include "rabbitmq.createTlsSecret" . ) }} + checksum/config: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (not .Values.auth.existingErlangSecret) (not .Values.auth.existingPasswordSecret) .Values.extraSecrets }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "rabbitmq.imagePullSecrets" . | nindent 6 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + serviceAccountName: {{ template "rabbitmq.serviceAccountName" . }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dnsConfig "context" .) | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "rabbitmq.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "{{ .Values.persistence.mountPath }}" + {{- end }} + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + xargs -r chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs -r chown -R "{{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -ec + - | + if [[ -f /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh ]]; then + /opt/bitnami/scripts/rabbitmq/nodeshutdown.sh -t {{ .Values.terminationGracePeriodSeconds | quote }} -d {{ ternary "true" "false" .Values.image.debug | quote }} + else + rabbitmqctl stop_app + fi + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: {{ printf "%s-%s" (include "common.names.fullname" .) 
(default "headless" .Values.servicenameOverride) }} + - name: K8S_ADDRESS_TYPE + value: {{ .Values.clustering.addressType }} + {{- if .Values.featureFlags }} + - name: RABBITMQ_FEATURE_FLAGS + value: {{ .Values.featureFlags }} + {{- end }} + - name: RABBITMQ_FORCE_BOOT + value: {{ ternary "yes" "no" .Values.clustering.forceBoot | quote }} + {{- if (eq "hostname" .Values.clustering.addressType) }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.clusterDomain }}" + {{- else }} + - name: RABBITMQ_NODE_NAME + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + - name: RABBITMQ_MNESIA_DIR + value: "{{ .Values.persistence.mountPath }}/$(RABBITMQ_NODE_NAME)" + - name: RABBITMQ_LDAP_ENABLE + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVERS + value: {{ .Values.ldap.servers | join "," | quote }} + - name: RABBITMQ_LDAP_SERVERS_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- end }} + {{- if .Values.logs }} + - name: RABBITMQ_LOGS + value: {{ .Values.logs | quote }} + {{- end }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.ulimitNofiles | quote }} + {{- if and .Values.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.maxAvailableSchedulers) (toString .Values.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: rabbitmq-erlang-cookie + {{- if and .Values.clustering.rebalance (gt (.Values.replicaCount | int) 1) }} + - name: RABBITMQ_CLUSTER_REBALANCE + value: "true" + {{- end }} + - name: RABBITMQ_LOAD_DEFINITIONS + value: {{ ternary "yes" "no" .Values.loadDefinition.enabled | quote }} + - name: RABBITMQ_DEFINITIONS_FILE + value: {{ .Values.loadDefinition.file | quote }} + - name: RABBITMQ_SECURE_PASSWORD + value: {{ ternary "yes" "no" (or .Values.auth.securePassword (not .Values.auth.password)) | quote }} + - name: RABBITMQ_USERNAME + value: {{ .Values.auth.username | quote }} + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + - name: RABBITMQ_PLUGINS + value: {{ include "rabbitmq.plugins" . 
| quote }} + {{- if .Values.communityPlugins }} + - name: RABBITMQ_COMMUNITY_PLUGINS + value: {{ .Values.communityPlugins | quote }} + {{- end }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - name: amqp + containerPort: {{ .Values.containerPorts.amqp }} + - name: dist + containerPort: {{ .Values.containerPorts.dist }} + - name: stats + containerPort: {{ .Values.containerPorts.manager }} + - name: epmd + containerPort: {{ .Values.containerPorts.epmd }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.containerPorts.metrics }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: amqp-tls + containerPort: {{ .Values.containerPorts.amqpTls }} + {{- end }} + {{- if .Values.extraContainerPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraContainerPorts "context" $) | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q ping + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/virtual-hosts + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -ec + {{- if or (.Values.loadDefinition.enabled) (not (contains "rabbitmq_management" .Values.plugins )) }} + - rabbitmq-diagnostics -q check_running && rabbitmq-diagnostics -q check_local_alarms + {{- else }} + - curl -f --user {{ .Values.auth.username }}:$RABBITMQ_PASSWORD 127.0.0.1:{{ .Values.containerPorts.manager }}/api/health/checks/local-alarms + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: {{ternary "amqp-tls" "amqp" .Values.auth.tls.enabled }} + {{- end }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: configuration + mountPath: /bitnami/rabbitmq/conf + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if 
.Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + mountPath: {{ .Values.ldap.tls.certificatesMountPath }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/init-scripts + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + mountPath: /docker-entrypoint-initdb.d/init-scripts-cm + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/init-scripts-secret + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.auth.tls.enabled }} + - name: certs + projected: + sources: + - secret: + name: {{ template "rabbitmq.tlsSecretName" . }} + items: + {{- if not .Values.auth.tls.overrideCaCertificate }} + - key: {{ ternary "tls.crt" "ca.crt" .Values.auth.tls.existingSecretFullChain }} + path: ca_certificate.pem + {{- end }} + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- if .Values.auth.tls.overrideCaCertificate }} + - secret: + name: {{ .Values.auth.tls.overrideCaCertificate }} + items: + - key: ca.crt + path: ca_certificate.pem + {{- end }} + {{- end }} + {{- if and .Values.ldap.tls.enabled .Values.ldap.tls.certificatesSecret }} + - name: ldap-certs + secret: + secretName: {{ .Values.ldap.tls.certificatesSecret }} + {{- end }} + - name: configuration + projected: + sources: + {{- if or (and (empty .Values.configurationExistingSecret) .Values.configuration) (and (not .Values.advancedConfigurationExistingSecret) .Values.advancedConfiguration) }} + - secret: + name: {{ printf "%s-config" (include "common.names.fullname" .) }} + {{- end }} + {{- if and .Values.advancedConfigurationExistingSecret (not .Values.advancedConfiguration) }} + - secret: + name: {{ tpl .Values.advancedConfigurationExistingSecret . | quote }} + {{- end }} + {{- if not (empty .Values.configurationExistingSecret) }} + - secret: + name: {{ tpl .Values.configurationExistingSecret . | quote }} + {{- end }} + {{- if .Values.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ tpl .Values.loadDefinition.existingSecret . | quote }} + {{- end }} + {{- if .Values.initScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "rabbitmq.initScripts" . }} + {{- end }} + {{- if .Values.initScriptsCM }} + - name: custom-init-scripts-cm + configMap: + name: {{ tpl .Values.initScriptsCM . | quote }} + {{- end }} + {{- if .Values.initScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ tpl .Values.initScriptsSecret . 
| quote }} + defaultMode: 0755 + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} + {{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/svc-headless.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 00000000..f99292a2 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.servicenameOverride) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotationsHeadless .Values.commonAnnotations .Values.service.headless.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.service.annotationsHeadless .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + clusterIP: None + ports: + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- end }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . 
) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + publishNotReadyAddresses: true diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/svc.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/svc.yaml new file mode 100644 index 00000000..42102302 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/svc.yaml @@ -0,0 +1,107 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if not (empty .Values.service.clusterIP) }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if not (empty .Values.service.loadBalancerIP) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + ports: + {{- if or (.Values.service.portEnabled) (not .Values.auth.tls.enabled) }} + - name: {{ .Values.service.portNames.amqp }} + port: {{ .Values.service.ports.amqp }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqp)) }} + nodePort: {{ .Values.service.nodePorts.amqp }} + {{- end }} + {{- end }} + {{- if .Values.auth.tls.enabled }} + - name: {{ .Values.service.portNames.amqpTls }} + port: {{ .Values.service.ports.amqpTls }} + targetPort: amqp-tls + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.amqpTls)) }} + nodePort: {{ .Values.service.nodePorts.amqpTls }} + {{- end }} + {{- end }} + {{- if .Values.service.epmdPortEnabled }} + - name: {{ .Values.service.portNames.epmd }} + port: {{ .Values.service.ports.epmd }} + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if and (or (eq 
.Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.epmd)) }} + nodePort: {{ .Values.service.nodePorts.epmd }} + {{- end }} + {{- end }} + {{- if .Values.service.distPortEnabled }} + - name: {{ .Values.service.portNames.dist }} + port: {{ .Values.service.ports.dist }} + targetPort: dist + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.dist)) }} + nodePort: {{ .Values.service.nodePorts.dist }} + {{- end }} + {{- end }} + {{- if .Values.service.managerPortEnabled }} + - name: {{ .Values.service.portNames.manager }} + port: {{ .Values.service.ports.manager }} + targetPort: stats + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.manager)) }} + nodePort: {{ .Values.service.nodePorts.manager }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: {{ .Values.service.portNames.metrics }} + port: {{ .Values.service.ports.metrics }} + targetPort: metrics + {{- if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- else if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/tls-secrets.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/tls-secrets.yaml new file mode 100644 index 00000000..aafc6519 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/tls-secrets.yaml @@ -0,0 +1,36 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "rabbitmq.createTlsSecret" . ) }} +{{- $secretName := printf "%s-certs" (include "common.names.fullname" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + {{- if not .Values.auth.tls.autoGenerated }} + tls.crt: {{ required "A valid .Values.auth.tls.serverCertificate entry required!" .Values.auth.tls.serverCertificate | b64enc | quote }} + tls.key: {{ required "A valid .Values.auth.tls.serverKey entry required!" .Values.auth.tls.serverKey | b64enc | quote }} + ca.crt: {{ required "A valid .Values.auth.tls.caCertificate entry required!" .Values.auth.tls.caCertificate | b64enc | quote }} + {{- else }} + {{- $ca := genCA "rabbitmq-internal-ca" 365 }} + {{- $fullname := include "common.names.fullname" . 
}} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $serviceName := include "common.names.fullname" . }} + {{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} + {{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-rabbitmq/rabbitmq/templates/validation.yaml b/install-bundler/install-rabbitmq/rabbitmq/templates/validation.yaml new file mode 100644 index 00000000..e0c1abcf --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/templates/validation.yaml @@ -0,0 +1,7 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- include "rabbitmq.validateValues" . }} + diff --git a/install-bundler/install-rabbitmq/rabbitmq/values.yaml b/install-bundler/install-rabbitmq/rabbitmq/values.yaml new file mode 100644 index 00000000..ea507e55 --- /dev/null +++ b/install-bundler/install-rabbitmq/rabbitmq/values.yaml @@ -0,0 +1,1446 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section RabbitMQ Image parameters +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## @param image.registry [default: REGISTRY_NAME] RabbitMQ image registry +## @param image.repository [default: REPOSITORY_NAME/rabbitmq] RabbitMQ image repository +## @skip image.tag RabbitMQ image tag (immutable tags are recommended) +## @param image.digest RabbitMQ image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy RabbitMQ image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Set to true if you would like to see extra information on logs +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.12.7-debian-11-r0 + digest: "" + ## set to true if you would like to see extra information on logs + ## It turns BASH and/or NAMI debugging in the image + ## + debug: false + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + +## @section Common parameters +## + +## @param nameOverride String to partially override rabbitmq.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override rabbitmq.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param servicenameOverride String to partially override headless service name +## +servicenameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. +## +enableServiceLinks: true + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @param hostAliases Deployment pod host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param dnsPolicy DNS Policy for pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsPolicy: ClusterFirst +## +dnsPolicy: "" +## @param dnsConfig DNS Configuration pod +## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## E.g. +## dnsConfig: +## options: +## - name: ndots +## value: "4" +## +dnsConfig: {} +## RabbitMQ Authentication parameters +## +auth: + ## @param auth.username RabbitMQ application username + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + username: user + ## @param auth.password RabbitMQ application password + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + password: "" + ## @param auth.securePassword Whether to set the RabbitMQ password securely. This is incompatible with loading external RabbitMQ definitions and 'true' when not setting the auth.password parameter. 
+ ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + securePassword: true + ## @param auth.existingPasswordSecret Existing secret with RabbitMQ credentials (must contain a value for `rabbitmq-password` key) + ## e.g: + ## existingPasswordSecret: name-of-existing-secret + ## + existingPasswordSecret: "" + ## @param auth.enableLoopbackUser If enabled, the user `auth.username` can only connect from localhost + ## + enableLoopbackUser: false + ## @param auth.erlangCookie Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + erlangCookie: "" + ## @param auth.existingErlangSecret Existing secret with RabbitMQ Erlang cookie (must contain a value for `rabbitmq-erlang-cookie` key) + ## e.g: + ## existingErlangSecret: name-of-existing-secret + ## + existingErlangSecret: "" + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## @param auth.tls.enabled Enable TLS support on RabbitMQ + ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates + ## @param auth.tls.failIfNoPeerCert When set to true, TLS connection will be rejected if client fails to provide a certificate + ## @param auth.tls.sslOptionsVerify Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? + ## @param auth.tls.sslOptionsPassword.enabled Enable usage of password for private Key + ## @param auth.tls.sslOptionsPassword.existingSecret Name of existing Secret containing the sslOptionsPassword + ## @param auth.tls.sslOptionsPassword.key Enable Key referring to sslOptionsPassword in Secret specified in auth.tls.sslOptionsPassword.existingSecret + ## @param auth.tls.sslOptionsPassword.password Use this string as Password. If set, auth.tls.sslOptionsPassword.existingSecret and auth.tls.sslOptionsPassword.key are ignored + ## @param auth.tls.caCertificate Certificate Authority (CA) bundle content + ## @param auth.tls.serverCertificate Server certificate content + ## @param auth.tls.serverKey Server private key content + ## @param auth.tls.existingSecret Existing secret with certificate content to RabbitMQ credentials + ## @param auth.tls.existingSecretFullChain Whether or not the existing secret contains the full chain in the certificate (`tls.crt`). Will be used in place of `ca.cert` if `true`. + ## @param auth.tls.overrideCaCertificate Existing secret with certificate content be mounted instead of the `ca.crt` coming from caCertificate or existingSecret/existingSecretFullChain. + ## + tls: + enabled: false + autoGenerated: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + sslOptionsPassword: + enabled: false + existingSecret: "" + key: "" + password: "" + caCertificate: "" + serverCertificate: "" + serverKey: "" + existingSecret: "" + existingSecretFullChain: false + overrideCaCertificate: "" + +## @param logs Path of the RabbitMQ server's Erlang log file. Value for the `RABBITMQ_LOGS` environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: "-" +## @param ulimitNofiles RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +ulimitNofiles: "65536" +## RabbitMQ maximum available scheduler threads and online scheduler threads. 
By default it will create a thread per CPU detected, with the following parameters you can tune it manually. +## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## ref: https://github.com/bitnami/charts/issues/2189 +## @param maxAvailableSchedulers RabbitMQ maximum available scheduler threads +## @param onlineSchedulers RabbitMQ online scheduler threads +## +maxAvailableSchedulers: "" +onlineSchedulers: "" + +## The memory threshold under which RabbitMQ will stop reading from client network sockets, in order to avoid being killed by the OS +## ref: https://www.rabbitmq.com/alarms.html +## ref: https://www.rabbitmq.com/memory.html#threshold +## +memoryHighWatermark: + ## @param memoryHighWatermark.enabled Enable configuring Memory high watermark on RabbitMQ + ## + enabled: false + ## @param memoryHighWatermark.type Memory high watermark type. Either `absolute` or `relative` + ## + type: "relative" + ## Memory high watermark value. + ## @param memoryHighWatermark.value Memory high watermark value + ## The default value of 0.4 stands for 40% of available RAM + ## Note: the memory relative limit is applied to the resource.limits.memory to calculate the memory threshold + ## You can also use an absolute value, e.g.: 256MB + ## + value: 0.4 + +## @param plugins List of default plugins to enable (should only be altered to remove defaults; for additional plugins use `extraPlugins`) +## +plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" +## @param communityPlugins List of Community plugins (URLs) to be downloaded during container initialization +## Combine it with extraPlugins to also enable them. +## +communityPlugins: "" +## @param extraPlugins Extra plugins to enable (single string containing a space-separated list) +## Use this instead of `plugins` to add new plugins +## +extraPlugins: "rabbitmq_auth_backend_ldap" + +## Clustering settings +## +clustering: + ## @param clustering.enabled Enable RabbitMQ clustering + ## + enabled: true + ## @param clustering.addressType Switch clustering mode. Either `ip` or `hostname` + ## + addressType: hostname + ## @param clustering.rebalance Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + ## + rebalance: false + ## @param clustering.forceBoot Force boot of an unexpectedly shut down cluster (in an unexpected order). + ## forceBoot executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order + ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot + ## + forceBoot: false + ## @param clustering.partitionHandling Switch Partition Handling Strategy. 
Either `autoheal` or `pause_minority` or `pause_if_all_down` or `ignore` + ## ref: https://www.rabbitmq.com/partitions.html#automatic-handling + ## + partitionHandling: autoheal + +## Loading a RabbitMQ definitions file to configure RabbitMQ +## +loadDefinition: + ## @param loadDefinition.enabled Enable loading a RabbitMQ definitions file to configure RabbitMQ + ## + enabled: false + ## @param loadDefinition.file Name of the definitions file + ## + file: "/app/load_definition.json" + ## @param loadDefinition.existingSecret Existing secret with the load definitions file + ## Can be templated if needed, e.g: + ## existingSecret: "{{ .Release.Name }}-load-definition" + ## + existingSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: [] +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param lifecycleHooks Overwrite lifecycle for the RabbitMQ container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param terminationGracePeriodSeconds Default duration in seconds k8s waits for container to exit before sending kill signal. +## Any time in excess of 10 seconds will be spent waiting for any synchronization necessary for cluster not to lose data. +## +terminationGracePeriodSeconds: 120 +## @param extraEnvVars Extra environment variables to add to RabbitMQ pods +## E.g: +## extraEnvVars: +## - name: FOO +## value: BAR +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra environment variables (in case of sensitive data) +## +extraEnvVarsSecret: "" + +## Container Ports +## @param containerPorts.amqp +## @param containerPorts.amqpTls +## @param containerPorts.dist +## @param containerPorts.manager +## @param containerPorts.epmd +## @param containerPorts.metrics +## +containerPorts: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + epmd: 4369 + metrics: 9419 + +## @param initScripts Dictionary of init scripts. Evaluated as a template. +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## For example: +## initScripts: +## my_init_script.sh: | +## #!/bin/sh +## echo "Do something." +## +initScripts: {} +## @param initScriptsCM ConfigMap with the init scripts. Evaluated as a template. +## Note: This will override initScripts +## +initScriptsCM: "" +## @param initScriptsSecret Secret containing `/docker-entrypoint-initdb.d` scripts to be executed at initialization time that contain sensitive data. Evaluated as a template. +## +initScriptsSecret: "" +## @param extraContainerPorts Extra ports to be included in container spec, primarily informational +## E.g: +## extraContainerPorts: +## - name: new_port_name +## containerPort: 1234 +## +extraContainerPorts: [] +## @param configuration [string] RabbitMQ Configuration file content: required cluster configuration +## Do not override unless you know what you are doing.
+## To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +## + +## RabbitMQ tcp_listen_options parameters +## See : https://www.rabbitmq.com/networking.html for additional information +## +tcpListenOptions: + ## @param tcpListenOptions.backlog Maximum size of the unaccepted TCP connections queue + ## + backlog: 128 + ## @param tcpListenOptions.nodelay When set to true, deactivates Nagle's algorithm. Default is true. Highly recommended for most users. + ## + nodelay: true + ## tcpListenOptions.linger + ## + linger: + ## @param tcpListenOptions.linger.lingerOn Enable Server socket lingering + ## + lingerOn: true + ## @param tcpListenOptions.linger.timeout Server Socket lingering timeout + ## + timeout: 0 + ## @param tcpListenOptions.keepalive When set to true, enables TCP keepalives + ## + keepalive: false + +configuration: |- + ## Username and password + ## + default_user = {{ .Values.auth.username }} + {{- if and (not .Values.auth.securePassword) .Values.auth.password }} + default_pass = {{ .Values.auth.password }} + {{- end }} + {{- if .Values.clustering.enabled }} + ## Clustering + ## + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = {{ .Values.clustering.partitionHandling }} + {{- end }} + {{ if and .Values.clustering.enabled .Values.loadDefinition.enabled }} + cluster_formation.target_cluster_size_hint = {{ .Values.replicaCount }} + {{ end }} + {{- if .Values.loadDefinition.enabled }} + load_definitions = {{ .Values.loadDefinition.file }} + {{- end }} + # queue master locator + queue_master_locator = min-masters + # enable loopback user + {{- if not (empty .Values.auth.username) }} + loopback_users.{{ .Values.auth.username }} = {{ .Values.auth.enableLoopbackUser }} + {{- else}} + loopback_users.guest = {{ .Values.auth.enableLoopbackUser }} + {{- end }} + {{ template "rabbitmq.extraConfiguration" . }} + {{- if .Values.auth.tls.enabled }} + ssl_options.verify = {{ .Values.auth.tls.sslOptionsVerify }} + listeners.ssl.default = {{ .Values.service.ports.amqpTls }} + ssl_options.fail_if_no_peer_cert = {{ .Values.auth.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem + {{- if .Values.auth.tls.sslOptionsPassword.enabled }} + ssl_options.password = {{ template "rabbitmq.tlsSslOptionsPassword" . 
}} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + auth_backends.1.authn = ldap + auth_backends.1.authz = {{ ternary "ldap" "internal" .Values.ldap.authorisationEnabled }} + auth_backends.2 = internal + {{- $host := list }} + {{- $port := ternary 636 389 .Values.ldap.tls.enabled }} + {{- if .Values.ldap.uri }} + {{- $hostPort := get (urlParse .Values.ldap.uri) "host" }} + {{- $host = list (index (splitList ":" $hostPort) 0) -}} + {{- if (contains ":" $hostPort) }} + {{- $port = index (splitList ":" $hostPort) 1 -}} + {{- end }} + {{- end }} + {{- range $index, $server := concat $host .Values.ldap.servers }} + auth_ldap.servers.{{ add $index 1 }} = {{ $server }} + {{- end }} + auth_ldap.port = {{ coalesce .Values.ldap.port $port }} + {{- if or .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + auth_ldap.user_dn_pattern = {{ coalesce .Values.ldap.user_dn_pattern .Values.ldap.userDnPattern }} + {{- end }} + {{- if .Values.ldap.basedn }} + auth_ldap.dn_lookup_base = {{ .Values.ldap.basedn }} + {{- end }} + {{- if .Values.ldap.uidField }} + auth_ldap.dn_lookup_attribute = {{ .Values.ldap.uidField }} + {{- end }} + {{- if .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.user_dn = {{ .Values.ldap.binddn }} + auth_ldap.dn_lookup_bind.password = {{ required "'ldap.bindpw' is required when 'ldap.binddn' is defined" .Values.ldap.bindpw }} + {{- end }} + {{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = {{ not .Values.ldap.tls.startTls }} + auth_ldap.use_starttls = {{ .Values.ldap.tls.startTls }} + {{- if .Values.ldap.tls.CAFilename }} + auth_ldap.ssl_options.cacertfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.CAFilename }} + {{- end }} + {{- if .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.certfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ .Values.ldap.tls.certFilename }} + auth_ldap.ssl_options.keyfile = {{ .Values.ldap.tls.certificatesMountPath }}/{{ required "'ldap.tls.certKeyFilename' is required when 'ldap.tls.certFilename' is defined" .Values.ldap.tls.certKeyFilename }} + {{- end }} + {{- if .Values.ldap.tls.skipVerify }} + auth_ldap.ssl_options.verify = verify_none + auth_ldap.ssl_options.fail_if_no_peer_cert = false + {{- else if .Values.ldap.tls.verify }} + auth_ldap.ssl_options.verify = {{ .Values.ldap.tls.verify }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + ## Prometheus metrics + ## + prometheus.tcp.port = {{ .Values.containerPorts.metrics }} + {{- end }} + {{- if .Values.memoryHighWatermark.enabled }} + ## Memory Threshold + ## + total_memory_available_override_value = {{ include "rabbitmq.toBytes" .Values.resources.limits.memory }} + vm_memory_high_watermark.{{ .Values.memoryHighWatermark.type }} = {{ .Values.memoryHighWatermark.value }} + ## TCP Listen Options + ## + tcp_listen_options.backlog = {{ .Values.tcpListenOptions.backlog }} + tcp_listen_options.nodelay = {{ .Values.tcpListenOptions.nodelay }} + tcp_listen_options.linger.on = {{ .Values.tcpListenOptions.linger.lingerOn }} + tcp_listen_options.linger.timeout = {{ .Values.tcpListenOptions.linger.timeout }} + tcp_listen_options.keepalive = {{ .Values.tcpListenOptions.keepalive }} + {{- end }} + +## @param configurationExistingSecret Existing secret with the configuration to use as rabbitmq.conf. 
+## Must contain the key "rabbitmq.conf" +## Takes precedence over `configuration`, so do not use both simultaneously +## When providing an existingSecret, extraConfiguration and extraConfigurationExistingSecret do not take any effect +## +configurationExistingSecret: "" + +## @param extraConfiguration [string] Configuration file content: extra configuration to be appended to RabbitMQ configuration +## Use this instead of `configuration` to add more configuration +## Do not use simultaneously with `extraConfigurationExistingSecret` +## +extraConfiguration: |- + #default_vhost = {{ .Release.Namespace }}-vhost + #disk_free_limit.absolute = 50MB + +## @param extraConfigurationExistingSecret Existing secret with the extra configuration to append to `configuration`. +## Must contain the key "extraConfiguration" +## Takes precedence over `extraConfiguration`, so do not use both simultaneously +## +extraConfigurationExistingSecret: "" + +## @param advancedConfiguration Configuration file content: advanced configuration +## Use this as additional configuration in classic config format (Erlang term configuration format) +## +## LDAP authorisation example: +## advancedConfiguration: |- +## [{rabbitmq_auth_backend_ldap,[ +## {tag_queries, [{administrator, {constant, true}}, +## {management, {constant, true}}]} +## ]}]. +## +## If both advancedConfiguration and advancedConfigurationExistingSecret are set, then advancedConfiguration +## will be used instead of the secret. +# +advancedConfiguration: |- + +## @param advancedConfigurationExistingSecret Existing secret with the advanced configuration file (must contain a key `advanced.config`). +## Use this as additional configuration in classic config format (Erlang term configuration format) as in advancedConfiguration +## Do not use in combination with advancedConfiguration, will be ignored +## +advancedConfigurationExistingSecret: "" + +## This subsystem was introduced in RabbitMQ 3.8.0 to allow rolling upgrades of cluster members without shutting down the entire cluster. +## Feature flags are a mechanism that controls what features are considered to be enabled or available on all cluster nodes. If a feature flag is enabled, so is its associated feature (or behavior). If not, then all nodes in the cluster will disable the feature (behavior). +## e.g., drop_unroutable_metric,empty_basic_get_metric,implicit_default_bindings,maintenance_mode_status,quorum_queue,virtual_host_metadata +## @param featureFlags Feature flags that control which features are considered to be enabled or available on all cluster nodes. +## +featureFlags: "" + +## LDAP configuration +## +ldap: + ## @param ldap.enabled Enable LDAP support + ## + enabled: false + ## @param ldap.uri LDAP connection string. + ## + uri: "" + ## @param ldap.servers List of LDAP server hostnames. This is valid only if ldap.uri is not set + ## + servers: [] + ## @param ldap.port LDAP servers port. This is valid only if ldap.uri is not set + ## + port: "" + + ## DEPRECATED: ldap.user_dn_pattern will be removed in a future release, please use userDnPattern instead + ## Pattern used to translate the provided username into a value to be used for the LDAP bind + ## @param ldap.userDnPattern Pattern used to translate the provided username into a value to be used for the LDAP bind. + ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns + ## + userDnPattern: "" + ## @param ldap.binddn DN of the account used to search in the LDAP server. + ## + binddn: "" + ## @param ldap.bindpw Password for binddn account.
+  ##
+  bindpw: ""
+  ## @param ldap.basedn Base DN path where binddn account will search for the users.
+  ##
+  basedn: ""
+  ## @param ldap.uidField Field used to match with the user name (uid, samAccountName, cn, etc). It matches with 'dn_lookup_attribute' in RabbitMQ configuration
+  ## ref: https://www.rabbitmq.com/ldap.html#usernames-and-dns
+  ##
+  uidField: ""
+  ## @param ldap.authorisationEnabled Enable LDAP authorisation. Please set 'advancedConfiguration' with tag, topic, resources and vhost mappings
+  ## ref: https://www.rabbitmq.com/ldap.html#authorisation
+  ##
+  authorisationEnabled: false
+  ## @param ldap.tls.enabled Enable TLS configuration.
+  ## @param ldap.tls.startTls Use STARTTLS instead of LDAPS.
+  ## @param ldap.tls.skipVerify Skip any SSL verification (hostnames or certificates)
+  ## @param ldap.tls.verify Verify connection. Valid values are 'verify_peer' or 'verify_none'
+  ## @param ldap.tls.certificatesMountPath Where LDAP certificates are mounted.
+  ## @param ldap.tls.certificatesSecret Secret with LDAP certificates.
+  ## @param ldap.tls.CAFilename CA certificate filename. Should match the CA entry key in the ldap.tls.certificatesSecret.
+  ## @param ldap.tls.certFilename Client certificate filename to authenticate against the LDAP server. Should match the certificate entry key in the ldap.tls.certificatesSecret.
+  ## @param ldap.tls.certKeyFilename Client key filename to authenticate against the LDAP server. Should match the key entry in the ldap.tls.certificatesSecret.
+  ##
+  tls:
+    enabled: false
+    startTls: false
+    skipVerify: false
+    verify: "verify_peer"
+    certificatesMountPath: /opt/bitnami/rabbitmq/ldap/certs
+    certificatesSecret: ""
+    CAFilename: ""
+    certFilename: ""
+    certKeyFilename: ""
+
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts
+## Examples:
+## extraVolumeMounts:
+##   - name: extras
+##     mountPath: /usr/share/extras
+##     readOnly: true
+##
+extraVolumeMounts: []
+## @param extraVolumes Optionally specify extra list of additional volumes.
+## Example:
+## extraVolumes:
+##   - name: extras
+##     emptyDir: {}
+##
+extraVolumes: []
+## @param extraSecrets Optionally specify extra secrets to be created by the chart.
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded.
+## Example:
+## extraSecrets:
+##   load-definition:
+##     load_definition.json: |
+##       {
+##         ...
+##       }
+##
+extraSecrets: {}
+## @param extraSecretsPrependReleaseName Set this flag to true if extraSecrets should be created with the release name prepended.
+##
+extraSecretsPrependReleaseName: false
+
+## @section Statefulset parameters
+##
+
+## @param replicaCount Number of RabbitMQ replicas to deploy
+##
+replicaCount: 1
+## @param schedulerName Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## RabbitMQ should be initialized one by one when building the cluster for the first time.
+## Therefore, the default value of podManagementPolicy is 'OrderedReady'.
+## Once a RabbitMQ node joins the cluster, it waits at reboot for a response from another
+## RabbitMQ node in the same cluster, except for the last RabbitMQ node of the same cluster.
+## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## @param podManagementPolicy Pod management policy +## +podManagementPolicy: OrderedReady +## @param podLabels RabbitMQ Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations RabbitMQ Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param updateStrategy.type Update strategy type for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate +## @param statefulsetLabels RabbitMQ statefulset labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +statefulsetLabels: {} +## @param statefulsetAnnotations RabbitMQ statefulset annotations. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +statefulsetAnnotations: {} +## @param priorityClassName Name of the priority class to be used by RabbitMQ pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft + +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param affinity Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment. 
Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+
+## RabbitMQ pods' Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enable RabbitMQ pods' Security Context
+## @param podSecurityContext.fsGroup Set RabbitMQ pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+
+## @param containerSecurityContext.enabled Enable RabbitMQ containers' Security Context
+## @param containerSecurityContext.runAsUser Set RabbitMQ containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set RabbitMQ container's Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Set container's privilege escalation
+## @param containerSecurityContext.capabilities.drop Set container's Security Context capabilities to drop
+## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## Example:
+##   containerSecurityContext:
+##     capabilities:
+##       drop: ["NET_RAW"]
+##     readOnlyRootFilesystem: true
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+  capabilities:
+    drop: ["ALL"]
+  seccompProfile:
+    type: "RuntimeDefault"
+
+## RabbitMQ containers' resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## We usually recommend not to specify default resources and to leave this as a conscious
+## choice for the user. This also increases chances charts run on environments with little
+## resources, such as Minikube. If you do want to specify resources, uncomment the following
+## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
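For instance, a small values override touching the knobs documented below (the numbers are illustrative, not recommendations) could look like the sketch that follows; note that `memoryHighWatermark` in the configuration block further up reads `resources.limits.memory`, so a memory limit should be set whenever that feature is enabled:

```yaml
resources:
  limits:
    cpu: "1"
    memory: 2Gi        # also used by memoryHighWatermark when enabled
  requests:
    cpu: 500m
    memory: 1Gi
startupProbe:
  enabled: true
  failureThreshold: 30   # tolerate a slow first boot before liveness takes over
```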
+## @param resources.limits The resources limits for RabbitMQ containers +## @param resources.requests The requested resources for RabbitMQ containers +## +resources: + ## Example: + ## limits: + ## cpu: 1000m + ## memory: 2Gi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 1000m + ## memory: 2Gi + ## + requests: {} + +## Configure RabbitMQ containers' extra options for liveness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 +## Configure RabbitMQ containers' extra options for readiness probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param readinessProbe.enabled Enable readinessProbe +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## Configure RabbitMQ containers' extra options for startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param startupProbe.enabled Enable startupProbe +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +## @param customLivenessProbe Override default liveness probe +## +customLivenessProbe: {} +## @param customReadinessProbe Override default readiness probe +## +customReadinessProbe: {} +## @param customStartupProbe Define a custom startup probe +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes +## +customStartupProbe: {} +## @param initContainers Add init containers to the RabbitMQ pod +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## @param sidecars Add sidecar containers to the RabbitMQ pod +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 
+## +sidecars: [] + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Enable/disable a Pod Disruption Budget creation + ## + create: false + ## @param pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + maxUnavailable: "" + +## @section RBAC parameters +## + +## RabbitMQ pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for RabbitMQ pods + ## + create: true + ## @param serviceAccount.name Name of the created serviceAccount + ## If not set and create is true, a name is generated using the rabbitmq.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Auto-mount the service account token in the pod + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. + ## + annotations: {} + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## @param rbac.create Whether RBAC rules should be created + ## binding RabbitMQ ServiceAccount to a role + ## that allows RabbitMQ pods querying the K8s API + ## + create: true + +## @section Persistence parameters +## + +persistence: + ## @param persistence.enabled Enable RabbitMQ data persistence using PVC + ## + enabled: true + ## @param persistence.storageClass PVC Storage Class for RabbitMQ data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.selector Selector to match an existing Persistent Volume + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.accessModes PVC Access Modes for RabbitMQ data volume + ## + accessModes: + - ReadWriteOnce + ## @param persistence.existingClaim Provide an existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + ## + existingClaim: "" + ## @param persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom RabbitMQ images + ## + mountPath: /bitnami/rabbitmq/mnesia + ## @param persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param persistence.size PVC Storage Request for RabbitMQ data volume + ## If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well + ## + size: 8Gi + ## @param persistence.annotations Persistence annotations. Evaluated as a template + ## Example: + ## annotations: + ## example.io/disk-volume-type: SSD + ## + annotations: {} + ## @param persistence.labels Persistence labels. Evaluated as a template + ## Example: + ## labels: + ## app: my-app + labels: {} + +## @section Exposure parameters +## + +## Kubernetes service type +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + + ## @param service.portEnabled Amqp port. 
Cannot be disabled when `auth.tls.enabled` is `false`. Listener can be disabled with `listeners.tcp = none`. + ## + portEnabled: true + ## @param service.distPortEnabled Erlang distribution server port + ## + distPortEnabled: true + ## @param service.managerPortEnabled RabbitMQ Manager port + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/rabbitmq#environment-variables + ## + managerPortEnabled: true + ## @param service.epmdPortEnabled RabbitMQ EPMD Discovery service port + ## + epmdPortEnabled: true + ## Service ports + ## @param service.ports.amqp Amqp service port + ## @param service.ports.amqpTls Amqp TLS service port + ## @param service.ports.dist Erlang distribution service port + ## @param service.ports.manager RabbitMQ Manager service port + ## @param service.ports.metrics RabbitMQ Prometheues metrics service port + ## @param service.ports.epmd EPMD Discovery service port + ## + ports: + amqp: 5672 + amqpTls: 5671 + dist: 25672 + manager: 15672 + metrics: 9419 + epmd: 4369 + ## Service ports name + ## @param service.portNames.amqp Amqp service port name + ## @param service.portNames.amqpTls Amqp TLS service port name + ## @param service.portNames.dist Erlang distribution service port name + ## @param service.portNames.manager RabbitMQ Manager service port name + ## @param service.portNames.metrics RabbitMQ Prometheues metrics service port name + ## @param service.portNames.epmd EPMD Discovery service port name + ## + portNames: + amqp: "amqp" + amqpTls: "amqp-tls" + dist: "dist" + manager: "http-stats" + metrics: "metrics" + epmd: "epmd" + + ## Node ports to expose + ## @param service.nodePorts.amqp Node port for Ampq + ## @param service.nodePorts.amqpTls Node port for Ampq TLS + ## @param service.nodePorts.dist Node port for Erlang distribution + ## @param service.nodePorts.manager Node port for RabbitMQ Manager + ## @param service.nodePorts.epmd Node port for EPMD Discovery + ## @param service.nodePorts.metrics Node port for RabbitMQ Prometheues metrics + ## + nodePorts: + amqp: "" + amqpTls: "" + dist: "" + manager: "" + epmd: "" + metrics: "" + ## @param service.extraPorts Extra ports to expose in the service + ## E.g.: + ## extraPorts: + ## - name: new_svc_name + ## port: 1234 + ## targetPort: 1234 + ## + extraPorts: [] + ## @param service.loadBalancerSourceRanges Address(es) that are allowed when service is `LoadBalancer` + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalIPs Set the ExternalIPs + ## + externalIPs: [] + ## @param service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.loadBalancerIP Set the LoadBalancerIP + ## + loadBalancerIP: "" + ## @param service.clusterIP Kubernetes service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.labels Service labels. Evaluated as a template + ## + labels: {} + ## @param service.annotations Service annotations. 
Evaluated as a template + ## Example: + ## annotations: + ## service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + ## + annotations: {} + ## DEPRECATED service.annotationsHeadless it will removed in a future release, please use service.headless.annotations instead + ## @param service.annotationsHeadless Headless Service annotations. Evaluated as a template + ## Example: + ## annotations: + ## external-dns.alpha.kubernetes.io/internal-hostname: rabbitmq.example.com + ## + annotationsHeadless: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: https://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress resource for Management console + ## + enabled: false + + ## @param ingress.path Path for the default host. You may need to set this to '/*' in order to use this with ALB ingress controllers. + ## + path: / + + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.hostname Default host for the ingress resource + ## + hostname: rabbitmq.local + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the hostname defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" .Values.ingress.hostname }} + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Set this to true in order to create a TLS secret for this ingress record + ## using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. 
+  ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array
+  ## e.g:
+  ## extraHosts:
+  ## - name: rabbitmq.local
+  ##   path: /
+  ##
+  extraHosts: []
+  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
+  ## e.g:
+  ## extraPaths:
+  ## - path: /*
+  ##   backend:
+  ##     serviceName: ssl-redirect
+  ##     servicePort: use-annotation
+  ##
+  extraPaths: []
+  ## @param ingress.extraRules The list of additional rules to be added to this ingress record. Evaluated as a template
+  ## Useful when looking for additional customization, such as using different backend
+  ##
+  extraRules: []
+  ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record.
+  ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+  ## e.g:
+  ## extraTls:
+  ## - hosts:
+  ##     - rabbitmq.local
+  ##   secretName: rabbitmq.local-tls
+  ##
+  extraTls: []
+  ## @param ingress.secrets Custom TLS certificates as secrets
+  ## NOTE: 'key' and 'certificate' are expected in PEM format
+  ## NOTE: 'name' should line up with a 'secretName' set further up
+  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
+  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
+  ## It is also possible to create and manage the certificates outside of this helm chart
+  ## Please see README.md for more information
+  ## e.g:
+  ## secrets:
+  ##   - name: rabbitmq.local-tls
+  ##     key: |-
+  ##       -----BEGIN RSA PRIVATE KEY-----
+  ##       ...
+  ##       -----END RSA PRIVATE KEY-----
+  ##     certificate: |-
+  ##       -----BEGIN CERTIFICATE-----
+  ##       ...
+  ##       -----END CERTIFICATE-----
+  ##
+  secrets: []
+  ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+)
+  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster.
+  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
+  ##
+  ingressClassName: ""
+  ## @param ingress.existingSecret Name of an existing secret that contains your own TLS certificate for this ingress.
+  ##
+  existingSecret: ""
+
+## Network Policy configuration
+## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources
+  ##
+  enabled: false
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the ports RabbitMQ is listening
+  ## on. When true, RabbitMQ will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+  ## @param networkPolicy.additionalRules Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed.
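Pulling the `ingress.*` options above together, a sketch that exposes the management console over TLS (the hostname is a placeholder, and the annotation assumes an NGINX ingress controller plus cert-manager are already installed) might be:

```yaml
ingress:
  enabled: true
  hostname: rabbitmq.example.com          # placeholder hostname
  ingressClassName: nginx                 # assumes an NGINX ingress controller
  tls: true
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod   # assumes cert-manager, placeholder issuer
```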
+ ## e.g: + ## additionalRules: + ## - matchLabels: + ## - role: frontend + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + additionalRules: [] + +## @section Metrics Parameters +## + +## Prometheus Metrics +## +metrics: + ## @param metrics.enabled Enable exposing RabbitMQ metrics to be gathered by Prometheus + ## + enabled: false + ## @param metrics.plugins Plugins to enable Prometheus metrics in RabbitMQ + ## + plugins: "rabbitmq_prometheus" + ## Prometheus pod annotations + ## @param metrics.podAnnotations [object] Annotations for enabling prometheus to access the metrics endpoint + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.service.ports.metrics }}" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Specify the namespace in which the serviceMonitor resource will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Specify the interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout Specify the timeout after which the scrape is ended + ## e.g: + ## scrapeTimeout: 30s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricsRelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + ## @param metrics.serviceMonitor.targetLabels Used to keep given service's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + targetLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Used to keep given pod's labels in target + ## e.g: + ## - app.kubernetes.io/name + ## + podTargetLabels: {} + ## @param metrics.serviceMonitor.path Define the path used by ServiceMonitor to scrap metrics + ## Could be /metrics for aggregated metrics or /metrics/per-object for more details + ## + path: "" + ## @param metrics.serviceMonitor.params Define the HTTP URL parameters used by ServiceMonitor + ## + params: {} + ## @param metrics.serviceMonitor.selector ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor + ## + labels: {} + ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor + ## + annotations: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Set this to true to create prometheusRules for Prometheus operator + ## + enabled: false + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so prometheusRules will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.namespace namespace where prometheusRules resource should be created + ## + namespace: "" + ## List of rules, used as template by Helm. + ## @param metrics.prometheusRule.rules List of rules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## rules: + ## - alert: RabbitmqDown + ## expr: rabbitmq_up{service="{{ template "common.names.fullname" . }}"} == 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + ## description: RabbitMQ node down + ## - alert: ClusterDown + ## expr: | + ## sum(rabbitmq_running{service="{{ template "common.names.fullname" . }}"}) + ## < {{ .Values.replicaCount }} + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Less than {{ .Values.replicaCount }} nodes running in RabbitMQ cluster + ## VALUE = {{ "{{ $value }}" }} + ## - alert: ClusterPartition + ## expr: rabbitmq_partitions{service="{{ template "common.names.fullname" . }}"} > 0 + ## for: 5m + ## labels: + ## severity: error + ## annotations: + ## summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Cluster partition + ## VALUE = {{ "{{ $value }}" }} + ## - alert: OutOfMemory + ## expr: | + ## rabbitmq_node_mem_used{service="{{ template "common.names.fullname" . }}"} + ## / rabbitmq_node_mem_limit{service="{{ template "common.names.fullname" . 
}}"} + ## * 100 > 90 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + ## LABELS: {{ "{{ $labels }}" }} + ## - alert: TooManyConnections + ## expr: rabbitmq_connectionsTotal{service="{{ template "common.names.fullname" . }}"} > 1000 + ## for: 5m + ## labels: + ## severity: warning + ## annotations: + ## summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + ## description: | + ## RabbitMQ instance has too many connections (> 1000) + ## VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + ## + rules: [] + +## @section Init Container Parameters +## + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r90 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
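Tying the `metrics.*` parameters above together, a sketch for Prometheus scraping via the operator (this assumes the Prometheus Operator CRDs exist in the cluster; the namespace and label are placeholders for whatever your Prometheus instance selects on):

```yaml
metrics:
  enabled: true              # loads rabbitmq_prometheus and opens the metrics listener
  serviceMonitor:
    enabled: true
    namespace: monitoring                  # placeholder namespace
    labels:
      release: kube-prometheus-stack       # placeholder selector label
```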
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    ## Example:
+    ## limits:
+    ##    cpu: 100m
+    ##    memory: 128Mi
+    ##
+    limits: {}
+    ## Examples:
+    ## requests:
+    ##    cpu: 100m
+    ##    memory: 128Mi
+    ##
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
diff --git a/install-bundler/install-redis/custom-values.yaml b/install-bundler/install-redis/custom-values.yaml
new file mode 100644
index 00000000..1195d455
--- /dev/null
+++ b/install-bundler/install-redis/custom-values.yaml
@@ -0,0 +1,26 @@
+
+
+## Redis® Authentication parameters
+architecture: replication
+master:
+  persistence:
+    storageClass: "premium-rwo"
+  resources:
+    ## Example:
+    limits:
+      cpu: 512m
+      memory: 512Mi
+    requests:
+      cpu: 250m
+      memory: 250Mi
+
+replica:
+  replicaCount: 2
+  resources:
+    ## Example:
+    limits:
+      cpu: 512m
+      memory: 512Mi
+    requests:
+      cpu: 250m
+      memory: 250Mi
\ No newline at end of file
diff --git a/install-bundler/install-redis/install.sh b/install-bundler/install-redis/install.sh
new file mode 100644
index 00000000..e741178d
--- /dev/null
+++ b/install-bundler/install-redis/install.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -e
+
+NC='\033[0m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+
+NAMESPACE=$1
+REDIS_MASTER_REPLICA=$2
+REDIS_READ_REPLICA=$3
+HELM_RELEASE="redis"
+REDIS_ROOT_PASSWORD=IAMredis985834
+
+if [ "$#" -ne 3 ]; then
+  echo "Usage: $0 NAMESPACE REDIS_MASTER_REPLICA REDIS_READ_REPLICA"
+  exit 1
+fi
+printf "\n${YELLOW}------ Deploying %s to %s -----${NC}\n" "$HELM_RELEASE" "$NAMESPACE"
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/redis/" \
+  -n "$NAMESPACE" \
+  -f "$DIR/redis/values.yaml" \
+  -f "$DIR/custom-values.yaml" \
+  --set master.replicaCount=$REDIS_MASTER_REPLICA \
+  --set replica.replicaCount=$REDIS_READ_REPLICA \
+  --set auth.password=$REDIS_ROOT_PASSWORD \
+  --set nameOverride=$HELM_RELEASE
+printf "Redis deployment completed\n";
+printf "Redis host: %s%s-master.%s.svc.cluster.local (password: %s)%s\n" "$GREEN" "$HELM_RELEASE" "$NAMESPACE" "$REDIS_ROOT_PASSWORD" "$NC";
+
+# Print help statements
+echo "To debug Redis, you can use the following commands:"
+echo "#kubectl run redis-debug --rm -i --tty --image redis:latest -- bash"
+echo "If you want to benchmark your Redis installation:"
+echo "redis-benchmark -h 127.0.0.1 -p <port> -c 100 -n 100000 -a <password>"
+echo "${GREEN}#redis-cli -h redis-master.$NAMESPACE.svc.cluster.local -a $REDIS_ROOT_PASSWORD${NC}"
diff --git a/install-bundler/install-redis/redis/.helmignore b/install-bundler/install-redis/redis/.helmignore
new file mode 100644
index 00000000..f0c13194
--- /dev/null
+++ b/install-bundler/install-redis/redis/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
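The install.sh script above takes the target namespace and the Redis master/read replica counts as positional arguments, installs the bundled chart with values.yaml plus custom-values.yaml, and prints the in-cluster Redis host. A typical invocation (the namespace and counts here are illustrative) would be:

```console
bash install-bundler/install-redis/install.sh bundler-namespace 1 2
```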
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/install-bundler/install-redis/redis/Chart.lock b/install-bundler/install-redis/redis/Chart.lock new file mode 100644 index 00000000..f5fb20a7 --- /dev/null +++ b/install-bundler/install-redis/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + version: 2.13.0 +digest: sha256:6b6084c51b6a028a651f6e8539d0197487ee807c5bae44867d4ea6ccd1f9ae93 +generated: "2023-09-29T11:06:04.261917+02:00" diff --git a/install-bundler/install-redis/redis/Chart.yaml b/install-bundler/install-redis/redis/Chart.yaml new file mode 100644 index 00000000..3d9a36f9 --- /dev/null +++ b/install-bundler/install-redis/redis/Chart.yaml @@ -0,0 +1,36 @@ +annotations: + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r60 + - name: redis-exporter + image: docker.io/bitnami/redis-exporter:1.54.0-debian-11-r0 + - name: redis-sentinel + image: docker.io/bitnami/redis-sentinel:7.2.1-debian-11-r0 + - name: redis + image: docker.io/bitnami/redis:7.2.1-debian-11-r0 + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 7.2.1 +dependencies: +- name: common + repository: oci://registry-1.docker.io/bitnamicharts + tags: + - bitnami-common + version: 2.x.x +description: Redis(R) is an open source, advanced key-value store. It is often referred + to as a data structure server since keys can contain strings, hashes, lists, sets + and sorted sets. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: redis +sources: +- https://github.com/bitnami/charts/tree/main/bitnami/redis +version: 18.1.2 diff --git a/install-bundler/install-redis/redis/README.md b/install-bundler/install-redis/redis/README.md new file mode 100644 index 00000000..be765f3f --- /dev/null +++ b/install-bundler/install-redis/redis/README.md @@ -0,0 +1,988 @@ + + +# Bitnami package for Redis(R) + +Redis(R) is an open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets. + +[Overview of Redis®](http://redis.io) + +Disclaimer: Redis is a registered trademark of Redis Ltd. Any rights therein are reserved to Redis Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Ltd. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/redis +``` + +## Introduction + +This chart bootstraps a [Redis®](https://github.com/bitnami/containers/tree/main/bitnami/redis) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +### Choose between Redis® Helm Chart and Redis® Cluster Helm Chart + +You can choose any of the two Redis® Helm charts for deploying a Redis® cluster. + +1. 
[Redis® Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis) will deploy a master-replica cluster, with the [option](https://github.com/bitnami/charts/tree/main/bitnami/redis#redis-sentinel-configuration-parameters) of enabling using Redis® Sentinel. +2. [Redis® Cluster Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/redis-cluster) will deploy a Redis® Cluster topology with sharding. + +The main features of each chart are the following: + +| Redis® | Redis® Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![Redis® Topology](img/redis-topology.png) | ![Redis® Cluster Topology](img/redis-cluster-topology.png) | + +Looking to use Redisreg; in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/redis +``` + +The command deploys Redis® on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ------------------------------------------------------ | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | +| `global.redis.password` | Global Redis® password (overrides `auth.password`) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `secretAnnotations` | Annotations to add to secret | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `useHostnames` | Use hostnames internally when announcing replication. 
If false, the hostname will be resolved to an IP address | `true` | +| `nameResolutionThreshold` | Failure threshold for internal hostnames resolution | `5` | +| `nameResolutionTimeout` | Timeout seconds between probes for internal hostnames resolution | `5` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### Redis® Image parameters + +| Name | Description | Value | +| ------------------- | ---------------------------------------------------------------------------------------------------------- | -------------------- | +| `image.registry` | Redis® image registry | `docker.io` | +| `image.repository` | Redis® image repository | `bitnami/redis` | +| `image.tag` | Redis® image tag (immutable tags are recommended) | `7.2.1-debian-11-r0` | +| `image.digest` | Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Redis® image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Redis® image pull secrets | `[]` | +| `image.debug` | Enable image debug mode | `false` | + +### Redis® common configuration parameters + +| Name | Description | Value | +| -------------------------------- | ------------------------------------------------------------------------------------- | ------------- | +| `architecture` | Redis® architecture. Allowed values: `standalone` or `replication` | `replication` | +| `auth.enabled` | Enable password authentication | `true` | +| `auth.sentinel` | Enable password authentication on sentinels too | `true` | +| `auth.password` | Redis® password | `""` | +| `auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` | +| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` | +| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` | +| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis® nodes | `""` | + +### Redis® master configuration parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------ | +| `master.count` | Number of Redis® master instances to deploy (experimental, requires additional configuration) | `1` | +| `master.configuration` | Configuration for Redis® master nodes | `""` | +| `master.disableCommands` | Array with Redis® commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` | +| `master.command` | Override default container command (useful when using custom images) | `[]` | +| `master.args` | Override default container args (useful when using custom images) | `[]` | +| `master.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `master.preExecCmds` | Additional commands to run prior to starting Redis® master | `[]` | +| `master.extraFlags` | Array with additional command line flags for Redis® master | `[]` | +| `master.extraEnvVars` | Array with extra environment variables to add to Redis® master nodes | 
`[]` | +| `master.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® master nodes | `""` | +| `master.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® master nodes | `""` | +| `master.containerPorts.redis` | Container port to open on Redis® master nodes | `6379` | +| `master.startupProbe.enabled` | Enable startupProbe on Redis® master nodes | `false` | +| `master.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `20` | +| `master.startupProbe.periodSeconds` | Period seconds for startupProbe | `5` | +| `master.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `master.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `master.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `master.livenessProbe.enabled` | Enable livenessProbe on Redis® master nodes | `true` | +| `master.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `master.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `master.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `master.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `master.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `master.readinessProbe.enabled` | Enable readinessProbe on Redis® master nodes | `true` | +| `master.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `master.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `master.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `master.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `master.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `master.resources.limits` | The resources limits for the Redis® master containers | `{}` | +| `master.resources.requests` | The requested resources for the Redis® master containers | `{}` | +| `master.podSecurityContext.enabled` | Enabled Redis® master pods' Security Context | `true` | +| `master.podSecurityContext.fsGroup` | Set Redis® master pod's Security Context fsGroup | `1001` | +| `master.containerSecurityContext.enabled` | Enabled Redis® master containers' Security Context | `true` | +| `master.containerSecurityContext.runAsUser` | Set Redis® master containers' Security Context runAsUser | `1001` | +| `master.containerSecurityContext.runAsGroup` | Set Redis® master containers' Security Context runAsGroup | `0` | +| `master.containerSecurityContext.runAsNonRoot` | Set Redis® master containers' Security Context runAsNonRoot | `true` | +| `master.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate Redis® pod(s) privileges | `false` | +| `master.containerSecurityContext.seccompProfile.type` | Set Redis® master containers' Security Context seccompProfile | `RuntimeDefault` | +| `master.containerSecurityContext.capabilities.drop` | Set Redis® master containers' Security Context capabilities to drop | `["ALL"]` | +| `master.kind` | Use either Deployment or StatefulSet (default) | 
`StatefulSet` | +| `master.schedulerName` | Alternate scheduler for Redis® master pods | `""` | +| `master.updateStrategy.type` | Redis® master statefulset strategy type | `RollingUpdate` | +| `master.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `master.priorityClassName` | Redis® master pods' priorityClassName | `""` | +| `master.hostAliases` | Redis® master pods host aliases | `[]` | +| `master.podLabels` | Extra labels for Redis® master pods | `{}` | +| `master.podAnnotations` | Annotations for Redis® master pods | `{}` | +| `master.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® master pods | `false` | +| `master.podAffinityPreset` | Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `master.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `master.nodeAffinityPreset.key` | Node label key to match. Ignored if `master.affinity` is set | `""` | +| `master.nodeAffinityPreset.values` | Node label values to match. Ignored if `master.affinity` is set | `[]` | +| `master.affinity` | Affinity for Redis® master pods assignment | `{}` | +| `master.nodeSelector` | Node labels for Redis® master pods assignment | `{}` | +| `master.tolerations` | Tolerations for Redis® master pods assignment | `[]` | +| `master.topologySpreadConstraints` | Spread Constraints for Redis® master pod assignment | `[]` | +| `master.dnsPolicy` | DNS Policy for Redis® master pod | `""` | +| `master.dnsConfig` | DNS Configuration for Redis® master pod | `{}` | +| `master.lifecycleHooks` | for the Redis® master container(s) to automate configuration before or after startup | `{}` | +| `master.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® master pod(s) | `[]` | +| `master.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® master container(s) | `[]` | +| `master.sidecars` | Add additional sidecar containers to the Redis® master pod(s) | `[]` | +| `master.initContainers` | Add additional init containers to the Redis® master pod(s) | `[]` | +| `master.persistence.enabled` | Enable persistence on Redis® master nodes using Persistent Volume Claims | `true` | +| `master.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `master.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. 
| `""` | +| `master.persistence.path` | The path the volume will be mounted at on Redis® master containers | `/data` | +| `master.persistence.subPath` | The subdirectory of the volume to mount on Redis® master containers | `""` | +| `master.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® master containers | `""` | +| `master.persistence.storageClass` | Persistent Volume storage class | `""` | +| `master.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `master.persistence.size` | Persistent Volume size | `8Gi` | +| `master.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `master.persistence.labels` | Additional custom labels for the PVC | `{}` | +| `master.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `master.persistence.dataSource` | Custom PVC data source | `{}` | +| `master.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | +| `master.service.type` | Redis® master service type | `ClusterIP` | +| `master.service.ports.redis` | Redis® master service port | `6379` | +| `master.service.nodePorts.redis` | Node port for Redis® master | `""` | +| `master.service.externalTrafficPolicy` | Redis® master service external traffic policy | `Cluster` | +| `master.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `master.service.internalTrafficPolicy` | Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` | +| `master.service.clusterIP` | Redis® master service Cluster IP | `""` | +| `master.service.loadBalancerIP` | Redis® master service Load Balancer IP | `""` | +| `master.service.loadBalancerSourceRanges` | Redis® master service Load Balancer sources | `[]` | +| `master.service.externalIPs` | Redis® master service External IPs | `[]` | +| `master.service.annotations` | Additional custom annotations for Redis® master service | `{}` | +| `master.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `master.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` | +| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `master.serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `master.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `master.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + +### Redis® replicas configuration parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------ | +| `replica.replicaCount` | Number of Redis® replicas to deploy | `3` | +| `replica.configuration` | Configuration for Redis® replicas nodes | `""` | +| `replica.disableCommands` | Array with Redis® commands to disable on replicas nodes | `["FLUSHDB","FLUSHALL"]` | +| `replica.command` | Override default container command (useful when using custom images) | `[]` | +| `replica.args` | Override default container args (useful when using custom images) | `[]` | +| `replica.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `replica.preExecCmds` | Additional commands to run prior to starting Redis® replicas | `[]` | +| `replica.extraFlags` | Array with additional command line flags for Redis® replicas | `[]` | +| `replica.extraEnvVars` | Array with extra environment variables to add to Redis® replicas nodes | `[]` | +| `replica.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® replicas nodes | `""` | +| `replica.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® replicas nodes | `""` | +| `replica.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `replica.externalMaster.host` | External master host to bootstrap from | `""` | +| `replica.externalMaster.port` | Port for Redis service external master host | `6379` | +| `replica.containerPorts.redis` | Container port to open on Redis® replicas nodes | `6379` | +| `replica.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `true` | +| `replica.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `replica.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `replica.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `replica.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `replica.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `replica.livenessProbe.enabled` | Enable livenessProbe on Redis® replicas nodes | `true` | +| `replica.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `replica.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` | +| `replica.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `replica.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `replica.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `replica.readinessProbe.enabled` | Enable readinessProbe on Redis® replicas nodes | `true` | +| `replica.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `replica.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `replica.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `replica.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| 
`replica.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `replica.resources.limits` | The resources limits for the Redis® replicas containers | `{}` | +| `replica.resources.requests` | The requested resources for the Redis® replicas containers | `{}` | +| `replica.podSecurityContext.enabled` | Enabled Redis® replicas pods' Security Context | `true` | +| `replica.podSecurityContext.fsGroup` | Set Redis® replicas pod's Security Context fsGroup | `1001` | +| `replica.containerSecurityContext.enabled` | Enabled Redis® replicas containers' Security Context | `true` | +| `replica.containerSecurityContext.runAsUser` | Set Redis® replicas containers' Security Context runAsUser | `1001` | +| `replica.containerSecurityContext.runAsGroup` | Set Redis® replicas containers' Security Context runAsGroup | `0` | +| `replica.containerSecurityContext.runAsNonRoot` | Set Redis® replicas containers' Security Context runAsNonRoot | `true` | +| `replica.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® replicas pod's Security Context allowPrivilegeEscalation | `false` | +| `replica.containerSecurityContext.seccompProfile.type` | Set Redis® replicas containers' Security Context seccompProfile | `RuntimeDefault` | +| `replica.containerSecurityContext.capabilities.drop` | Set Redis® replicas containers' Security Context capabilities to drop | `["ALL"]` | +| `replica.schedulerName` | Alternate scheduler for Redis® replicas pods | `""` | +| `replica.updateStrategy.type` | Redis® replicas statefulset strategy type | `RollingUpdate` | +| `replica.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` | +| `replica.priorityClassName` | Redis® replicas pods' priorityClassName | `""` | +| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods | `""` | +| `replica.hostAliases` | Redis® replicas pods host aliases | `[]` | +| `replica.podLabels` | Extra labels for Redis® replicas pods | `{}` | +| `replica.podAnnotations` | Annotations for Redis® replicas pods | `{}` | +| `replica.shareProcessNamespace` | Share a single process namespace between all of the containers in Redis® replicas pods | `false` | +| `replica.podAffinityPreset` | Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `replica.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `replica.nodeAffinityPreset.key` | Node label key to match. Ignored if `replica.affinity` is set | `""` | +| `replica.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `replica.affinity` is set | `[]` | +| `replica.affinity` | Affinity for Redis® replicas pods assignment | `{}` | +| `replica.nodeSelector` | Node labels for Redis® replicas pods assignment | `{}` | +| `replica.tolerations` | Tolerations for Redis® replicas pods assignment | `[]` | +| `replica.topologySpreadConstraints` | Spread Constraints for Redis® replicas pod assignment | `[]` | +| `replica.dnsPolicy` | DNS Policy for Redis® replica pods | `""` | +| `replica.dnsConfig` | DNS Configuration for Redis® replica pods | `{}` | +| `replica.lifecycleHooks` | for the Redis® replica container(s) to automate configuration before or after startup | `{}` | +| `replica.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® replicas pod(s) | `[]` | +| `replica.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) | `[]` | +| `replica.sidecars` | Add additional sidecar containers to the Redis® replicas pod(s) | `[]` | +| `replica.initContainers` | Add additional init containers to the Redis® replicas pod(s) | `[]` | +| `replica.persistence.enabled` | Enable persistence on Redis® replicas nodes using Persistent Volume Claims | `true` | +| `replica.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `replica.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` | +| `replica.persistence.path` | The path the volume will be mounted at on Redis® replicas containers | `/data` | +| `replica.persistence.subPath` | The subdirectory of the volume to mount on Redis® replicas containers | `""` | +| `replica.persistence.subPathExpr` | Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers | `""` | +| `replica.persistence.storageClass` | Persistent Volume storage class | `""` | +| `replica.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `replica.persistence.size` | Persistent Volume size | `8Gi` | +| `replica.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `replica.persistence.labels` | Additional custom labels for the PVC | `{}` | +| `replica.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `replica.persistence.dataSource` | Custom PVC data source | `{}` | +| `replica.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | +| `replica.service.type` | Redis® replicas service type | `ClusterIP` | +| `replica.service.ports.redis` | Redis® replicas service port | `6379` | +| `replica.service.nodePorts.redis` | Node port for Redis® replicas | `""` | +| `replica.service.externalTrafficPolicy` | Redis® replicas service external traffic policy | `Cluster` | +| `replica.service.internalTrafficPolicy` | Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` | +| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `replica.service.clusterIP` | Redis® replicas service Cluster IP | `""` | +| `replica.service.loadBalancerIP` | Redis® replicas service Load Balancer IP | `""` | +| `replica.service.loadBalancerSourceRanges` | Redis® replicas service Load Balancer sources | `[]` | +| `replica.service.annotations` | Additional custom annotations for Redis® replicas service | `{}` | +| `replica.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | 
`None` | +| `replica.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `replica.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-replicas pods | `30` | +| `replica.autoscaling.enabled` | Enable replica autoscaling settings | `false` | +| `replica.autoscaling.minReplicas` | Minimum replicas for the pod autoscaling | `1` | +| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` | +| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` | +| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` | +| `replica.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `replica.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `replica.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `replica.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + +### Redis® Sentinel configuration parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` | +| `sentinel.image.registry` | Redis® Sentinel image registry | `docker.io` | +| `sentinel.image.repository` | Redis® Sentinel image repository | `bitnami/redis-sentinel` | +| `sentinel.image.tag` | Redis® Sentinel image tag (immutable tags are recommended) | `7.2.1-debian-11-r0` | +| `sentinel.image.digest` | Redis® Sentinel image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `sentinel.image.pullPolicy` | Redis® Sentinel image pull policy | `IfNotPresent` | +| `sentinel.image.pullSecrets` | Redis® Sentinel image pull secrets | `[]` | +| `sentinel.image.debug` | Enable image debug mode | `false` | +| `sentinel.annotations` | Additional custom annotations for Redis® Sentinel resource | `{}` | +| `sentinel.masterSet` | Master set name | `mymaster` | +| `sentinel.quorum` | Sentinel Quorum | `2` | +| `sentinel.getMasterTimeout` | Amount of time to allow before get_sentinel_master_info() times out. | `99` | +| `sentinel.automateClusterRecovery` | Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. | `false` | +| `sentinel.redisShutdownWaitFailover` | Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
| `true` | +| `sentinel.downAfterMilliseconds` | Timeout for detecting a Redis® node is down | `60000` | +| `sentinel.failoverTimeout` | Timeout for performing a election failover | `180000` | +| `sentinel.parallelSyncs` | Number of replicas that can be reconfigured in parallel to use the new master after a failover | `1` | +| `sentinel.configuration` | Configuration for Redis® Sentinel nodes | `""` | +| `sentinel.command` | Override default container command (useful when using custom images) | `[]` | +| `sentinel.args` | Override default container args (useful when using custom images) | `[]` | +| `sentinel.enableServiceLinks` | Whether information about services should be injected into pod's environment variable | `true` | +| `sentinel.preExecCmds` | Additional commands to run prior to starting Redis® Sentinel | `[]` | +| `sentinel.extraEnvVars` | Array with extra environment variables to add to Redis® Sentinel nodes | `[]` | +| `sentinel.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes | `""` | +| `sentinel.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for Redis® Sentinel nodes | `""` | +| `sentinel.externalMaster.enabled` | Use external master for bootstrapping | `false` | +| `sentinel.externalMaster.host` | External master host to bootstrap from | `""` | +| `sentinel.externalMaster.port` | Port for Redis service external master host | `6379` | +| `sentinel.containerPorts.sentinel` | Container port to open on Redis® Sentinel nodes | `26379` | +| `sentinel.startupProbe.enabled` | Enable startupProbe on Redis® Sentinel nodes | `true` | +| `sentinel.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `sentinel.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `sentinel.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `sentinel.startupProbe.failureThreshold` | Failure threshold for startupProbe | `22` | +| `sentinel.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `sentinel.livenessProbe.enabled` | Enable livenessProbe on Redis® Sentinel nodes | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `20` | +| `sentinel.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `sentinel.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `sentinel.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `sentinel.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `sentinel.readinessProbe.enabled` | Enable readinessProbe on Redis® Sentinel nodes | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `20` | +| `sentinel.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `sentinel.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `sentinel.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `sentinel.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `sentinel.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `sentinel.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `sentinel.persistence.enabled` | Enable persistence on Redis® 
sentinel nodes using Persistent Volume Claims (Experimental) | `false` | +| `sentinel.persistence.storageClass` | Persistent Volume storage class | `""` | +| `sentinel.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `sentinel.persistence.size` | Persistent Volume size | `100Mi` | +| `sentinel.persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `sentinel.persistence.labels` | Additional custom labels for the PVC | `{}` | +| `sentinel.persistence.selector` | Additional labels to match for the PVC | `{}` | +| `sentinel.persistence.dataSource` | Custom PVC data source | `{}` | +| `sentinel.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` | +| `sentinel.persistence.sizeLimit` | Set this to enable a size limit for `emptyDir` volumes. | `""` | +| `sentinel.resources.limits` | The resources limits for the Redis® Sentinel containers | `{}` | +| `sentinel.resources.requests` | The requested resources for the Redis® Sentinel containers | `{}` | +| `sentinel.containerSecurityContext.enabled` | Enabled Redis® Sentinel containers' Security Context | `true` | +| `sentinel.containerSecurityContext.runAsUser` | Set Redis® Sentinel containers' Security Context runAsUser | `1001` | +| `sentinel.containerSecurityContext.runAsGroup` | Set Redis® Sentinel containers' Security Context runAsGroup | `0` | +| `sentinel.containerSecurityContext.runAsNonRoot` | Set Redis® Sentinel containers' Security Context runAsNonRoot | `true` | +| `sentinel.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation | `false` | +| `sentinel.containerSecurityContext.seccompProfile.type` | Set Redis® Sentinel containers' Security Context seccompProfile | `RuntimeDefault` | +| `sentinel.containerSecurityContext.capabilities.drop` | Set Redis® Sentinel containers' Security Context capabilities to drop | `["ALL"]` | +| `sentinel.lifecycleHooks` | for the Redis® sentinel container(s) to automate configuration before or after startup | `{}` | +| `sentinel.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® Sentinel | `[]` | +| `sentinel.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) | `[]` | +| `sentinel.service.type` | Redis® Sentinel service type | `ClusterIP` | +| `sentinel.service.ports.redis` | Redis® service port for Redis® | `6379` | +| `sentinel.service.ports.sentinel` | Redis® service port for Redis® Sentinel | `26379` | +| `sentinel.service.nodePorts.redis` | Node port for Redis® | `""` | +| `sentinel.service.nodePorts.sentinel` | Node port for Sentinel | `""` | +| `sentinel.service.externalTrafficPolicy` | Redis® Sentinel service external traffic policy | `Cluster` | +| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `sentinel.service.clusterIP` | Redis® Sentinel service Cluster IP | `""` | +| `sentinel.service.loadBalancerIP` | Redis® Sentinel service Load Balancer IP | `""` | +| `sentinel.service.loadBalancerSourceRanges` | Redis® Sentinel service Load Balancer sources | `[]` | +| `sentinel.service.annotations` | Additional custom annotations for Redis® Sentinel service | `{}` | +| `sentinel.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | +| `sentinel.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| 
`sentinel.service.headless.annotations` | Annotations for the headless service. | `{}` | +| `sentinel.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-node pods | `30` | + +### Other Parameters + +| Name | Description | Value | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.metrics.allowExternal` | Don't require client label for connections for metrics endpoint | `true` | +| `networkPolicy.metrics.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces to metrics endpoint | `{}` | +| `networkPolicy.metrics.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces to metrics endpoint | `{}` | +| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | +| `podSecurityPolicy.enabled` | Enable PodSecurityPolicy's RBAC rules | `false` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.rules` | Custom RBAC rules to set | `[]` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` | +| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` | +| `pdb.maxUnavailable` | Max number of pods that can be unavailable after the eviction | `""` | +| `tls.enabled` | Enable TLS traffic | `false` | +| `tls.authClients` | Require clients to authenticate | `true` | +| `tls.autoGenerated` | Enable autogenerated certificates | `false` | +| `tls.existingSecret` | The name of the existing secret that contains the TLS certificates | `""` | +| `tls.certificatesSecret` | DEPRECATED. Use existingSecret instead. 
| `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate Key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | +| `tls.dhParamsFilename` | File containing DH params (in order to support DH based ciphers) | `""` | + +### Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` | +| `metrics.image.registry` | Redis® Exporter image registry | `docker.io` | +| `metrics.image.repository` | Redis® Exporter image repository | `bitnami/redis-exporter` | +| `metrics.image.tag` | Redis® Exporter image tag (immutable tags are recommended) | `1.54.0-debian-11-r0` | +| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` | +| `metrics.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `false` | +| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `metrics.livenessProbe.enabled` | Enable livenessProbe on Redis® replicas nodes | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `metrics.readinessProbe.enabled` | Enable readinessProbe on Redis® replicas nodes | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `metrics.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `metrics.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `metrics.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `metrics.command` | Override default metrics container init command (useful when using custom images) | `[]` | +| `metrics.redisTargetHost` | A way to specify an alternative Redis® hostname | `localhost` | +| `metrics.extraArgs` | Extra arguments for Redis® exporter, for example: | `{}` | +| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis® exporter | `[]` | 
+| `metrics.containerSecurityContext.enabled` | Enabled Redis® exporter containers' Security Context | `true` | +| `metrics.containerSecurityContext.runAsUser` | Set Redis® exporter containers' Security Context runAsUser | `1001` | +| `metrics.containerSecurityContext.runAsGroup` | Set Redis® exporter containers' Security Context runAsGroup | `0` | +| `metrics.containerSecurityContext.runAsNonRoot` | Set Redis® exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® exporter containers' Security Context allowPrivilegeEscalation | `false` | +| `metrics.containerSecurityContext.seccompProfile.type` | Set Redis® exporter containers' Security Context seccompProfile | `RuntimeDefault` | +| `metrics.containerSecurityContext.capabilities.drop` | Set Redis® exporter containers' Security Context capabilities to drop | `["ALL"]` | +| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® metrics sidecar | `[]` | +| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar | `[]` | +| `metrics.resources.limits` | The resources limits for the Redis® exporter container | `{}` | +| `metrics.resources.requests` | The requested resources for the Redis® exporter container | `{}` | +| `metrics.podLabels` | Extra labels for Redis® exporter pods | `{}` | +| `metrics.podAnnotations` | Annotations for Redis® exporter pods | `{}` | +| `metrics.service.type` | Redis® exporter service type | `ClusterIP` | +| `metrics.service.port` | Redis® exporter service port | `9121` | +| `metrics.service.externalTrafficPolicy` | Redis® exporter service external traffic policy | `Cluster` | +| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | +| `metrics.service.loadBalancerIP` | Redis® exporter service Load Balancer IP | `""` | +| `metrics.service.loadBalancerSourceRanges` | Redis® exporter service Load Balancer sources | `[]` | +| `metrics.service.annotations` | Additional custom annotations for Redis® exporter service | `{}` | +| `metrics.service.clusterIP` | Redis® exporter service Cluster IP | `""` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` | +| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | The timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabellings` | Metrics RelabelConfigs to apply to samples before scraping. | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | Metrics RelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` | +| `metrics.serviceMonitor.sampleLimit` | Limit of how many samples should be scraped from every Pod | `false` | +| `metrics.serviceMonitor.targetLimit` | Limit of how many targets should be scraped | `false` | +| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` | +| `metrics.prometheusRule.rules` | Custom Prometheus rules | `[]` | + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------ | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | OS Shell + Utility image registry | `docker.io` | +| `volumePermissions.image.repository` | OS Shell + Utility image repository | `bitnami/os-shell` | +| `volumePermissions.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r60` | +| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | +| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` | +| `sysctl.image.registry` | OS Shell + Utility image registry | `docker.io` | +| `sysctl.image.repository` | OS Shell + Utility image repository | `bitnami/os-shell` | +| `sysctl.image.tag` | OS Shell + Utility image tag (immutable tags are recommended) | `11-debian-11-r60` | +| `sysctl.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `sysctl.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | +| `sysctl.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | +| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` | +| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctl.resources.limits` | The resources limits for the init container | `{}` | +| `sysctl.resources.requests` | The requested resources for the init container | `{}` | + +### useExternalDNS Parameters + +| Name | Description | Value | +| -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `useExternalDNS.enabled` | Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. | `false` | +| `useExternalDNS.additionalAnnotations` | Extra annotations to be utilized when `external-dns` is enabled. | `{}` | +| `useExternalDNS.annotationKey` | The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. | `external-dns.alpha.kubernetes.io/` | +| `useExternalDNS.suffix` | The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. | `""` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set auth.password=secretpassword \ + oci://registry-1.docker.io/bitnamicharts/redis +``` + +The above command sets the Redis® server password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/redis +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Use a different Redis® version + +To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/redis/configuration/change-image-version/). 
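+
+For instance, a values override along the following lines could be used; the tag shown is only illustrative, so substitute an immutable tag that actually exists for the image:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/redis
+  tag: 7.2.1-debian-11-r0 # illustrative tag; replace with an existing immutable tag
+```
+
+The override can then be applied with `helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/redis`.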
+ +### Bootstrapping with an External Cluster + +This chart is equipped with the ability to bring online a set of Pods that connect to an existing Redis deployment that lies outside of Kubernetes. This effectively creates a hybrid Redis Deployment where both Pods in Kubernetes and Instances such as Virtual Machines can partake in a single Redis Deployment. This is helpful in situations where one may be migrating Redis from Virtual Machines into Kubernetes, for example. To take advantage of this, use the following as an example configuration: + +```yaml +replica: + externalMaster: + enabled: true + host: external-redis-0.internal +sentinel: + externalMaster: + enabled: true + host: external-redis-0.internal +``` + +:warning: This is currently limited to clusters in which Sentinel and Redis run on the same node! :warning: + +Please also note that the external sentinel must be listening on port `26379`, and this is currently not configurable. + +Once the Kubernetes Redis Deployment is online and confirmed to be working with the existing cluster, the configuration can then be removed and the cluster will remain connected. + +### External DNS + +This chart is equipped to allow leveraging the ExternalDNS project. Doing so will enable ExternalDNS to publish the FQDN for each instance, in the format of `..`. +Example, when using the following configuration: + +```yaml +useExternalDNS: + enabled: true + suffix: prod.example.org + additionalAnnotations: + ttl: 10 +``` + +On a cluster where the name of the Helm release is `a`, the hostname of a Pod is generated as: `a-redis-node-0.a-redis.prod.example.org`. The IP of that FQDN will match that of the associated Pod. This modifies the following parameters of the Redis/Sentinel configuration using this new FQDN: + +- `replica-announce-ip` +- `known-sentinel` +- `known-replica` +- `announce-ip` + +:warning: This requires a working installation of `external-dns` to be fully functional. :warning: + +See the [official ExternalDNS documentation](https://github.com/kubernetes-sigs/external-dns) for additional configuration options. + +### Cluster topologies + +#### Default: Master-Replicas + +When installing the chart with `architecture=replication`, it will deploy a Redis® master StatefulSet and a Redis® replicas StatefulSet. The replicas will be read-replicas of the master. Two services will be exposed: + +- Redis® Master service: Points to the master, where read-write operations can be performed +- Redis® Replicas service: Points to the replicas, where only read operations are allowed by default. + +In case the master crashes, the replicas will wait until the master node is respawned again by the Kubernetes Controller Manager. + +#### Standalone + +When installing the chart with `architecture=standalone`, it will deploy a standalone Redis® StatefulSet. A single service will be exposed: + +- Redis® Master service: Points to the master, where read-write operations can be performed + +#### Master-Replicas with Sentinel + +When installing the chart with `architecture=replication` and `sentinel.enabled=true`, it will deploy a Redis® master StatefulSet (only one master allowed) and a Redis® replicas StatefulSet. In this case, the pods will contain an extra container with Redis® Sentinel. This container will form a cluster of Redis® Sentinel nodes, which will promote a new master in case the actual one fails. + +On graceful termination of the Redis® master pod, a failover of the master is initiated to promote a new master. 
The Redis® Sentinel container in this pod will wait for the failover to occur before terminating. If `sentinel.redisShutdownWaitFailover=true` is set (the default), the Redis® container will wait for the failover as well before terminating. This increases availability for reads during failover, but may cause stale reads until all clients have switched to the new master.
+
+In addition to this, only one service is exposed:
+
+- Redis® service: Exposes port 6379 for Redis® read-only operations and port 26379 for accessing Redis® Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the Redis® Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```console
+SENTINEL get-master-addr-by-name
+```
+
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Multiple masters (experimental)
+
+When `master.count` is greater than `1`, special care must be taken to create a consistent setup.
+
+An example use case is the creation of a redundant set of standalone masters or master-replicas per Kubernetes node where you must ensure:
+
+- No more than `1` master can be deployed per Kubernetes node
+- Replicas and writers can only see the single master of their own Kubernetes node
+
+One way of achieving this is by setting `master.service.internalTrafficPolicy=Local` in combination with a `master.affinity.podAntiAffinity` spec to never schedule more than one master per Kubernetes node.
+
+It's recommended to only change `master.count` if you know what you are doing.
+`master.count` greater than `1` is not designed for use when `sentinel.enabled=true`.
+
+### Using a password file
+
+To use a password file for Redis®, you need to create a secret containing the password and then deploy the chart using that secret.
+
+Refer to the chart documentation for more information on [using a password file for Redis®](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/use-password-file/).
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+Refer to the chart documentation for more information on [creating the secret and a TLS deployment example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-tls/).
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
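+
+For instance, a minimal values sketch along these lines enables the exporter sidecar and, assuming the Prometheus Operator is installed in the cluster, a ServiceMonitor to scrape it (both keys are listed in the Metrics Parameters table above):
+
+```yaml
+metrics:
+  enabled: true     # start the redis-exporter sidecar (endpoint on port 9121)
+  serviceMonitor:
+    enabled: true   # assumes the Prometheus Operator CRDs are available
+    interval: 30s
+```
+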
If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true`, you also need to pass the TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example:
+
+You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification, or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
+
+### Host Kernel Settings
+
+Redis® may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+
+Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/).
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```console
+helm install my-release --set master.persistence.existingClaim=PVC_NAME oci://registry-1.docker.io/bitnamicharts/redis
+```
+
+## Backup and restore
+
+Refer to the chart documentation for more information on [backing up and restoring Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/backup-restore/).
+
+## NetworkPolicy
+
+To enable network policy for Redis®, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+Refer to the chart documentation for more information on [enabling the network policy in Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-network-policy/).
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available in the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.
+
+### RDB compatibility
+
+It's common to have RDB format changes across Redis® releases where we see backward compatibility but no forward compatibility.
For example, v7.0 can load an RDB created by v6.2, but the opposite is not true.
+When that's the case, the rolling update can cause replicas to temporarily stop synchronizing while they are running a lower version than the master.
+For example, on a rolling update `master-0` and `replica-2` are updated first from version v6.2 to v7.0; `replica-0` and `replica-1` won't be able to start a full sync with `master-0` because they are still running v6.2 and can't support the RDB format from version 7.0 that the master is now using.
+This issue can be mitigated by splitting the upgrade into two stages: one for all replicas and another for any master.
+
+- Stage 1 (replicas only, as there's no master with an ordinal higher than 99):
+`helm upgrade oci://registry-1.docker.io/bitnamicharts/redis --set master.updateStrategy.rollingUpdate.partition=99`
+- Stage 2 (anything else that is not up to date, in this case only master):
+`helm upgrade oci://registry-1.docker.io/bitnamicharts/redis`
+
+### To 18.0.0
+
+This major version updates the Redis® docker image version used from `7.0` to `7.2`, the new stable version. There are no major changes in the chart, but we recommend checking the [Redis® 7.2 release notes](https://raw.githubusercontent.com/redis/redis/7.2/00-RELEASENOTES) before upgrading.
+
+NOTE: Due to an error in our release process, versions greater than or equal to 17.15.4 already use 7.2 by default.
+
+### To 17.0.0
+
+This major version updates the Redis® docker image version used from `6.2` to `7.0`, the new stable version. There are no major changes in the chart, but we recommend checking the [Redis® 7.0 release notes](https://raw.githubusercontent.com/redis/redis/7.0/00-RELEASENOTES) before upgrading.
+
+### To 16.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `master.service.port` renamed as `master.service.ports.redis`.
+- `master.service.nodePort` renamed as `master.service.nodePorts.redis`.
+- `replica.service.port` renamed as `replica.service.ports.redis`.
+- `replica.service.nodePort` renamed as `replica.service.nodePorts.redis`.
+- `sentinel.service.port` renamed as `sentinel.service.ports.redis`.
+- `sentinel.service.sentinelPort` renamed as `sentinel.service.ports.sentinel`.
+- `master.containerPort` renamed as `master.containerPorts.redis`.
+- `replica.containerPort` renamed as `replica.containerPorts.redis`.
+- `sentinel.containerPort` renamed as `sentinel.containerPorts.sentinel`.
+- `master.spreadConstraints` renamed as `master.topologySpreadConstraints`
+- `replica.spreadConstraints` renamed as `replica.topologySpreadConstraints`
+
+### To 15.0.0
+
+The parameter to enable the usage of StaticIDs was removed. The behavior is to [always use StaticIDs](https://github.com/bitnami/charts/pull/7278).
+
+### To 14.8.0
+
+The Redis® sentinel exporter was removed in this version because the upstream project was deprecated. The regular Redis® exporter is included in the sentinel scenario as usual.
+
+### To 14.0.0
+
+- Several parameters were renamed or disappeared in favor of new ones in this major version:
+ - The term *slave* has been replaced by the term *replica*. Therefore, parameters prefixed with `slave` are now prefixed with `replicas`.
+ - Credentials parameters are reorganized under the `auth` parameter.
+ - `cluster.enabled` parameter is deprecated in favor of `architecture` parameter that accepts two values: `standalone` and `replication`. + - `securityContext.*` is deprecated in favor of `XXX.podSecurityContext` and `XXX.containerSecurityContext`. + - `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones. +- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added. +- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels). +- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami-labs/readme-generator-for-helm). + +Consequences: + +Backwards compatibility is not guaranteed. To upgrade to `14.0.0`, install a new release of the Redis® chart, and migrate the data from your previous release. You have 2 alternatives to do so: + +- Create a backup of the database, and restore it on the new release as explained in the [Backup and restore](#backup-and-restore) section. +- Reuse the PVC used to hold the master data on your previous release. To do so, use the `master.persistence.existingClaim` parameter. The following example assumes that the release name is `redis`: + +```console +helm install redis oci://registry-1.docker.io/bitnamicharts/redis --set auth.password=[PASSWORD] --set master.persistence.existingClaim=[EXISTING_PVC] +``` + +| Note: you need to substitute the placeholder *[EXISTING_PVC]* with the name of the PVC used on your previous release, and *[PASSWORD]* with the password used in your previous release. + +### To 13.0.0 + +This major version updates the Redis® docker image version used from `6.0` to `6.2`, the new stable version. There are no major changes in the chart and there shouldn't be any breaking changes in it as `6.2` is basically a stricter superset of `6.0`. For more information, please refer to [Redis® 6.2 release notes](https://raw.githubusercontent.com/redis/redis/6.2/00-RELEASENOTES). + +### To 12.3.0 + +This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility could be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+#### Considerations when upgrading to this version
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+#### Useful links
+
+-
+-
+-
+
+### To 11.0.0
+
+When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version.
+
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled in the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases:
+
+- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+- Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 9.0.0
+
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the Redis® exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the Redis® Master StatefulSet definition, so the `helm upgrade` command would not work out of the box. As an alternative, one of the following could be done:
+
+- Recommended: Create a clone of the Redis® Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+```console
+helm install my-release oci://registry-1.docker.io/bitnamicharts/redis --set persistence.existingClaim=
+```
+
+- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the Redis® Master StatefulSet.
As a consequence, the following commands can be done to upgrade the release + +```console +helm delete --purge +helm install oci://registry-1.docker.io/bitnamicharts/redis +``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. In case you want to use Redis® Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### To 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See . + +It also fixes where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. + +In order to upgrade, delete the Redis® StatefulSet before upgrading: + +```console +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the Redis® slave (and metrics if enabled) deployment: + +```console +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/install-bundler/install-redis/redis/charts/common/.helmignore b/install-bundler/install-redis/redis/charts/common/.helmignore new file mode 100644 index 00000000..50af0317 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/install-redis/redis/charts/common/Chart.yaml b/install-bundler/install-redis/redis/charts/common/Chart.yaml new file mode 100644 index 00000000..4da7ec09 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.13.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.13.0 diff --git a/install-bundler/install-redis/redis/charts/common/README.md b/install-bundler/install-redis/redis/charts/common/README.md new file mode 100644 index 00000000..fe6a0100 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. 
Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. 
To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/install-bundler/install-redis/redis/charts/common/templates/_affinities.tpl b/install-bundler/install-redis/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..e85b1df4 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict 
"customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_capabilities.tpl b/install-bundler/install-redis/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..b1257397 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,229 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if PodSecurityPolicy is supported +*/}} +{{- define "common.capabilities.psp.supported" -}} +{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if AdmissionConfiguration is supported +*/}} +{{- define "common.capabilities.admisionConfiguration.supported" -}} +{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for AdmissionConfiguration. +*/}} +{{- define "common.capabilities.admisionConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "apiserver.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiserver.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityConfiguration. +*/}} +{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "pod-security.admission.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_errors.tpl b/install-bundler/install-redis/redis/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..07ded6f6 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_images.tpl b/install-bundler/install-redis/redis/charts/common/templates/_images.tpl new file mode 100644 index 00000000..e248d6d0 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_images.tpl @@ -0,0 +1,101 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ingores image revision/prerelease info & fallbacks to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/install-bundler/install-redis/redis/charts/common/templates/_ingress.tpl b/install-bundler/install-redis/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..efa5b85c --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_labels.tpl b/install-bundler/install-redis/redis/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..fa3833fb --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,46 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}} +{{- with .context.Chart.AppVersion -}} +{{- $_ := set $default "app.kubernetes.io/version" . -}} +{{- end -}} +{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .) }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_names.tpl b/install-bundler/install-redis/redis/charts/common/templates/_names.tpl new file mode 100644 index 00000000..a222924f --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_secrets.tpl b/install-bundler/install-redis/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a193c46b --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. 
It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
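Editorial note (not part of the upstream chart): a minimal, illustrative sketch of how this helper is typically consumed, assuming the Secret is named after the release via "common.names.fullname":

{{- if not (include "common.secrets.exists" (dict "secret" (include "common.names.fullname" .) "context" $)) }}
  (render a new Secret manifest here; the helper prints "true" only when the Secret already exists in the release namespace, so this guard only fires on first install)
{{- end }}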
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_storage.tpl b/install-bundler/install-redis/redis/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..16405a0f --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_tplvalues.tpl b/install-bundler/install-redis/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..a8ed7637 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_utils.tpl b/install-bundler/install-redis/redis/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..bfbddf05 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. 
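Editorial note (illustrative, with assumed names): for a Secret "my-release-redis" with field "redis-password" in the "default" namespace, the helper below renders a ready-to-copy command such as:

  export REDIS_PASSWORD=$(kubectl get secret --namespace "default" my-release-redis -o jsonpath="{.data.redis-password}" | base64 -d)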
+Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376). +Usage: +{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }} +*/}} +{{- define "common.utils.checksumTemplate" -}} +{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}} +{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/_warnings.tpl b/install-bundler/install-redis/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..66dffc1f --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_cassandra.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..eda9aada --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_mariadb.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..17d83a2f --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_mongodb.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..bbb445b8 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_mysql.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..ca3953f8 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
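+(Editor's note, not upstream text: when "subchart" is not set, this helper returns the negation of .Values.enabled, so it evaluates to "true" for a standalone chart that does not define an enabled flag of its own.)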
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_postgresql.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..8c9aa570 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. 
+ +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_redis.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..fc0d208d --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) 
"auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/templates/validations/_validations.tpl b/install-bundler/install-redis/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..31ceda87 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/install-bundler/install-redis/redis/charts/common/values.yaml b/install-bundler/install-redis/redis/charts/common/values.yaml new file mode 100644 index 00000000..9abe0e15 --- /dev/null +++ b/install-bundler/install-redis/redis/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. 
+## @skip exampleValue +## +exampleValue: common-chart diff --git a/install-bundler/install-redis/redis/img/redis-cluster-topology.png b/install-bundler/install-redis/redis/img/redis-cluster-topology.png new file mode 100644 index 00000000..f0a02a9f Binary files /dev/null and b/install-bundler/install-redis/redis/img/redis-cluster-topology.png differ diff --git a/install-bundler/install-redis/redis/img/redis-topology.png b/install-bundler/install-redis/redis/img/redis-topology.png new file mode 100644 index 00000000..3f5280fe Binary files /dev/null and b/install-bundler/install-redis/redis/img/redis-topology.png differ diff --git a/install-bundler/install-redis/redis/templates/NOTES.txt b/install-bundler/install-redis/redis/templates/NOTES.txt new file mode 100644 index 00000000..2623ade3 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/NOTES.txt @@ -0,0 +1,191 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + +For Redis: + + /opt/bitnami/scripts/redis/entrypoint.sh /opt/bitnami/scripts/redis/run.sh + +{{- if .Values.sentinel.enabled }} + +For Redis Sentinel: + + /opt/bitnami/scripts/redis-sentinel/entrypoint.sh /opt/bitnami/scripts/redis-sentinel/run.sh + +{{- end }} +{{- else }} + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.auth.enabled }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "auth.enabled=false" you have + most likely exposed the Redis® service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "auth.enabled=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if eq .Values.architecture "replication" }} +{{- if .Values.sentinel.enabled }} + +Redis® can be accessed via port {{ .Values.sentinel.service.ports.redis }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis® Sentinel cluster, which is available in port {{ .Values.sentinel.service.ports.sentinel }} using the same domain name above. 
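+
+A minimal illustration (an editor's addition, not part of the upstream chart text): assuming Sentinel does not require authentication, you can ask Sentinel which node currently holds the master role with:
+
+  redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }} SENTINEL get-master-addr-by-name {{ .Values.sentinel.masterSet }}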
+ +{{- else }} + +Redis® can be accessed on the following DNS names from within your cluster: + + {{ printf "%s-master.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read/write operations (port {{ .Values.master.service.ports.redis }}) + {{ printf "%s-replicas.%s.svc.%s" (include "common.names.fullname" .) .Release.Namespace .Values.clusterDomain }} for read-only operations (port {{ .Values.replica.service.ports.redis }}) + +{{- end }} +{{- else }} + +Redis® can be accessed via port {{ .Values.master.service.ports.redis }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.auth.enabled }} + +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 -d) + +{{- end }} + +To connect to your Redis® server: + +1. Run a Redis® pod that you can use as a client: + + kubectl run --namespace {{ .Release.Namespace }} redis-client --restart='Never' {{ if .Values.auth.enabled }} --env REDIS_PASSWORD=$REDIS_PASSWORD {{ end }} --image {{ template "redis.image" . }} --command -- sleep infinity + +{{- if .Values.tls.enabled }} + + Copy your TLS certificates to the pod: + + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert redis-client:/tmp/client.cert + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key redis-client:/tmp/client.key + kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert redis-client:/tmp/CA.cert + +{{- end }} + + Use the following command to attach to the pod: + + kubectl exec --tty -i redis-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "common.names.fullname" . }}-client=true" \{{- end }} + --namespace {{ .Release.Namespace }} -- bash + +2. Connect using the Redis® CLI: + +{{- if eq .Values.architecture "replication" }} + {{- if .Values.sentinel.enabled }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.redis }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . }} -p {{ .Values.sentinel.service.ports.sentinel }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-master" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ printf "%s-replicas" (include "common.names.fullname" .) }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h {{ template "common.names.fullname" . 
}}-master{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + +Note: Since NetworkPolicy is enabled, only pods with label {{ template "common.names.fullname" . }}-client=true" will be able to connect to redis. + +{{- else }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{- if contains "NodePort" .Values.sentinel.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.sentinel.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.sentinel.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "common.names.fullname" . }} {{ .Values.sentinel.service.ports.redis }}:{{ .Values.sentinel.service.ports.redis }} & + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.sentinel.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- else }} +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ printf "%s-master" (include "common.names.fullname" .) }}) + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "common.names.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ printf "%s-master" (include "common.names.fullname" .) }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h $SERVICE_IP -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ printf "%s-master" (include "common.names.fullname" .) }} {{ .Values.master.service.ports.redis }}:{{ .Values.master.service.ports.redis }} & + {{ if .Values.auth.enabled }}REDISCLI_AUTH="$REDIS_PASSWORD" {{ end }}redis-cli -h 127.0.0.1 -p {{ .Values.master.service.ports.redis }} {{- if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{- end }} +{{- end }} +{{- include "redis.checkRollingTags" . }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.sysctl.image }} +{{- include "redis.validateValues" . }} + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Release.IsUpgrade ) }} +{{- if $.Values.sentinel.service.nodePorts.sentinel }} +No need to upgrade, ports and nodeports have been set from values +{{- else }} +#!#!#!#!#!#!#!# IMPORTANT #!#!#!#!#!#!#!# +YOU NEED TO PERFORM AN UPGRADE FOR THE SERVICES AND WORKLOAD TO BE CREATED +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/_helpers.tpl b/install-bundler/install-redis/redis/templates/_helpers.tpl new file mode 100644 index 00000000..7ca7e043 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/_helpers.tpl @@ -0,0 +1,328 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper Redis image name +*/}} +{{- define "redis.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Redis Sentinel image name +*/}} +{{- define "redis.sentinel.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sentinel.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.metrics.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.sysctl.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.sentinel.image .Values.metrics.image .Values.volumePermissions.image .Values.sysctl.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret object should be created +*/}} +{{- define "redis.createTlsSecret" -}} +{{- if and .Values.tls.enabled .Values.tls.autoGenerated (and (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret)) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing Redis TLS certificates +*/}} +{{- define "redis.tlsSecretName" -}} +{{- $secretName := coalesce .Values.tls.existingSecret .Values.tls.certificatesSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- if (include "redis.createTlsSecret" . ) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "tls.crt" -}} +{{- else -}} + {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- if (include "redis.createTlsSecret" . ) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "tls.key" -}} +{{- else -}} + {{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- if (include "redis.createTlsSecret" . ) -}} + {{- printf "/opt/bitnami/redis/certs/%s" "ca.crt" -}} +{{- else -}} + {{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the shared service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the master service account to use +*/}} +{{- define "redis.masterServiceAccountName" -}} +{{- if .Values.master.serviceAccount.create -}} + {{ default (printf "%s-master" (include "common.names.fullname" .)) .Values.master.serviceAccount.name }} +{{- else -}} + {{- if .Values.serviceAccount.create -}} + {{ template "redis.serviceAccountName" . 
}} + {{- else -}} + {{ default "default" .Values.master.serviceAccount.name }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the replicas service account to use +*/}} +{{- define "redis.replicaServiceAccountName" -}} +{{- if .Values.replica.serviceAccount.create -}} + {{ default (printf "%s-replica" (include "common.names.fullname" .)) .Values.replica.serviceAccount.name }} +{{- else -}} + {{- if .Values.serviceAccount.create -}} + {{ template "redis.serviceAccountName" . }} + {{- else -}} + {{ default "default" .Values.replica.serviceAccount.name }} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configuration configmap name +*/}} +{{- define "redis.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "redis.createConfigmap" -}} +{{- if empty .Values.existingConfigmap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.auth.existingSecret -}} +{{- printf "%s" (tpl .Values.auth.existingSecret $) -}} +{{- else -}} +{{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis® secret. +*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.auth.existingSecret .Values.auth.existingSecretPasswordKey -}} +{{- printf "%s" (tpl .Values.auth.existingSecretPasswordKey $) -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return Redis® password +*/}} +{{- define "redis.password" -}} +{{- if or .Values.auth.enabled .Values.global.redis.password }} + {{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} + {{- else if not (empty .Values.auth.password) -}} + {{- .Values.auth.password -}} + {{- else -}} + {{- include "getValueFromSecret" (dict "Namespace" .Release.Namespace "Name" (include "redis.secretName" .) "Length" 10 "Key" (include "redis.secretPasswordKey" .)) -}} + {{- end -}} +{{- end -}} +{{- end }} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.sentinel.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.topologySpreadConstraints" .) -}} +{{- $messages := append $messages (include "redis.validateValues.architecture" .) -}} +{{- $messages := append $messages (include "redis.validateValues.podSecurityPolicy.create" .) -}} +{{- $messages := append $messages (include "redis.validateValues.tls" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.topologySpreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.replica.topologySpreadConstraints -}} +redis: topologySpreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - must provide a valid architecture */}} +{{- define "redis.validateValues.architecture" -}} +{{- if and (ne .Values.architecture "standalone") (ne .Values.architecture "replication") -}} +redis: architecture + Invalid architecture selected. Valid values are "standalone" and + "replication". Please set a valid architecture (--set architecture="xxxx") +{{- end -}} +{{- if and .Values.sentinel.enabled (not (eq .Values.architecture "replication")) }} +redis: architecture + Using redis sentinel on standalone mode is not supported. + To deploy redis sentinel, please select the "replication" mode + (--set "architecture=replication,sentinel.enabled=true") +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - PodSecurityPolicy create */}} +{{- define "redis.validateValues.podSecurityPolicy.create" -}} +{{- if and .Values.podSecurityPolicy.create (not .Values.podSecurityPolicy.enabled) }} +redis: podSecurityPolicy.create + In order to create PodSecurityPolicy, you also need to enable + podSecurityPolicy.enabled field +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis® - TLS enabled */}} +{{- define "redis.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.tls.autoGenerated) (not .Values.tls.existingSecret) (not .Values.tls.certificatesSecret) }} +redis: tls.enabled + In order to enable TLS, you also need to provide + an existing secret containing the TLS certificates or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* Define the suffix utilized for external-dns */}} +{{- define "redis.externalDNS.suffix" -}} +{{ printf "%s.%s" (include "common.names.fullname" .) .Values.useExternalDNS.suffix }} +{{- end -}} + +{{/* Compile all annotations utilized for external-dns */}} +{{- define "redis.externalDNS.annotations" -}} +{{- if and .Values.useExternalDNS.enabled .Values.useExternalDNS.annotationKey }} +{{ .Values.useExternalDNS.annotationKey }}hostname: {{ include "redis.externalDNS.suffix" . }} +{{- range $key, $val := .Values.useExternalDNS.additionalAnnotations }} +{{ $.Values.useExternalDNS.annotationKey }}{{ $key }}: {{ $val | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/configmap.yaml b/install-bundler/install-redis/redis/templates/configmap.yaml new file mode 100644 index 00000000..c616599c --- /dev/null +++ b/install-bundler/install-redis/redis/templates/configmap.yaml @@ -0,0 +1,61 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "redis.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-configuration" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + redis.conf: |- + # User-supplied common configuration: + {{- if .Values.commonConfiguration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonConfiguration "context" $ ) | nindent 4 }} + {{- end }} + # End of common configuration + master.conf: |- + dir {{ .Values.master.persistence.path }} + # User-supplied master configuration: + {{- if .Values.master.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.master.disableCommands }} + {{- range .Values.master.disableCommands }} + rename-command {{ . }} "" + {{- end }} + {{- end }} + # End of master configuration + replica.conf: |- + dir {{ .Values.replica.persistence.path }} + # User-supplied replica configuration: + {{- if .Values.replica.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.configuration "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.replica.disableCommands }} + {{- range .Values.replica.disableCommands }} + rename-command {{ . }} "" + {{- end }} + {{- end }} + # End of replica configuration + {{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + port {{ .Values.sentinel.containerPorts.sentinel }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "common.names.fullname" . }}-node-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.sentinel.service.ports.redis }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} + # User-supplied sentinel configuration: + {{- if .Values.sentinel.configuration }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.configuration "context" $ ) | nindent 4 }} + {{- end }} + # End of sentinel configuration + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/extra-list.yaml b/install-bundler/install-redis/redis/templates/extra-list.yaml new file mode 100644 index 00000000..2d35a580 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/headless-svc.yaml b/install-bundler/install-redis/redis/templates/headless-svc.yaml new file mode 100644 index 00000000..bd6121de --- /dev/null +++ b/install-bundler/install-redis/redis/templates/headless-svc.yaml @@ -0,0 +1,33 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + annotations: + {{- if or .Values.sentinel.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.sentinel.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} + {{- include "redis.externalDNS.annotations" . | nindent 4 }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-redis + port: {{ if .Values.sentinel.enabled }}{{ .Values.sentinel.service.ports.redis }}{{ else }}{{ .Values.master.service.ports.redis }}{{ end }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.ports.sentinel }} + targetPort: redis-sentinel + {{- end }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} diff --git a/install-bundler/install-redis/redis/templates/health-configmap.yaml b/install-bundler/install-redis/redis/templates/health-configmap.yaml new file mode 100644 index 00000000..95ade5c4 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/health-configmap.yaml @@ -0,0 +1,194 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + ping_readiness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 15 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 15 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$?" 
-eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash + +{{- if .Values.auth.sentinel }} + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" +{{- end }} + response=$( + timeout -s 15 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert "$REDIS_SENTINEL_TLS_CA_FILE" \ + {{- if .Values.tls.authClients }} + --cert "$REDIS_SENTINEL_TLS_CERT_FILE" \ + --key "$REDIS_SENTINEL_TLS_KEY_FILE" \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 15 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash + + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 15 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$?" -eq "124" ]; then + echo "Timed out" + exit 1 + fi + responseFirstWord=$(echo $response | head -n1 | awk '{print $1;}') + if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? 
+ exit $exit_status diff --git a/install-bundler/install-redis/redis/templates/master/application.yaml b/install-bundler/install-redis/redis/templates/master/application.yaml new file mode 100644 index 00000000..4b10b80d --- /dev/null +++ b/install-bundler/install-redis/redis/templates/master/application.yaml @@ -0,0 +1,523 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or (not (eq .Values.architecture "replication")) (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: {{ .Values.master.kind }} +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.master.count }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: master + {{- if (eq .Values.master.kind "StatefulSet") }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- end }} + {{- if .Values.master.updateStrategy }} + {{- if (eq .Values.master.kind "Deployment") }} + strategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- else }} + updateStrategy: {{- toYaml .Values.master.updateStrategy | nindent 4 }} + {{- end }} + {{- if and .Values.master.minReadySeconds (semverCompare ">= 1.23-0" (include "common.capabilities.kubeVersion" .)) }} + minReadySeconds: {{ .Values.master.minReadySeconds }} + {{- end }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: master + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ pick ( include (print $.Template.BasePath "/configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- end }} + checksum/health: {{ pick ( include (print $.Template.BasePath "/health-configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + checksum/scripts: {{ pick ( include (print $.Template.BasePath "/scripts-configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + checksum/secret: {{ pick ( include (print $.Template.BasePath "/secret.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.podSecurityContext.enabled }} + securityContext: {{- omit .Values.master.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.masterServiceAccountName" . }} + automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.master.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAffinityPreset "component" "master" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.master.podAntiAffinityPreset "component" "master" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.master.nodeAffinityPreset.type "key" .Values.master.nodeAffinityPreset.key "values" .Values.master.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.master.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.master.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.master.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName | quote }} + {{- end }} + {{- if .Values.master.dnsPolicy }} + dnsPolicy: {{ .Values.master.dnsPolicy }} + {{- end }} + {{- if .Values.master.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.dnsConfig "context" $) | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.master.enableServiceLinks }} + terminationGracePeriodSeconds: {{ .Values.master.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.master.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.master.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.master.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.master.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.master.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.master.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.master.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.master.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: master + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.master.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.master.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.master.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.master.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.master.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- end }} + {{- if .Values.master.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.master.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.master.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.master.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.master.resources }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + {{- if .Values.master.persistence.subPath }} + subPath: {{ .Values.master.persistence.subPath }} + {{- else if .Values.master.persistence.subPathExpr }} + subPathExpr: {{ .Values.master.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: 
/opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.master.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.master.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.master.podSecurityContext.enabled .Values.master.containerSecurityContext.enabled }} + {{- if or .Values.master.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.master.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.master.containerSecurityContext.runAsUser }}:{{ .Values.master.podSecurityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + {{- if .Values.master.persistence.subPath }} + subPath: {{ .Values.master.persistence.subPath }} + {{- else if .Values.master.persistence.subPathExpr }} + subPathExpr: {{ .Values.master.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }} + emptyDir: + {{- if .Values.master.persistence.medium }} + medium: {{ .Values.master.persistence.medium | quote }} + {{- end }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }} + emptyDir: + {{- if .Values.master.persistence.medium }} + medium: {{ .Values.master.persistence.medium | quote }} + {{- end }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.master.persistence.enabled }} + - name: redis-data + {{- if or .Values.master.persistence.medium .Values.master.persistence.sizeLimit }} + emptyDir: + {{- if .Values.master.persistence.medium }} + medium: {{ .Values.master.persistence.medium | quote }} + {{- end }} + {{- if .Values.master.persistence.sizeLimit }} + sizeLimit: {{ .Values.master.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.master.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.master.persistence.existingClaim .) }} + {{- else if (eq .Values.master.kind "Deployment") }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + {{- else }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: redis-data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/master/psp.yaml b/install-bundler/install-redis/redis/templates/master/psp.yaml new file mode 100644 index 00000000..368a2193 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/master/psp.yaml @@ -0,0 +1,47 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (include "common.capabilities.psp.supported" .) .Values.podSecurityPolicy.create }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.podSecurityContext.fsGroup }} + max: {{ .Values.master.podSecurityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.master.containerSecurityContext.runAsUser }} + max: {{ .Values.master.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/master/pvc.yaml b/install-bundler/install-redis/redis/templates/master/pvc.yaml new file mode 100644 index 00000000..5c60d069 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/master/pvc.yaml @@ -0,0 +1,33 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "standalone") (eq .Values.master.kind "Deployment") (.Values.master.persistence.enabled) (not .Values.master.persistence.existingClaim) }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ printf "redis-data-%s-master" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if .Values.master.persistence.annotations }} + annotations: {{- toYaml .Values.master.persistence.annotations | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{- if .Values.master.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.selector "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.master.persistence.dataSource "context" $) | nindent 4 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.master.persistence "global" .Values.global) | nindent 2 }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/master/service.yaml b/install-bundler/install-redis/redis/templates/master/service.yaml new file mode 100644 index 00000000..091e97fe --- /dev/null +++ b/install-bundler/install-redis/redis/templates/master/service.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-master" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master + {{- if or .Values.master.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.master.service.type }} + {{- if or (eq .Values.master.service.type "LoadBalancer") (eq .Values.master.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }} + internalTrafficPolicy: {{ .Values.master.service.internalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") (not (empty .Values.master.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ toYaml .Values.master.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and .Values.master.service.clusterIP (eq .Values.master.service.type "ClusterIP") }} + clusterIP: {{ .Values.master.service.clusterIP }} + {{- end }} + {{- if .Values.master.service.sessionAffinity }} + sessionAffinity: {{ .Values.master.service.sessionAffinity }} + {{- end }} + {{- if .Values.master.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.master.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.master.service.externalIPs }} + externalIPs: {{- include "common.tplvalues.render" (dict "value" .Values.master.service.externalIPs "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.master.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.master.service.type "NodePort") (eq .Values.master.service.type "LoadBalancer")) .Values.master.service.nodePorts.redis}} + nodePort: {{ .Values.master.service.nodePorts.redis}} + {{- else if eq .Values.master.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.master.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.master.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: master +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/master/serviceaccount.yaml b/install-bundler/install-redis/redis/templates/master/serviceaccount.yaml new file mode 100644 index 00000000..bb6c42ae --- /dev/null +++ b/install-bundler/install-redis/redis/templates/master/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.master.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.master.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.masterServiceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.master.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/metrics-svc.yaml b/install-bundler/install-redis/redis/templates/metrics-svc.yaml new file mode 100644 index 00000000..7d1d683d --- /dev/null +++ b/install-bundler/install-redis/redis/templates/metrics-svc.yaml @@ -0,0 +1,41 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and .Values.metrics.service.clusterIP (eq .Values.metrics.service.type "ClusterIP") }} + clusterIP: {{ .Values.metrics.service.clusterIP }} + {{- end }} + {{- if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.service.port }} + protocol: TCP + targetPort: metrics + {{- if .Values.metrics.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/networkpolicy.yaml b/install-bundler/install-redis/redis/templates/networkpolicy.yaml new file mode 100644 index 00000000..bd8594e3 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/networkpolicy.yaml @@ -0,0 +1,105 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + policyTypes: + - Ingress + {{- if or (eq .Values.architecture "replication") .Values.networkPolicy.extraEgress }} + - Egress + egress: + {{- if eq .Values.architecture "replication" }} + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + to: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- end }} + {{- if .Values.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.master.containerPorts.redis }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.containerPorts.sentinel }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 14 }} + {{- if or .Values.networkPolicy.ingressNSMatchLabels .Values.networkPolicy.ingressNSPodMatchLabels }} + - namespaceSelector: + matchLabels: + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{ else }} + {} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- if not .Values.networkPolicy.metrics.allowExternal }} + from: + {{- if or .Values.networkPolicy.metrics.ingressNSMatchLabels .Values.networkPolicy.metrics.ingressNSPodMatchLabels }} + - namespaceSelector: + matchLabels: + {{- if .Values.networkPolicy.metrics.ingressNSMatchLabels }} + {{- range $key, $value := .Values.networkPolicy.metrics.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{ else }} + {} + {{- end }} + {{- if .Values.networkPolicy.metrics.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.metrics.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/pdb.yaml b/install-bundler/install-redis/redis/templates/pdb.yaml new file mode 100644 index 00000000..3306a8ce --- /dev/null +++ b/install-bundler/install-redis/redis/templates/pdb.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/prometheusrule.yaml b/install-bundler/install-redis/redis/templates/prometheusrule.yaml new file mode 100644 index 00000000..73c89e65 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.rules "context" $ ) | nindent 8 }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/replicas/hpa.yaml b/install-bundler/install-redis/redis/templates/replicas/hpa.yaml new file mode 100644 index 00000000..37ecc831 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/replicas/hpa.yaml @@ -0,0 +1,49 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.replica.autoscaling.enabled (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/replicas/service.yaml b/install-bundler/install-redis/redis/templates/replicas/service.yaml new file mode 100644 index 00000000..415771b6 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/replicas/service.yaml @@ -0,0 +1,56 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: replica + {{- if or .Values.replica.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.service.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.replica.service.type }} + {{- if or (eq .Values.replica.service.type "LoadBalancer") (eq .Values.replica.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.replica.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if (semverCompare ">=1.22-0" (include "common.capabilities.kubeVersion" .)) }} + internalTrafficPolicy: {{ .Values.replica.service.internalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.replica.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.replica.service.type "LoadBalancer") (not (empty .Values.replica.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ toYaml .Values.replica.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and .Values.replica.service.clusterIP (eq .Values.replica.service.type "ClusterIP") }} + clusterIP: {{ .Values.replica.service.clusterIP }} + {{- end }} + {{- if .Values.replica.service.sessionAffinity }} + sessionAffinity: {{ .Values.replica.service.sessionAffinity }} + {{- end }} + {{- if .Values.replica.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.replica.service.ports.redis }} + targetPort: redis + {{- if and (or (eq .Values.replica.service.type "NodePort") (eq .Values.replica.service.type "LoadBalancer")) .Values.replica.service.nodePorts.redis}} + nodePort: {{ .Values.replica.service.nodePorts.redis}} + {{- else if eq .Values.replica.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.replica.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: replica +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/replicas/serviceaccount.yaml b/install-bundler/install-redis/redis/templates/replicas/serviceaccount.yaml new file mode 100644 index 00000000..616e8bc8 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/replicas/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.replica.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.replica.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.replicaServiceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.replica.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/replicas/statefulset.yaml b/install-bundler/install-redis/redis/templates/replicas/statefulset.yaml new file mode 100644 index 00000000..e7a92732 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/replicas/statefulset.yaml @@ -0,0 +1,520 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replication") (not .Values.sentinel.enabled) }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-replicas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.replica.autoscaling.enabled }} + replicas: {{ .Values.replica.replicaCount }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: replica + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if and .Values.replica.minReadySeconds (semverCompare ">= 1.23-0" (include "common.capabilities.kubeVersion" .)) }} + minReadySeconds: {{ .Values.replica.minReadySeconds }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: replica + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ pick ( include (print $.Template.BasePath "/configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- end }} + checksum/health: {{ pick ( include (print $.Template.BasePath "/health-configmap.yaml") . 
| fromYaml ) "data" | toYaml | sha256sum }} + checksum/scripts: {{ pick ( include (print $.Template.BasePath "/scripts-configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + checksum/secret: {{ pick ( include (print $.Template.BasePath "/secret.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.replicaServiceAccountName" . }} + automountServiceAccountToken: {{ .Values.replica.serviceAccount.automountServiceAccountToken }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "replica" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "replica" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.replica.enableServiceLinks }} + terminationGracePeriodSeconds: {{ .Values.replica.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-replica.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_REPLICATION_MODE + value: replica + - name: REDIS_MASTER_HOST + {{- if and (eq (int64 .Values.master.count) 1) (ne .Values.master.kind "Deployment") }} + value: {{ template "common.names.fullname" . }}-master-0.{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + {{- else }} + value: {{ template "common.names.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + {{- end }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.master.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + {{- end }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.replica.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: redis + {{- end }} + {{- if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: 
redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ include "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . }} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + {{- if or .Values.replica.persistence.medium .Values.replica.persistence.sizeLimit }} + emptyDir: + {{- if .Values.replica.persistence.medium }} + medium: {{ .Values.replica.persistence.medium | quote }} + {{- end }} + {{- if .Values.replica.persistence.sizeLimit }} + sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if or .Values.replica.persistence.medium .Values.replica.persistence.sizeLimit }} + emptyDir: + {{- if .Values.replica.persistence.medium }} + medium: {{ .Values.replica.persistence.medium | quote }} + {{- end }} + {{- if .Values.replica.persistence.sizeLimit }} + sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.replica.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }} + {{- else }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: redis-data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.master.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + app.kubernetes.io/component: replica + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.replica.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.replica.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/role.yaml b/install-bundler/install-redis/redis/templates/role.yaml new file mode 100644 index 00000000..be042294 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/role.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if and (include "common.capabilities.psp.supported" .) .Values.podSecurityPolicy.enabled }} + - apiGroups: + - '{{ template "podSecurityPolicy.apiGroup" . }}' + resources: + - 'podsecuritypolicies' + verbs: + - 'use' + resourceNames: [{{ printf "%s-master" (include "common.names.fullname" .) 
}}] + {{- end }} + {{- if .Values.rbac.rules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.rbac.rules "context" $ ) | nindent 2 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/rolebinding.yaml b/install-bundler/install-redis/redis/templates/rolebinding.yaml new file mode 100644 index 00000000..7a1043e1 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "common.names.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/scripts-configmap.yaml b/install-bundler/install-redis/redis/templates/scripts-configmap.yaml new file mode 100644 index 00000000..45847655 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/scripts-configmap.yaml @@ -0,0 +1,757 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/libvalidations.sh + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + full_hostname="${hostname}.{{- include "redis.externalDNS.suffix" . 
}}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + full_hostname="${hostname}.{{- .Release.Namespace }}" + {{- else }} + full_hostname="${hostname}.${HEADLESS_SERVICE}" + {{- end }} + + {{- if .Values.useHostnames }} + echo "${full_hostname}" + {{- else }} + retry_count=0 + until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do + if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then + sleep {{ .Values.nameResolutionTimeout }} + else + error "IP address for ${full_hostname} not found" + exit 1 + fi + ((retry_count++)) + done + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [ -n "$REDIS_EXTERNAL_MASTER_HOST" ]; then + REDIS_SERVICE="$REDIS_EXTERNAL_MASTER_HOST" + else + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + fi + + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "SENTINEL") + validate_quorum() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel master {{ .Values.sentinel.masterSet }}" + else + quorum_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel master {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $quorum_info_command" + eval $quorum_info_command | grep -Fq "s_down" + } + + trigger_manual_failover() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel failover {{ .Values.sentinel.masterSet }}" + else + failover_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel failover {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $failover_command" + eval $failover_command + } + + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + info "about to run the command: $sentinel_info_command" + retry_while "eval $sentinel_info_command" 2 1 + } + + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 
0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + + # check if there is a master + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + master_port_in_persisted_conf="$REDIS_MASTER_PORT_NUMBER" + master_in_sentinel="$(get_sentinel_master_info)" + redisRetVal=$? + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + master_port_in_persisted_conf="$(awk '/monitor/ {print $5}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master ${master_in_persisted_conf}:${master_port_in_persisted_conf} in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + touch /opt/bitnami/redis-sentinel/etc/.node_read + fi + {{- end }} + + if [[ $redisRetVal -ne 0 ]]; then + if [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 1: No active sentinel and in previous sentinel.conf we were the master --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 2: No active sentinel and in previous sentinel.conf we were not master --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="replica" + REDIS_MASTER_HOST=${master_in_persisted_conf} + REDIS_MASTER_PORT_NUMBER=${master_port_in_persisted_conf} + fi + else + # Fetches current master's host and port + REDIS_SENTINEL_INFO=($(get_sentinel_master_info)) + info "Current master: REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})" + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + if [[ "$REDIS_MASTER_HOST" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # Case 3: Active sentinel and master it is this node --> MASTER + info "Configuring the node as master" + export REDIS_REPLICATION_MODE="master" + else + # Case 4: Active sentinel and master is not this node --> REPLICA + info "Configuring the node as replica" + export REDIS_REPLICATION_MODE="replica" + + {{- if and .Values.sentinel.automateClusterRecovery (le (int .Values.sentinel.downAfterMilliseconds) 2000) }} + retry_count=1 + while validate_quorum + do + info "sleeping, waiting for Redis master to come up" + sleep 1s + if ! 
((retry_count % 11)); then + info "Trying to manually failover" + failover_result=$(trigger_manual_failover) + + debug "Failover result: $failover_result" + fi + + ((retry_count+=1)) + done + info "Redis master is up now" + {{- end }} + fi + fi + + if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then + REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST" + REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}" + fi + + if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" = "slave" ]] || [[ "$REDIS_REPLICATION_MODE" = "replica" ]]; then + ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + + {{- if .Values.replica.preExecCmds }} + {{- .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libfile.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + full_hostname="${hostname}.{{- include "redis.externalDNS.suffix" . 
}}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + full_hostname="${hostname}.{{- .Release.Namespace }}" + {{- else }} + full_hostname="${hostname}.${HEADLESS_SERVICE}" + {{- end }} + + {{- if .Values.useHostnames }} + echo "${full_hostname}" + {{- else }} + retry_count=0 + until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do + if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then + sleep {{ .Values.nameResolutionTimeout }} + else + error "IP address for ${full_hostname} not found" + exit 1 + fi + ((retry_count++)) + done + {{- end }} + } + + SERVPORT=$(get_port "$HOSTNAME" "SENTINEL") + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + SENTINEL_SERVICE_PORT=$(get_port "{{ include "common.names.fullname" . }}" "SENTINEL") + + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + get_sentinel_master_info() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="{{- if and .Values.auth.enabled .Values.auth.sentinel }}REDISCLI_AUTH="\$REDIS_PASSWORD" {{ end }}timeout {{ .Values.sentinel.getMasterTimeout }} redis-cli -h $REDIS_SERVICE -p $SENTINEL_SERVICE_PORT sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + info "about to run the command: $sentinel_info_command" + retry_while "eval $sentinel_info_command" 2 1 + } + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + + master_in_persisted_conf="$(get_full_hostname "$HOSTNAME")" + + {{- if .Values.sentinel.persistence.enabled }} + if [[ -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + check_lock_file() { + [[ -f /opt/bitnami/redis-sentinel/etc/.node_read ]] + } + retry_while "check_lock_file" + rm -f /opt/bitnami/redis-sentinel/etc/.node_read + master_in_persisted_conf="$(awk '/monitor/ {print $4}' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + info "Found previous master $master_in_persisted_conf in /opt/bitnami/redis-sentinel/etc/sentinel.conf" + debug "$(cat /opt/bitnami/redis-sentinel/etc/sentinel.conf | grep monitor)" + fi + {{- end }} + if ! 
get_sentinel_master_info && [[ "$master_in_persisted_conf" == "$(get_full_hostname "$HOSTNAME")" ]]; then + # No master found, lets create a master node + export REDIS_REPLICATION_MODE="master" + + REDIS_MASTER_HOST=$(get_full_hostname "$HOSTNAME") + REDIS_MASTER_PORT_NUMBER="$REDISPORT" + else + export REDIS_REPLICATION_MODE="replica" + + # Fetches current master's host and port + REDIS_SENTINEL_INFO=($(get_sentinel_master_info)) + info "printing REDIS_SENTINEL_INFO=(${REDIS_SENTINEL_INFO[0]},${REDIS_SENTINEL_INFO[1]})" + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + fi + + if [[ -n "$REDIS_EXTERNAL_MASTER_HOST" ]]; then + REDIS_MASTER_HOST="$REDIS_EXTERNAL_MASTER_HOST" + REDIS_MASTER_PORT_NUMBER="${REDIS_EXTERNAL_MASTER_PORT}" + fi + + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.auth.enabled }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if and .Values.auth.enabled .Values.auth.sentinel }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + + if [[ -z "$REDIS_MASTER_HOST" ]] || [[ -z "$REDIS_MASTER_PORT_NUMBER" ]] + then + # Prevent incorrect configuration to be written to sentinel.conf + error "Redis master host is configured incorrectly (host: $REDIS_MASTER_HOST, port: $REDIS_MASTER_PORT_NUMBER)" + exit 1 + fi + + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_known_sentinel() { + hostname="$1" + ip="$2" + + if [[ -n "$hostname" && -n "$ip" && "$hostname" != "$HOSTNAME" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "SENTINEL") $(host_id "$hostname")" + fi + } + add_known_replica() { + hostname="$1" + ip="$2" + + if [[ -n "$ip" && "$(get_full_hostname "$hostname")" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $(get_full_hostname "$hostname") $(get_port "$hostname" "REDIS")" + fi + } + + # Add available hosts on the network as known replicas & sentinels + for node in $(seq 0 $(({{ .Values.replica.replicaCount }}-1))); do + hostname="{{ template "common.names.fullname" . 
}}-node-$node" + ip="$(getent hosts "$hostname.$HEADLESS_SERVICE" | awk '{ print $1 }')" + add_known_sentinel "$hostname" "$ip" + add_known_replica "$hostname" "$ip" + done + + echo "" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if not (contains "sentinel announce-hostnames" .Values.sentinel.configuration) }} + echo "sentinel announce-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel resolve-hostnames" .Values.sentinel.configuration) }} + echo "sentinel resolve-hostnames yes" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel announce-port" .Values.sentinel.configuration) }} + echo "sentinel announce-port $SERVPORT" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- if not (contains "sentinel announce-ip" .Values.sentinel.configuration) }} + echo "sentinel announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} --sentinel + prestop-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libos.sh + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + full_hostname="${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + full_hostname="${hostname}.{{- .Release.Namespace }}" + {{- else }} + full_hostname="${hostname}.${HEADLESS_SERVICE}" + {{- end }} + + {{- if .Values.useHostnames }} + echo "${full_hostname}" + {{- else }} + retry_count=0 + until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do + if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then + sleep {{ .Values.nameResolutionTimeout }} + else + error "IP address for ${full_hostname} not found" + exit 1 + fi + ((retry_count++)) + done + {{- end }} + } + + run_sentinel_command() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + redis-cli -h "$REDIS_SERVICE" -p "$REDIS_SENTINEL_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@" + else + redis-cli -h "$REDIS_SERVICE" -p "$REDIS_SENTINEL_PORT" sentinel "$@" + fi + } + sentinel_failover_finished() { + REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}")) + REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}" + [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]] + } + + REDIS_SERVICE="{{ include "common.names.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + {{ if .Values.auth.sentinel -}} + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")" + {{- end }} + + if ! sentinel_failover_finished; then + echo "I am the master pod and you are stopping me. Starting sentinel failover" + if retry_while "sentinel_failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1; then + echo "Master has been successfuly failed over to a different pod." + exit 0 + else + echo "Master failover failed" + exit 1 + fi + else + exit 0 + fi + prestop-redis.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + . /opt/bitnami/scripts/libos.sh + + run_redis_command() { + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + redis-cli -h 127.0.0.1 -p "$REDIS_TLS_PORT" --tls --cert "$REDIS_TLS_CERT_FILE" --key "$REDIS_TLS_KEY_FILE" --cacert "$REDIS_TLS_CA_FILE" "$@" + else + redis-cli -h 127.0.0.1 -p "$REDIS_PORT" "$@" + fi + } + is_master() { + REDIS_ROLE=$(run_redis_command role | head -1) + [[ "$REDIS_ROLE" == "master" ]] + } + + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + full_hostname="${hostname}.{{- include "redis.externalDNS.suffix" . }}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + full_hostname="${hostname}.{{- .Release.Namespace }}" + {{- else }} + full_hostname="${hostname}.${HEADLESS_SERVICE}" + {{- end }} + + {{- if .Values.useHostnames }} + echo "${full_hostname}" + {{- else }} + retry_count=0 + until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do + if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then + sleep {{ .Values.nameResolutionTimeout }} + else + error "IP address for ${full_hostname} not found" + exit 1 + fi + ((retry_count++)) + done + {{- end }} + } + + run_sentinel_command() { + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + {{ .Values.auth.sentinel | ternary "" "env -u REDISCLI_AUTH " -}} redis-cli -h "$REDIS_SERVICE" -p "$REDIS_SENTINEL_PORT" --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel "$@" + else + {{ .Values.auth.sentinel | ternary "" "env -u REDISCLI_AUTH " -}} redis-cli -h "$REDIS_SERVICE" -p "$REDIS_SENTINEL_PORT" sentinel "$@" + fi + } + sentinel_failover_finished() { + REDIS_SENTINEL_INFO=($(run_sentinel_command get-master-addr-by-name "{{ .Values.sentinel.masterSet }}")) + REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}" + [[ "$REDIS_MASTER_HOST" != "$(get_full_hostname $HOSTNAME)" ]] + } + + REDIS_SERVICE="{{ include "common.names.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + # redis-cli automatically consumes credentials from the REDISCLI_AUTH variable + [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" + [[ -f "$REDIS_PASSWORD_FILE" ]] && export REDISCLI_AUTH="$(< "${REDIS_PASSWORD_FILE}")" + + + if is_master && ! sentinel_failover_finished; then + echo "I am the master pod and you are stopping me. Pausing client connections." 
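+        # The pause duration passed to CLIENT PAUSE below is (terminationGracePeriodSeconds - 10 + 2)
+        # seconds converted to milliseconds, so writes stay paused slightly longer than the
+        # failover wait window used further down in this script.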
+ # Pausing client write connections to avoid data loss + run_redis_command CLIENT PAUSE "{{ mul (add 2 (sub .Values.sentinel.terminationGracePeriodSeconds 10)) 1000 }}" WRITE + + echo "Issuing failover" + # if I am the master, issue a command to failover once + run_sentinel_command failover "{{ .Values.sentinel.masterSet }}" + + {{- if .Values.sentinel.redisShutdownWaitFailover }} + echo "Waiting for sentinel to complete failover for up to {{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}s" + retry_while "sentinel_failover_finished" "{{ sub .Values.sentinel.terminationGracePeriodSeconds 10 }}" 1 + {{- end }} + else + exit 0 + fi + +{{- else }} + start-master.sh: | + #!/bin/bash + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + {{- if and .Values.master.containerSecurityContext.runAsUser (eq (.Values.master.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if eq .Values.architecture "replication" }} + start-replica.sh: | + #!/bin/bash + + get_port() { + hostname="$1" + type="$2" + + port_var=$(echo "${hostname^^}_SERVICE_PORT_$type" | sed "s/-/_/g") + port=${!port_var} + + if [ -z "$port" ]; then + case $type in + "SENTINEL") + echo {{ .Values.sentinel.containerPorts.sentinel }} + ;; + "REDIS") + echo {{ .Values.master.containerPorts.redis }} + ;; + esac + else + echo $port + fi + } + + get_full_hostname() { + hostname="$1" + + {{- if .Values.useExternalDNS.enabled }} + full_hostname="${hostname}.{{- include "redis.externalDNS.suffix" . 
}}" + {{- else if eq .Values.sentinel.service.type "NodePort" }} + full_hostname="${hostname}.{{- .Release.Namespace }}" + {{- else }} + full_hostname="${hostname}.${HEADLESS_SERVICE}" + {{- end }} + + {{- if .Values.useHostnames }} + echo "${full_hostname}" + {{- else }} + retry_count=0 + until getent hosts "${full_hostname}" | awk '{ print $1; exit }' | grep .; do + if [[ $retry_count -lt {{ .Values.nameResolutionThreshold }} ]]; then + sleep {{ .Values.nameResolutionTimeout }} + else + error "IP address for ${full_hostname} not found" + exit 1 + fi + ((retry_count++)) + done + {{- end }} + } + + REDISPORT=$(get_port "$HOSTNAME" "REDIS") + HEADLESS_SERVICE="{{ template "common.names.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" + [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" + {{- if and .Values.replica.containerSecurityContext.runAsUser (eq (.Values.replica.containerSecurityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.replica.persistence.path }} + {{- end }} + if [[ -f /opt/bitnami/redis/mounted-etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + + echo "" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-port $REDISPORT" >> /opt/bitnami/redis/etc/replica.conf + echo "replica-announce-ip $(get_full_hostname "$HOSTNAME")" >> /opt/bitnami/redis/etc/replica.conf + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--replicaof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.auth.enabled }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.replica.extraFlags }} + {{- range .Values.replica.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.replica.preExecCmds }} + {{ .Values.replica.preExecCmds | nindent 4 }} + {{- end }} + {{- if .Values.replica.command }} + exec {{ .Values.replica.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/secret-svcbind.yaml b/install-bundler/install-redis/redis/templates/secret-svcbind.yaml new file mode 100644 index 00000000..a1bfbe05 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/secret-svcbind.yaml @@ -0,0 +1,37 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceBindings.enabled }} +{{- $host := include "common.names.fullname" . 
}} +{{- if not .Values.sentinel.enabled }} +{{- $host = printf "%s-master" (include "common.names.fullname" .) }} +{{- end }} +{{- $port := print .Values.master.service.ports.redis }} +{{- if .Values.sentinel.enabled }} +{{- $port = print .Values.sentinel.service.ports.redis }} +{{- end }} +{{- $password := include "redis.password" . }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-svcbind + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: servicebinding.io/redis +data: + provider: {{ print "bitnami" | b64enc | quote }} + type: {{ print "redis" | b64enc | quote }} + host: {{ print $host | b64enc | quote }} + port: {{ print $port | b64enc | quote }} + password: {{ print $password | b64enc | quote }} + {{- if $password }} + uri: {{ printf "redis://:%s@%s:%s" $password $host $port | b64enc | quote }} + {{- else }} + uri: {{ printf "redis://%s:%s" $host $port | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/secret.yaml b/install-bundler/install-redis/redis/templates/secret.yaml new file mode 100644 index 00000000..1838c7d4 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/secret.yaml @@ -0,0 +1,25 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.secretAnnotations .Values.commonAnnotations }} + annotations: + {{- if .Values.secretAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.secretAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/install-bundler/install-redis/redis/templates/sentinel/hpa.yaml b/install-bundler/install-redis/redis/templates/sentinel/hpa.yaml new file mode 100644 index 00000000..80859c00 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/sentinel/hpa.yaml @@ -0,0 +1,49 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.replica.autoscaling.enabled .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: replica + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: StatefulSet + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + minReplicas: {{ .Values.replica.autoscaling.minReplicas }} + maxReplicas: {{ .Values.replica.autoscaling.maxReplicas }} + metrics: + {{- if .Values.replica.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.replica.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.replica.autoscaling.targetCPU }} + {{- end }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/sentinel/node-services.yaml b/install-bundler/install-redis/redis/templates/sentinel/node-services.yaml new file mode 100644 index 00000000..721185bc --- /dev/null +++ b/install-bundler/install-redis/redis/templates/sentinel/node-services.yaml @@ -0,0 +1,67 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (or .Release.IsUpgrade .Values.sentinel.service.nodePorts.redis ) }} + +{{- range $i := until (int .Values.replica.replicaCount) }} + +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" $ ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-node-%s-%s" (include "common.names.fullname" $) (toString $i) "redis") }} +{{- else }} +{{- end }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" $ }}-node-{{ $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: node + {{- if or $.Values.commonAnnotations $.Values.sentinel.service.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.sentinel.service.annotations $.Values.commonAnnotations ) "context" $ ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: NodePort + ports: + - name: sentinel + {{- if $.Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.sentinel $i 1) }} + {{- else }} + nodePort: {{ $sentinelport }} + port: {{ $sentinelport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis + {{- if $.Values.sentinel.service.nodePorts.redis }} + nodePort: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + port: {{ (add $.Values.sentinel.service.nodePorts.redis $i 1) }} + {{- else }} + nodePort: {{ $redisport }} + port: {{ $redisport }} + {{- end }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.redis }} + - name: sentinel-internal + nodePort: null + port: {{ $.Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ $.Values.sentinel.containerPorts.sentinel }} + - name: redis-internal + nodePort: null + port: {{ $.Values.replica.containerPorts.redis }} + protocol: TCP + targetPort: {{ $.Values.replica.containerPorts.redis }} + selector: + statefulset.kubernetes.io/pod-name: {{ template "common.names.fullname" $ }}-node-{{ $i }} +--- +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/sentinel/ports-configmap.yaml b/install-bundler/install-redis/redis/templates/sentinel/ports-configmap.yaml new file mode 100644 index 00000000..1c0771a4 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/sentinel/ports-configmap.yaml @@ -0,0 +1,102 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled (eq .Values.sentinel.service.type "NodePort") (not .Values.sentinel.service.nodePorts.redis ) }} +{{- /* create a list to keep track of ports we choose to use */}} +{{ $chosenports := (list ) }} + +{{- /* Get list of all used nodeports */}} +{{ $usedports := (list ) }} +{{- range $index, $service := (lookup "v1" "Service" "" "").items }} + {{- range.spec.ports }} + {{- if .nodePort }} + {{- $usedports = (append $usedports .nodePort) }} + {{- end }} + {{- end }} +{{- end }} + +{{- /* +comments that start with # are rendered in the output when you debug, so you can less and search for them +Vars in the comment will be rendered out, so you can check their value this way. +https://helm.sh/docs/chart_best_practices/templates/#comments-yaml-comments-vs-template-comments + +remove the template comments and leave the yaml comments to help debug +*/}} + +{{- /* Sort the list */}} +{{ $usedports = $usedports | sortAlpha }} +#usedports {{ $usedports }} + +{{- /* How many nodeports per service do we want to create, except for the main service which is always two */}} +{{ $numberofPortsPerNodeService := 2 }} + +{{- /* for every nodeport we want, loop though the used ports to get an unused port */}} +{{- range $j := until (int (add (mul (int .Values.replica.replicaCount) $numberofPortsPerNodeService) 2)) }} + {{- /* #j={{ $j }} */}} + {{- $nodeport := (add $j 30000) }} + {{- $nodeportfound := false }} + {{- range $i := $usedports }} + {{- /* #i={{ $i }} + #nodeport={{ $nodeport }} + #usedports={{ $usedports }} */}} + {{- if and (has (toString $nodeport) $usedports) (eq $nodeportfound false) }} + {{- /* nodeport conflicts with in use */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if and ( has $nodeport $chosenports) (eq $nodeportfound false) }} + {{- /* nodeport already chosen, try another */}} + {{- $nodeport = (add $nodeport 1) }} + {{- else if (eq $nodeportfound false) }} + {{- /* nodeport free to use: not already claimed and not in use */}} + {{- /* select nodeport, and place into usedports */}} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- $nodeportfound = true }} + {{- else }} + {{- /* nodeport has already been chosen and locked in, just work through the rest of the list to get to the next nodeport selection */}} + {{- end }} + {{- end }} + {{- if (eq $nodeportfound false) }} + {{- $chosenports = (append $chosenports $nodeport) }} + {{- end }} + +{{- end }} + +{{- /* print the usedports and chosenports for debugging */}} +#usedports {{ $usedports }} +#chosenports {{ $chosenports }}}} + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }}-ports-configmap + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . 
) "ports-configmap")).data }} +{{- if $portsmap }} +{{- /* configmap already exists, do not install again */ -}} + {{- range $name, $value := $portsmap }} + "{{ $name }}": "{{ $value }}" + {{- end }} +{{- else }} +{{- /* configmap being set for first time */ -}} + {{- range $index, $port := $chosenports }} + {{- $nodenumber := (floor (div $index 2)) }} + {{- if (eq $index 0) }} + "{{ template "common.names.fullname" $ }}-sentinel": "{{ $port }}" + {{- else if (eq $index 1) }} + "{{ template "common.names.fullname" $ }}-redis": "{{ $port }}" + {{- else if (eq (mod $index 2) 0) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-sentinel": "{{ $port }}" + {{- else if (eq (mod $index 2) 1) }} + "{{ template "common.names.fullname" $ }}-node-{{ (sub $nodenumber 1) }}-redis": "{{ $port }}" + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/sentinel/service.yaml b/install-bundler/install-redis/redis/templates/sentinel/service.yaml new file mode 100644 index 00000000..18126f4e --- /dev/null +++ b/install-bundler/install-redis/redis/templates/sentinel/service.yaml @@ -0,0 +1,101 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +{{ $portsmap := (lookup "v1" "ConfigMap" $.Release.Namespace (printf "%s-%s" ( include "common.names.fullname" . ) "ports-configmap")).data }} + +{{ $sentinelport := 0}} +{{ $redisport := 0}} +{{- if $portsmap }} +{{ $sentinelport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "sentinel") }} +{{ $redisport = index $portsmap (printf "%s-%s" (include "common.names.fullname" $) "redis") }} +{{- else }} +{{- end }} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: node + {{- if or .Values.sentinel.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.sentinel.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{- if or (eq .Values.sentinel.service.type "LoadBalancer") (eq .Values.sentinel.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.sentinel.service.type "LoadBalancer") (not (empty .Values.sentinel.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ toYaml .Values.sentinel.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and .Values.sentinel.service.clusterIP (eq .Values.sentinel.service.type "ClusterIP") }} + clusterIP: {{ .Values.sentinel.service.clusterIP }} + {{- end }} + {{- if .Values.sentinel.service.sessionAffinity }} + sessionAffinity: {{ .Values.sentinel.service.sessionAffinity }} + {{- end }} + {{- if .Values.sentinel.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-redis + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }} + port: {{ .Values.sentinel.service.nodePorts.redis }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $redisport }} + {{- else}} + port: {{ .Values.sentinel.service.ports.redis }} + {{- end }} + targetPort: {{ .Values.replica.containerPorts.redis }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.redis }} + nodePort: {{ .Values.sentinel.service.nodePorts.redis }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $redisport }} + {{- end }} + - name: tcp-sentinel + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + port: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "NodePort" }} + port: {{ $sentinelport }} + {{- else }} + port: {{ .Values.sentinel.service.ports.sentinel }} + {{- end }} + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if and (or (eq .Values.sentinel.service.type "NodePort") (eq .Values.sentinel.service.type "LoadBalancer")) .Values.sentinel.service.nodePorts.sentinel }} + nodePort: {{ .Values.sentinel.service.nodePorts.sentinel }} + {{- else if eq .Values.sentinel.service.type "ClusterIP" }} + nodePort: null + {{- else if eq .Values.sentinel.service.type "NodePort" }} + nodePort: {{ $sentinelport }} + {{- end }} + {{- if eq .Values.sentinel.service.type "NodePort" }} + - name: sentinel-internal + nodePort: null + port: {{ .Values.sentinel.containerPorts.sentinel }} + protocol: TCP + targetPort: {{ .Values.sentinel.containerPorts.sentinel }} + - name: redis-internal + nodePort: null + port: {{ .Values.replica.containerPorts.redis }} + protocol: TCP + targetPort: {{ .Values.replica.containerPorts.redis }} + {{- end }} + {{- if .Values.sentinel.service.extraPorts }} 
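+    # Any user-defined entries from sentinel.service.extraPorts are rendered and appended verbatim to this port list.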
+ {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: node +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/sentinel/statefulset.yaml b/install-bundler/install-redis/redis/templates/sentinel/statefulset.yaml new file mode 100644 index 00000000..5f76a039 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/sentinel/statefulset.yaml @@ -0,0 +1,780 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if or .Release.IsUpgrade (ne .Values.sentinel.service.type "NodePort") .Values.sentinel.service.nodePorts.redis -}} +{{- if and (eq .Values.architecture "replication") .Values.sentinel.enabled }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ printf "%s-node" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: node + {{- if or .Values.commonAnnotations .Values.sentinel.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.sentinel.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replica.replicaCount }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.replica.podLabels .Values.commonLabels ) "context" . ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: node + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) }} + {{- if .Values.replica.updateStrategy }} + updateStrategy: {{- toYaml .Values.replica.updateStrategy | nindent 4 }} + {{- end }} + {{- if and .Values.replica.minReadySeconds (semverCompare ">= 1.23-0" (include "common.capabilities.kubeVersion" .)) }} + minReadySeconds: {{ .Values.replica.minReadySeconds }} + {{- end }} + {{- if .Values.replica.podManagementPolicy }} + podManagementPolicy: {{ .Values.replica.podManagementPolicy | quote }} + {{- end }} + template: + metadata: + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: node + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podLabels "context" $ ) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "redis.createConfigmap" .) }} + checksum/configmap: {{ pick ( include (print $.Template.BasePath "/configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- end }} + checksum/health: {{ pick ( include (print $.Template.BasePath "/health-configmap.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + checksum/scripts: {{ pick ( include (print $.Template.BasePath "/scripts-configmap.yaml") . 
| fromYaml ) "data" | toYaml | sha256sum }} + checksum/secret: {{ pick ( include (print $.Template.BasePath "/secret.yaml") . | fromYaml ) "data" | toYaml | sha256sum }} + {{- if .Values.replica.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.replica.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.replica.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.podSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.replica.priorityClassName }} + priorityClassName: {{ .Values.replica.priorityClassName | quote }} + {{- end }} + {{- if .Values.replica.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.replica.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAffinityPreset "component" "node" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.replica.podAntiAffinityPreset "component" "node" "customLabels" $podLabels "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.replica.nodeAffinityPreset.type "key" .Values.replica.nodeAffinityPreset.key "values" .Values.replica.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.replica.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.replica.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.replica.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.replica.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.replica.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.replica.shareProcessNamespace }} + {{- end }} + {{- if .Values.replica.schedulerName }} + schedulerName: {{ .Values.replica.schedulerName | quote }} + {{- end }} + {{- if .Values.replica.dnsPolicy }} + dnsPolicy: {{ .Values.replica.dnsPolicy }} + {{- end }} + {{- if .Values.replica.dnsConfig }} + dnsConfig: {{- include "common.tplvalues.render" (dict "value" .Values.replica.dnsConfig "context" $) | nindent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.sentinel.enableServiceLinks }} + terminationGracePeriodSeconds: {{ .Values.sentinel.terminationGracePeriodSeconds }} + containers: + - name: redis + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.replica.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-redis.sh + {{- end }} + {{- end }} + {{- if .Values.replica.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.replica.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.replica.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.replica.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.replica.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.replica.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: ALLOW_EMPTY_PASSWORD + value: {{ ternary "no" "yes" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.replica.containerPorts.redis | quote }} + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . 
}} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.externalMaster.enabled }} + - name: REDIS_EXTERNAL_MASTER_HOST + value: {{ .Values.replica.externalMaster.host | quote }} + - name: REDIS_EXTERNAL_MASTER_PORT + value: {{ .Values.replica.externalMaster.port | quote }} + {{- end }} + {{- if .Values.replica.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.replica.extraEnvVarsCM .Values.replica.extraEnvVarsSecret }} + envFrom: + {{- if .Values.replica.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.replica.extraEnvVarsCM }} + {{- end }} + {{- if .Values.replica.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.replica.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.replica.containerPorts.redis }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.replica.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.replica.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.replica.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.replica.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.replica.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.replica.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.replica.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.replica.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.replica.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.replica.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.replica.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.replica.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.replica.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.replica.resources }} + resources: {{- toYaml .Values.replica.resources | nindent 12 }} + {{- end }} + 
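+          # Illustrative note (not part of the upstream chart): the resources block above
+          # only renders when .Values.replica.resources is set; a minimal values override
+          # could look like the following (sizes are assumptions, tune per workload):
+          #   replica:
+          #     resources:
+          #       requests:
+          #         cpu: 100m
+          #         memory: 128Mi
+          #       limits:
+          #         memory: 256Mi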
volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.sentinel.persistence.enabled }} + - name: sentinel-data + mountPath: /opt/bitnami/redis-sentinel/etc + {{- end }} + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.replica.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + - name: sentinel + image: {{ template "redis.sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.lifecycleHooks "context" $) | nindent 12 }} + {{- else }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh + {{- end }} + {{- end }} + {{- if .Values.sentinel.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.sentinel.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.sentinel.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.sentinel.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.args "context" $) | nindent 12 }} + {{- else }} + args: + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.sentinel.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if .Values.auth.enabled }} + {{- if .Values.auth.usePasswordFiles }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.containerPorts.sentinel | quote }} + {{- end }} + {{- if .Values.sentinel.externalMaster.enabled }} + - name: REDIS_EXTERNAL_MASTER_HOST + value: {{ .Values.sentinel.externalMaster.host | quote }} + - name: REDIS_EXTERNAL_MASTER_PORT + value: {{ .Values.sentinel.externalMaster.port | quote }} + {{- end }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraEnvVars "context" $ ) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.containerPorts.sentinel }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.sentinel.startupProbe "enabled") "context" $) | nindent 12 }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.sentinel.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ 
.Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + {{- end }} + {{- end }} + {{- if .Values.sentinel.resources }} + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + - name: sentinel-data + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "common.names.fullname" . }} + {{- if .Values.auth.enabled }} + - name: REDIS_USER + value: default + {{- if (not .Values.auth.usePasswordFiles) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.replica.containerPorts.redis }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: metrics + containerPort: 9121 + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.metrics.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: metrics + {{- end }} + {{- if .Values.metrics.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.metrics.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.metrics.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: metrics + {{- end }} + {{- end }} + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.replica.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.sidecars "context" $) | nindent 8 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.replica.persistence.enabled .Values.replica.podSecurityContext.enabled .Values.replica.containerSecurityContext.enabled }} + {{- if or .Values.replica.initContainers $needsVolumePermissions .Values.sysctl.enabled }} + initContainers: + {{- if .Values.replica.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.replica.initContainers "context" $) | nindent 8 }} + {{- end }} + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ include "redis.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.replica.persistence.path }} + {{- else }} + chown -R {{ .Values.replica.containerSecurityContext.runAsUser }}:{{ .Values.replica.podSecurityContext.fsGroup }} {{ .Values.replica.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.replica.persistence.path }} + {{- if .Values.replica.persistence.subPath }} + subPath: {{ .Values.replica.persistence.subPath }} + {{- else if .Values.replica.persistence.subPathExpr }} + subPathExpr: {{ .Values.replica.persistence.subPathExpr }} + {{- end }} + {{- end }} + {{- if .Values.sysctl.enabled }} + - name: init-sysctl + image: {{ include "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctl.image.pullPolicy | quote }} + securityContext: + privileged: true + runAsUser: 0 + {{- if .Values.sysctl.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.sysctl.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.resources }} + resources: {{- toYaml .Values.sysctl.resources | nindent 12 }} + {{- end }} + {{- if .Values.sysctl.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + - name: health + configMap: + name: {{ printf "%s-health" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if .Values.auth.usePasswordFiles }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ include "redis.configmapName" . 
}} + {{- if .Values.sysctl.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + {{- if not .Values.sentinel.persistence.enabled }} + - name: sentinel-data + {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }} + emptyDir: + {{- if .Values.sentinel.persistence.medium }} + medium: {{ .Values.sentinel.persistence.medium | quote }} + {{- end }} + {{- if .Values.sentinel.persistence.sizeLimit }} + sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + - name: redis-tmp-conf + {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }} + emptyDir: + {{- if .Values.sentinel.persistence.medium }} + medium: {{ .Values.sentinel.persistence.medium | quote }} + {{- end }} + {{- if .Values.sentinel.persistence.sizeLimit }} + sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + - name: tmp + {{- if or .Values.sentinel.persistence.medium .Values.sentinel.persistence.sizeLimit }} + emptyDir: + {{- if .Values.sentinel.persistence.medium }} + medium: {{ .Values.sentinel.persistence.medium | quote }} + {{- end }} + {{- if .Values.sentinel.persistence.sizeLimit }} + sizeLimit: {{ .Values.sentinel.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- if .Values.replica.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.replica.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ include "redis.tlsSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if not .Values.replica.persistence.enabled }} + - name: redis-data + {{- if or .Values.replica.persistence.medium .Values.replica.persistence.sizeLimit }} + emptyDir: + {{- if .Values.replica.persistence.medium }} + medium: {{ .Values.replica.persistence.medium | quote }} + {{- end }} + {{- if .Values.replica.persistence.sizeLimit }} + sizeLimit: {{ .Values.replica.persistence.sizeLimit | quote }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- else if .Values.replica.persistence.existingClaim }} + - name: redis-data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.replica.persistence.existingClaim .) }} + {{- else }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: redis-data + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.replica.persistence.annotations }} + annotations: {{- toYaml .Values.replica.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.replica.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.replica.persistence.size | quote }} + {{- if .Values.replica.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.replica.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.replica.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.sentinel.persistence.enabled }} + - metadata: + name: sentinel-data + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.sentinel.persistence.labels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + app.kubernetes.io/component: node + {{- if .Values.sentinel.persistence.annotations }} + annotations: {{- toYaml .Values.sentinel.persistence.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.sentinel.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.sentinel.persistence.size | quote }} + {{- if .Values.sentinel.persistence.selector }} + selector: {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.sentinel.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" .Values.sentinel.persistence "global" .Values.global) | nindent 8 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/serviceaccount.yaml b/install-bundler/install-redis/redis/templates/serviceaccount.yaml new file mode 100644 index 00000000..4306b3e8 --- /dev/null +++ b/install-bundler/install-redis/redis/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.serviceAccount.create (and (not .Values.master.serviceAccount.create) (not .Values.replica.serviceAccount.create)) }} +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/servicemonitor.yaml b/install-bundler/install-redis/redis/templates/servicemonitor.yaml new file mode 100644 index 00000000..ee925afc --- /dev/null +++ b/install-bundler/install-redis/redis/templates/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . 
}} + namespace: {{ default .Release.Namespace .Values.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabellings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.podTargetLabels }} + podTargetLabels: {{- toYaml .Values.metrics.serviceMonitor.podTargetLabels | nindent 4 }} + {{- end }} + {{ with .Values.metrics.serviceMonitor.sampleLimit }} + sampleLimit: {{ . }} + {{- end }} + {{ with .Values.metrics.serviceMonitor.targetLimit }} + targetLimit: {{ . }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/install-bundler/install-redis/redis/templates/tls-secret.yaml b/install-bundler/install-redis/redis/templates/tls-secret.yaml new file mode 100644 index 00000000..b1f7153e --- /dev/null +++ b/install-bundler/install-redis/redis/templates/tls-secret.yaml @@ -0,0 +1,31 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "redis.createTlsSecret" .) }} +{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }} +{{- $ca := genCA "redis-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $masterServiceName := printf "%s-master" (include "common.names.fullname" .) 
}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $masterServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/install-bundler/install-redis/redis/values.yaml b/install-bundler/install-redis/redis/values.yaml new file mode 100644 index 00000000..eb423af4 --- /dev/null +++ b/install-bundler/install-redis/redis/values.yaml @@ -0,0 +1,1867 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## @param global.redis.password Global Redis® password (overrides `auth.password`) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + redis: + password: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param secretAnnotations Annotations to add to secret +## +secretAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## @param useHostnames Use hostnames internally when announcing replication. 
If false, the hostname will be resolved to an IP address +## +useHostnames: true +## @param nameResolutionThreshold Failure threshold for internal hostnames resolution +## +nameResolutionThreshold: 5 +## @param nameResolutionTimeout Timeout seconds between probes for internal hostnames resolution +## +nameResolutionTimeout: 5 + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section Redis® Image parameters +## + +## Bitnami Redis® image +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## @param image.registry Redis® image registry +## @param image.repository Redis® image repository +## @param image.tag Redis® image tag (immutable tags are recommended) +## @param image.digest Redis® image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Redis® image pull policy +## @param image.pullSecrets Redis® image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/redis + tag: 7.2.1-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + +## @section Redis® common configuration parameters +## https://github.com/bitnami/containers/tree/main/bitnami/redis#configuration +## + +## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` +## +architecture: standalone +## Redis® Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/redis#setting-the-server-password-on-first-run +## +auth: + ## @param auth.enabled Enable password authentication + ## + enabled: true + ## @param auth.sentinel Enable password authentication on sentinels too + ## + sentinel: true + ## @param auth.password Redis® password + ## Defaults to a random 10-character alphanumeric string if not set + ## + password: "" + ## @param auth.existingSecret The name of an existing secret with Redis® credentials + ## NOTE: When it's set, the previous `auth.password` parameter is ignored + ## + existingSecret: "" + ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret + ## NOTE: ignored unless `auth.existingSecret` parameter is set + ## + existingSecretPasswordKey: "" + ## @param auth.usePasswordFiles Mount credentials as files instead of using an environment variable + ## + usePasswordFiles: false + +## @param commonConfiguration [string] Common configuration to be added into the ConfigMap +## ref: https://redis.io/topics/config +## +commonConfiguration: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. 
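+  # Illustrative note: the empty "save" directive below clears all RDB save points, leaving
+  # AOF as the only persistence mechanism; to keep periodic RDB snapshots as well, a
+  # directive such as 'save 900 1' (snapshot if at least 1 key changed in 900s) could be
+  # supplied via an overridden commonConfiguration instead.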
+ save "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes +## +existingConfigmap: "" + +## @section Redis® master configuration parameters +## + +master: + ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) + ## + count: 1 + ## @param master.configuration Configuration for Redis® master nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param master.disableCommands Array with Redis® commands to disable on master nodes + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param master.command Override default container command (useful when using custom images) + ## + command: [] + ## @param master.args Override default container args (useful when using custom images) + ## + args: [] + ## @param master.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param master.preExecCmds Additional commands to run prior to starting Redis® master + ## + preExecCmds: [] + ## @param master.extraFlags Array with additional command line flags for Redis® master + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes + ## + extraEnvVarsCM: "" + ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes + ## + extraEnvVarsSecret: "" + ## @param master.containerPorts.redis Container port to open on Redis® master nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes + ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param master.startupProbe.periodSeconds Period seconds for startupProbe + ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param master.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param master.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes + ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + 
successThreshold: 1 + failureThreshold: 5 + ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes + ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param master.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param master.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® master resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param master.resources.limits The resources limits for the Redis® master containers + ## @param master.resources.requests The requested resources for the Redis® master containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context + ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context + ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser + ## @param master.containerSecurityContext.runAsGroup Set Redis® master containers' Security Context runAsGroup + ## @param master.containerSecurityContext.runAsNonRoot Set Redis® master containers' Security Context runAsNonRoot + ## @param master.containerSecurityContext.allowPrivilegeEscalation Is it possible to escalate Redis® pod(s) privileges + ## @param master.containerSecurityContext.seccompProfile.type Set Redis® master containers' Security Context seccompProfile + ## @param master.containerSecurityContext.capabilities.drop Set Redis® master containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param master.kind Use either Deployment or StatefulSet (default) + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ + ## + kind: StatefulSet + ## @param master.schedulerName Alternate scheduler for Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param master.updateStrategy.type Redis® master statefulset strategy type + ## @skip master.updateStrategy.rollingUpdate + ## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) + ## + type: RollingUpdate + ## @param master.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param master.priorityClassName Redis® master pods' priorityClassName + ## + priorityClassName: "" + ## @param master.hostAliases Redis® master pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param master.podLabels Extra labels for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param master.podAnnotations Annotations for Redis® master pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node master.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set + ## + key: "" + ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param master.affinity Affinity for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param master.nodeSelector Node labels for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param master.tolerations Tolerations for Redis® master pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. 
+ ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param master.dnsPolicy DNS Policy for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param master.dnsConfig DNS Configuration for Redis® master pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) + ## + extraVolumes: [] + ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) + ## + extraVolumeMounts: [] + ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param master.initContainers Add additional init containers to the Redis® master pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims + ## + enabled: true + ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
+ ## + sizeLimit: "" + ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param master.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® master containers + ## + subPathExpr: "" + ## @param master.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param master.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param master.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param master.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param master.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param master.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param master.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param master.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires master.persistence.enabled: true + ## + existingClaim: "" + ## Redis® master service parameters + ## + service: + ## @param master.service.type Redis® master service type + ## + type: ClusterIP + ## @param master.service.ports.redis Redis® master service port + ## + ports: + redis: 6379 + ## @param master.service.nodePorts.redis Node port for Redis® master + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param master.service.clusterIP Redis® master service Cluster IP + ## + clusterIP: "" + ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param master.service.externalIPs Redis® master service External IPs + ## https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## e.g. + ## externalIPs: + ## - 10.10.10.1 + ## - 201.22.30.1 + ## + externalIPs: [] + ## @param master.service.annotations Additional custom annotations for Redis® master service + ## + annotations: {} + ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods + ## + terminationGracePeriodSeconds: 30 + ## ServiceAccount configuration + ## + serviceAccount: + ## @param master.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param master.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param master.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param master.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Redis® replicas configuration parameters +## + +replica: + ## @param replica.replicaCount Number of Redis® replicas to deploy + ## + replicaCount: 3 + ## @param replica.configuration Configuration for Redis® replicas nodes + ## ref: https://redis.io/topics/config + ## + configuration: "" + ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes + ## Commands will be completely disabled by renaming each to an empty string. 
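As noted above, each listed command is disabled by renaming it to an empty string, so extending the default list is enough to switch off further commands; a sketch that additionally disables KEYS:

replica:
  disableCommands:
    - FLUSHDB
    - FLUSHALL
    - KEYS   # added for illustration; the chart default is only FLUSHDB and FLUSHALL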
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + ## @param replica.command Override default container command (useful when using custom images) + ## + command: [] + ## @param replica.args Override default container args (useful when using custom images) + ## + args: [] + ## @param replica.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas + ## + preExecCmds: [] + ## @param replica.extraFlags Array with additional command line flags for Redis® replicas + ## e.g: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsCM: "" + ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes + ## + extraEnvVarsSecret: "" + ## @param replica.externalMaster.enabled Use external master for bootstrapping + ## @param replica.externalMaster.host External master host to bootstrap from + ## @param replica.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes + ## + containerPorts: + redis: 6379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe + ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param replica.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param 
replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + ## @param replica.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Redis® replicas resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param replica.resources.limits The resources limits for the Redis® replicas containers + ## @param replica.resources.requests The requested resources for the Redis® replicas containers + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context + ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context + ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser + ## @param replica.containerSecurityContext.runAsGroup Set Redis® replicas containers' Security Context runAsGroup + ## @param replica.containerSecurityContext.runAsNonRoot Set Redis® replicas containers' Security Context runAsNonRoot + ## @param replica.containerSecurityContext.allowPrivilegeEscalation Set Redis® replicas pod's Security Context allowPrivilegeEscalation + ## @param replica.containerSecurityContext.seccompProfile.type Set Redis® replicas containers' Security Context seccompProfile + ## @param replica.containerSecurityContext.capabilities.drop Set Redis® replicas containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type + ## @skip replica.updateStrategy.rollingUpdate + ## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate, OnDelete (statefulset), Recreate (deployment) + ## + type: RollingUpdate + ## @param replica.minReadySeconds How many seconds a pod needs to be ready before killing the next, during update + ## + minReadySeconds: 0 + ## @param replica.priorityClassName Redis® replicas pods' priorityClassName + ## + priorityClassName: "" + ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: "" + ## @param replica.hostAliases Redis® replicas pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param replica.podLabels Extra labels for Redis® replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param replica.podAnnotations Annotations for Redis® replicas pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + ## + shareProcessNamespace: false + ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set + ## + key: "" + ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param replica.affinity Affinity for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param replica.tolerations Tolerations for Redis® replicas pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## E.g. + ## topologySpreadConstraints: + ## - maxSkew: 1 + ## topologyKey: node + ## whenUnsatisfiable: DoNotSchedule + ## + topologySpreadConstraints: [] + ## @param replica.dnsPolicy DNS Policy for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsPolicy: ClusterFirst + ## + dnsPolicy: "" + ## @param replica.dnsConfig DNS Configuration for Redis® replica pods + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ + ## E.g. + ## dnsConfig: + ## options: + ## - name: ndots + ## value: "4" + ## - name: single-request-reopen + ## + dnsConfig: {} + ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) + ## + extraVolumes: [] + ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) + ## + extraVolumeMounts: [] + ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Persistence Parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims + ## + enabled: true + ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
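The medium and sizeLimit keys apply to the emptyDir volume the chart falls back to when persistence is disabled; a sketch of a fully ephemeral replica set-up under that assumption:

replica:
  persistence:
    enabled: false
    medium: Memory   # assumption: a RAM-backed emptyDir is acceptable for this workload
    sizeLimit: 1Gi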
+ ## + sizeLimit: "" + ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers + ## NOTE: Useful when using different Redis® images + ## + path: /data + ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers + ## NOTE: Useful in dev environments + ## + subPath: "" + ## @param replica.persistence.subPathExpr Used to construct the subPath subdirectory of the volume to mount on Redis® replicas containers + ## + subPathExpr: "" + ## @param replica.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param replica.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param replica.persistence.size Persistent Volume size + ## + size: 8Gi + ## @param replica.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param replica.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param replica.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param replica.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param replica.persistence.existingClaim Use a existing PVC which must be created manually before bound + ## NOTE: requires replica.persistence.enabled: true + ## + existingClaim: "" + ## Redis® replicas service parameters + ## + service: + ## @param replica.service.type Redis® replicas service type + ## + type: ClusterIP + ## @param replica.service.ports.redis Redis® replicas service port + ## + ports: + redis: 6379 + ## @param replica.service.nodePorts.redis Node port for Redis® replicas + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + redis: "" + ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) + ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ + ## + internalTrafficPolicy: Cluster + ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param replica.service.clusterIP Redis® replicas service Cluster IP + ## + clusterIP: "" + ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param replica.service.annotations Additional custom annotations for Redis® replicas service + ## + annotations: {} + ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods + ## + terminationGracePeriodSeconds: 30 + ## Autoscaling configuration + ## + autoscaling: + ## @param replica.autoscaling.enabled Enable replica autoscaling settings + ## + enabled: false + ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling + ## + minReplicas: 1 + ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling + ## + maxReplicas: 11 + ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling + ## + targetCPU: "" + ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling + ## + targetMemory: "" + ## ServiceAccount configuration + ## + serviceAccount: + ## @param replica.serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: false + ## @param replica.serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param replica.serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param replica.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Redis® Sentinel configuration parameters +## + +sentinel: + ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. + ## IMPORTANT: this will disable the master and replicas services and + ## create a single Redis® service exposing both the Redis and Sentinel ports + ## + enabled: false + ## Bitnami Redis® Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## @param sentinel.image.registry Redis® Sentinel image registry + ## @param sentinel.image.repository Redis® Sentinel image repository + ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) + ## @param sentinel.image.digest Redis® Sentinel image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy + ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets + ## @param sentinel.image.debug Enable image debug mode + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + tag: 7.2.1-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param sentinel.annotations Additional custom annotations for Redis® Sentinel resource + ## + annotations: {} + ## @param sentinel.masterSet Master set name + ## + masterSet: mymaster + ## @param sentinel.quorum Sentinel Quorum + ## + quorum: 2 + ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. + ## + getMasterTimeout: 99 + ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. + ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. + ## NOTE: This feature requires a "downAfterMilliseconds" value less or equal to 2000. + ## + automateClusterRecovery: false + ## @param sentinel.redisShutdownWaitFailover Whether the Redis® master container waits for the failover at shutdown (in addition to the Redis® Sentinel container). 
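Combined with replica.replicaCount above, Sentinel is enabled with a handful of the keys documented in this section; a sketch for a three-replica topology in which a quorum of 2 tolerates the loss of one node:

sentinel:
  enabled: true
  masterSet: mymaster
  quorum: 2
  downAfterMilliseconds: 60000
  failoverTimeout: 180000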
+ ## + redisShutdownWaitFailover: true + ## Sentinel timing restrictions + ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down + ## @param sentinel.failoverTimeout Timeout for performing a election failover + ## + downAfterMilliseconds: 60000 + failoverTimeout: 180000 + ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover + ## + parallelSyncs: 1 + ## @param sentinel.configuration Configuration for Redis® Sentinel nodes + ## ref: https://redis.io/topics/sentinel + ## + configuration: "" + ## @param sentinel.command Override default container command (useful when using custom images) + ## + command: [] + ## @param sentinel.args Override default container args (useful when using custom images) + ## + args: [] + ## @param sentinel.enableServiceLinks Whether information about services should be injected into pod's environment variable + ## + enableServiceLinks: true + ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel + ## + preExecCmds: [] + ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsCM: "" + ## @param sentinel.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® Sentinel nodes + ## + extraEnvVarsSecret: "" + ## @param sentinel.externalMaster.enabled Use external master for bootstrapping + ## @param sentinel.externalMaster.host External master host to bootstrap from + ## @param sentinel.externalMaster.port Port for Redis service external master host + ## + externalMaster: + enabled: false + host: "" + port: 6379 + ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes + ## + containerPorts: + sentinel: 26379 + ## Configure extra options for Redis® containers' liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes + ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe + ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 22 + ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes + ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param sentinel.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + 
failureThreshold: 6 + ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes + ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 6 + ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) + ## + enabled: false + ## @param sentinel.persistence.storageClass Persistent Volume storage class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner + ## + storageClass: "" + ## @param sentinel.persistence.accessModes Persistent Volume access modes + ## + accessModes: + - ReadWriteOnce + ## @param sentinel.persistence.size Persistent Volume size + ## + size: 100Mi + ## @param sentinel.persistence.annotations Additional custom annotations for the PVC + ## + annotations: {} + ## @param sentinel.persistence.labels Additional custom labels for the PVC + ## + labels: {} + ## @param sentinel.persistence.selector Additional labels to match for the PVC + ## e.g: + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param sentinel.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. + ## + medium: "" + ## @param sentinel.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
+ ## + sizeLimit: "" + ## Redis® Sentinel resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers + ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers + ## + resources: + limits: {} + requests: {} + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context + ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser + ## @param sentinel.containerSecurityContext.runAsGroup Set Redis® Sentinel containers' Security Context runAsGroup + ## @param sentinel.containerSecurityContext.runAsNonRoot Set Redis® Sentinel containers' Security Context runAsNonRoot + ## @param sentinel.containerSecurityContext.allowPrivilegeEscalation Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation + ## @param sentinel.containerSecurityContext.seccompProfile.type Set Redis® Sentinel containers' Security Context seccompProfile + ## @param sentinel.containerSecurityContext.capabilities.drop Set Redis® Sentinel containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param sentinel.extraVolumes Optionally specify extra list of additional volumes for the Redis® Sentinel + ## + extraVolumes: [] + ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) + ## + extraVolumeMounts: [] + ## Redis® Sentinel service parameters + ## + service: + ## @param sentinel.service.type Redis® Sentinel service type + ## + type: ClusterIP + ## @param sentinel.service.ports.redis Redis® service port for Redis® + ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel + ## + ports: + redis: 6379 + sentinel: 26379 + ## @param sentinel.service.nodePorts.redis Node port for Redis® + ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## NOTE: choose port between <30000-32767> + ## NOTE: By leaving these values blank, they will be generated by ports-configmap + ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port + ## + nodePorts: + redis: "" + sentinel: "" + ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP + ## + clusterIP: "" + ## @param 
sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service + ## + annotations: {} + ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param sentinel.service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods + ## + terminationGracePeriodSeconds: 30 + +## @section Other Parameters +## + +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false + +## Network Policy configuration +## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the ports + ## Redis® is listening on. When true, Redis® will accept connections from any source + ## (with the correct destination port). 
+ ## + allowExternal: true + ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + metrics: + ## @param networkPolicy.metrics.allowExternal Don't require client label for connections for metrics endpoint + ## When set to false, only pods with the correct client label will have network access to the metrics port + ## + allowExternal: true + ## @param networkPolicy.metrics.ingressNSMatchLabels Labels to match to allow traffic from other namespaces to metrics endpoint + ## @param networkPolicy.metrics.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces to metrics endpoint + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later + ## + create: false + ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules + ## + enabled: false +## RBAC configuration +## +rbac: + ## @param rbac.create Specifies whether RBAC resources should be created + ## + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. 
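To lock the NetworkPolicy above down to labelled clients, allowExternal is switched off and cross-namespace access is granted through the match-label keys; a sketch in which the namespace label is purely illustrative:

networkPolicy:
  enabled: true
  allowExternal: false
  ingressNSMatchLabels:
    redis-access: "true"   # illustrative label, not defined anywhere in this repo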
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Redis® Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## +pdb: + ## @param pdb.create Specifies whether a PodDisruptionBudget should be created + ## + create: false + ## @param pdb.minAvailable Min number of pods that must still be available after the eviction + ## + minAvailable: 1 + ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic + ## + enabled: false + ## @param tls.authClients Require clients to authenticate + ## + authClients: true + ## @param tls.autoGenerated Enable autogenerated certificates + ## + autoGenerated: false + ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates + ## + existingSecret: "" + ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate Key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## + certCAFilename: "" + ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) + ## + dhParamsFilename: "" + +## @section Metrics Parameters +## + +metrics: + ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics + ## + enabled: false + ## Bitnami Redis® Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ + ## @param metrics.image.registry Redis® Exporter image registry + ## @param metrics.image.repository Redis® Exporter image repository + ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy Redis® Exporter image pull policy + ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets + ## + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.54.0-debian-11-r0 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
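The tls block above expects the certificate material to exist as a secret before install; a sketch of wiring it in, with the secret and file names as placeholders:

tls:
  enabled: true
  authClients: true
  existingSecret: redis-tls   # placeholder secret name
  certFilename: tls.crt
  certKeyFilename: tls.key
  certCAFilename: ca.crt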
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Configure extra options for Redis® containers' liveness, readiness & startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## @param metrics.startupProbe.enabled Enable startupProbe on Redis® replicas nodes + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + ## @param metrics.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.command Override default metrics container init command (useful when using custom images) + ## + command: [] + ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname + ## Useful for certificate CN/SAN matching + ## + redisTargetHost: "localhost" + ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: + ## e.g.: + ## extraArgs: + ## check-keys: myKey,myOtherKey + ## + extraArgs: {} + ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 
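Enabling the exporter sidecar is usually paired with the ServiceMonitor parameters further down in this metrics section; a sketch, where the release label is only a placeholder for whatever the Prometheus Operator instance selects on:

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    interval: 30s
    additionalLabels:
      release: kube-prometheus-stack   # placeholder label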
+ ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context + ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set Redis® exporter containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set Redis® exporter containers' Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set Redis® exporter containers' Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.seccompProfile.type Set Redis® exporter containers' Security Context seccompProfile + ## @param metrics.containerSecurityContext.capabilities.drop Set Redis® exporter containers' Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsGroup: 0 + runAsNonRoot: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar + ## + extraVolumes: [] + ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar + ## + extraVolumeMounts: [] + ## Redis® exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.resources.limits The resources limits for the Redis® exporter container + ## @param metrics.resources.requests The requested resources for the Redis® exporter container + ## + resources: + limits: {} + requests: {} + ## @param metrics.podLabels Extra labels for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + ## Redis® exporter service parameters + ## + service: + ## @param metrics.service.type Redis® exporter service type + ## + type: ClusterIP + ## @param metrics.service.port Redis® exporter service port + ## + port: 9121 + ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g. 
+ ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service + ## + annotations: {} + ## @param metrics.service.clusterIP Redis® exporter service Cluster IP + ## + clusterIP: "" + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped + ## + interval: 30s + ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.podTargetLabels Labels from the Kubernetes pod to be transferred to the created metrics + ## + podTargetLabels: [] + ## @param metrics.serviceMonitor.sampleLimit Limit of how many samples should be scraped from every Pod + ## + sampleLimit: false + ## @param metrics.serviceMonitor.targetLimit Limit of how many targets should be scraped + ## + targetLimit: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules Custom Prometheus rules + ## e.g: + ## rules: + ## - alert: RedisDown + ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down + ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down + ## - alert: RedisMemoryHigh + ## expr: > + ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 + ## / + ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . }}-metrics"} + ## > 90 + ## for: 2m + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. 
+ ## - alert: RedisKeyEviction + ## expr: | + ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 + ## for: 1s + ## labels: + ## severity: error + ## annotations: + ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys + ## description: | + ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + ## + rules: [] + +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param volumePermissions.image.registry OS Shell + Utility image registry + ## @param volumePermissions.image.repository OS Shell + Utility image repository + ## @param volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r60 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
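The volumePermissions init container above is the usual workaround when a provisioner mounts volumes owned by root while the Redis containers run as the non-root user and fsGroup 1001 configured earlier; a sketch of turning it on:

volumePermissions:
  enabled: true
  containerSecurityContext:
    runAsUser: 0   # chart default; "auto" is also supported, as described below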
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## init-sysctl container parameters +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctl: + ## @param sysctl.enabled Enable init container to modify Kernel settings + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param sysctl.image.registry OS Shell + Utility image registry + ## @param sysctl.image.repository OS Shell + Utility image repository + ## @param sysctl.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param sysctl.image.digest OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param sysctl.image.pullPolicy OS Shell + Utility image pull policy + ## @param sysctl.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r60 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) + ## + command: [] + ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` + ## + mountHostSys: false + ## Init container's resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param sysctl.resources.limits The resources limits for the init container + ## @param sysctl.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + +## @section useExternalDNS Parameters +## +## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. +## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. +## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. Setting this to `false` will disable annotations. 
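The sysctl init container described above exists to adjust kernel settings that Redis commonly warns about at startup (for example net.core.somaxconn); the command below is only an illustrative sketch, not something this chart ships:

sysctl:
  enabled: true
  mountHostSys: true
  command:
    - /bin/sh
    - -c
    - sysctl -w net.core.somaxconn=10000   # illustrative kernel tweak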
+## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. +## +useExternalDNS: + enabled: false + suffix: "" + annotationKey: external-dns.alpha.kubernetes.io/ + additionalAnnotations: {} diff --git a/install-bundler/network/.helmignore b/install-bundler/network/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/install-bundler/network/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/install-bundler/network/Chart.yaml b/install-bundler/network/Chart.yaml new file mode 100644 index 00000000..602b71da --- /dev/null +++ b/install-bundler/network/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: network +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/install-bundler/network/README.md b/install-bundler/network/README.md new file mode 100644 index 00000000..e7fe3ceb --- /dev/null +++ b/install-bundler/network/README.md @@ -0,0 +1 @@ +https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer \ No newline at end of file diff --git a/install-bundler/network/install.sh b/install-bundler/network/install.sh new file mode 100644 index 00000000..b71bce4d --- /dev/null +++ b/install-bundler/network/install.sh @@ -0,0 +1,52 @@ +set -e + +NAMESPACE=$1 +DNS_NAME=$2 +IP_NAME=$3 +CHAINS_CFG_FILENAME=$4 +# shift 5 +# CHAINS=("$@") + +HELM_RELEASE="network"; + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PARENT_DIR="$(dirname "$DIR")" + +echo "$DNS_NAME" +echo "$CHAINS_CFG_FILENAME" +echo "$PARENT_DIR" + + +echo "$PARENT_DIR/configs/chains/$CHAINS_CFG_FILENAME" +source "$PARENT_DIR/configs/chains/$CHAINS_CFG_FILENAME" +array_names=$(declare -p | grep -Eo 'chain_[a-zA-Z0-9_]+') + +# Iterate over these names and extract the chainId values +declare -a CHAIN_IDS +for array_name in $array_names; do + eval "chain_id=\${$array_name[chainId]}" + CHAIN_IDS+=("$chain_id") +done +echo "Chains that are being added to Ingress ${CHAIN_IDS[*]}" + + +CHAINS_STR="{$(echo "${CHAIN_IDS[@]}" | tr ' ' ',')}" + + +echo "$CHAINS_STR" +echo "IP_NAME that will be attached to Ingress is ${IP_NAME}" +helm upgrade --install --wait --timeout 720s $HELM_RELEASE "$DIR/." \ + -f "$DIR/values.yaml" \ + --set provider="$PROVIDER" \ + --set prometheus.enabled=true \ + -n "$NAMESPACE" \ + --set CHAINS="$CHAINS_STR" \ + --set bundlerName="bundlers" \ + --set STATIC_IP_NAME="$IP_NAME" \ + --set-string namespace="$NAMESPACE" \ + --set average_http_requests_hpa=30 \ + --set average_cpu_hpa=500m \ + --set commonName="$DNS_NAME" \ + --set domains[0]="$DNS_NAME" \ + --set domains[1]="www.$DNS_NAME" +echo -e "####### Deployed $HELM_RELEASE to $NAMESPACE ####### \n"; \ No newline at end of file diff --git a/install-bundler/network/templates/_helpers.tpl b/install-bundler/network/templates/_helpers.tpl new file mode 100644 index 00000000..86588fe2 --- /dev/null +++ b/install-bundler/network/templates/_helpers.tpl @@ -0,0 +1,107 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "chart.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "Chart.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + + +{{- define "chart.namespace" -}} + {{- if .Values.namespace -}} + {{- .Values.namespace -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + + + + + +{{/* +Common labels +*/}} +{{- define "chart.labels" -}} +helm.sh/chart: {{ include "chart.name" . }} +{{ include "chart.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + + +{{- define "chart.bundler.name" -}} +{{ .Values.bundlerName}} +{{- end }} + + + +{{/* +Selector labels +*/}} +{{- define "chart.selectorLabels" -}} +app: {{ include "chart.bundler.name" . }} +{{- end }} + +{{- define "chart.certificate.name" -}} +{{ include "chart.name" . }}-certificate +{{- end }} + + +{{- define "chart.clusterssuer.name" -}} +{{ include "chart.name" . }}-clusterssuer +{{- end }} + +{{- define "chart.ingress.name" -}} +{{ include "chart.name" . }}-ingress +{{- end }} + +{{- define "chart.ingress.metadata" -}} +name: {{ include "chart.ingress.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + +{{/* +Define metadata for chart pod. +*/}} +{{- define "chart.certificate.metadata" -}} +name: {{ include "chart.certificate.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + + +{{/* +Define metadata for chart pod. +*/}} +{{- define "chart.clusterssuer.metadata" -}} +name: {{ include "chart.clusterssuer.name" . }} +namespace: {{ include "chart.namespace" . }} +labels: +{{ include "chart.labels" . | nindent 2 }} +{{- end }} + + diff --git a/install-bundler/network/templates/ingress.yaml b/install-bundler/network/templates/ingress.yaml new file mode 100644 index 00000000..32d50291 --- /dev/null +++ b/install-bundler/network/templates/ingress.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: +{{ include "chart.ingress.metadata" . | nindent 2 }} + annotations: + kubernetes.io/ingress.global-static-ip-name: {{.Values.STATIC_IP_NAME}} + networking.gke.io/managed-certificates: {{ include "chart.certificate.name" .}} + kubernetes.io/ingress.class: "gce" + kubernetes.io/ingress.allow-http: "false" + cloud.google.com/health-check-path: "/health" + +spec: + + rules: + - http: + paths: +{{- range $chain := .Values.CHAINS }} + - backend: + service: + name: chain-{{$chain}}-service + port: + number: 3000 + path: /api/v2/{{$chain}}/ + pathType: Prefix +{{- end }} + +{{- range $chain := .Values.CHAINS }} + - backend: + service: + name: chain-{{$chain}}-service + port: + number: 3000 + path: /{{$chain}}/health + pathType: Exact +{{- end }} \ No newline at end of file diff --git a/install-bundler/network/templates/managed-cert.yaml b/install-bundler/network/templates/managed-cert.yaml new file mode 100644 index 00000000..16436912 --- /dev/null +++ b/install-bundler/network/templates/managed-cert.yaml @@ -0,0 +1,10 @@ + +#https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs?cloudshell=true + +apiVersion: networking.gke.io/v1 +kind: ManagedCertificate +metadata: +{{ include "chart.certificate.metadata" . 
| nindent 2 }} +spec: + domains: +{{ toYaml .Values.domains | indent 4 }} diff --git a/install-bundler/network/values.yaml b/install-bundler/network/values.yaml new file mode 100644 index 00000000..75d12f6d --- /dev/null +++ b/install-bundler/network/values.yaml @@ -0,0 +1,30 @@ +namespace: "" + +port: "3000" +targetPort: "3000" +replicaCount: 2 +resource: + requests: + memory: "500Mi" + cpu: "200m" + limits: + memory: "1Gi" + cpu: "500m" + +secret: + projectID: biconomy-test + key: sdk-test-relayer-node-service + config: + name: config.json.enc + + passphrase: + key: sdk-test-relayer-node-service-passphrase + name: CONFIG_PASSPHRASE + version: latest + +# datadog configs +affinity_tolerations: + enable: false + +datadog: + enable: false \ No newline at end of file diff --git a/install-bundler/output.txt b/install-bundler/output.txt new file mode 100644 index 00000000..a7728935 --- /dev/null +++ b/install-bundler/output.txt @@ -0,0 +1,378 @@ +Release "bundler-80001" does not exist. Installing it now. +NAME: bundler-80001 +LAST DEPLOYED: Tue Oct 24 18:18:41 2023 +NAMESPACE: tw-prod +STATUS: pending-install +REVISION: 1 +TEST SUITE: None +USER-SUPPLIED VALUES: +CHAIN_ID: 80001 +affinity_tolerations: + enable: false +datadog: + enable: false +env: prod +fundingBalanceThreshold: "0.1" +fundingRelayerAmount: "0.2" +gcpSecretManagerName: "" +gcpSecretName: tw-prod-80001-CONFIG-PASSPHRASE +hpa: + average_cpu_hpa: 500m + average_http_requests_hpa: 10 + maxReplicas: 20 + minReplicas: 1 +image: + name: us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler + tag: 0.3-amd64 +maxRelayerCount: "5" +minRelayerCount: "3" +nameOverride: chain-80001 +namespace: tw-prod +port: "3000" +projectId: prj-biconomy-prod-001 +prometheus: + enabled: true +provider: cloud +replicaCount: 1 +resource: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi +secret: + config: + name: config.json.enc + key: bundlers + passphrase: + key: passphrase + name: CONFIG_PASSPHRASE + value: "" + version: latest + projectID: biconomy-test +targetPort: "3000" + +COMPUTED VALUES: +CHAIN_ID: 80001 +affinity_tolerations: + enable: false +datadog: + enable: false +env: prod +fundingBalanceThreshold: "0.1" +fundingRelayerAmount: "0.2" +gcpSecretManagerName: "" +gcpSecretName: tw-prod-80001-CONFIG-PASSPHRASE +hpa: + average_cpu_hpa: 500m + average_http_requests_hpa: 10 + maxReplicas: 20 + minReplicas: 1 +image: + name: us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler + tag: 0.3-amd64 +maxRelayerCount: "5" +minRelayerCount: "3" +nameOverride: chain-80001 +namespace: tw-prod +port: "3000" +projectId: prj-biconomy-prod-001 +prometheus: + enabled: true +provider: cloud +replicaCount: 1 +resource: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 200m + memory: 512Mi +secret: + config: + name: config.json.enc + key: bundlers + passphrase: + key: passphrase + name: CONFIG_PASSPHRASE + value: "" + version: latest + projectID: biconomy-test +targetPort: "3000" + +HOOKS: +MANIFEST: +--- +# Source: bundler/templates/encrptedConfigConfigmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: chain-80001-configmap + namespace: tw-prod + labels: + + helm.sh/chart: chain-80001 + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + app.kubernetes.io/version: "v0.0.1" + app.kubernetes.io/managed-by: Helm + +data: + config.json.enc : 
FcTHSYNLeAL2+FlSXp6w2WQ3FLDCGK4Zo8G32sZN9wE=f287180af544417640d94a1eebf375059f44ae1fbed94f89db74cee601800912OdBK+trBlnDFSjhIIpvy9qGcAR4Z7hIRfGHxgVwHxynPTcbj1BjkvHlXxrKKS9DZddns0tIVW1XbfwXpJe+2Z6XbYOsIaiUGs5I+nhB7/fN/nc4kdTA8DdudSaFP8OOFOdXVy9a78W/onps6X33bESZq4mqwVoXTTUaRBsILw432CgA2NkU3Rn4RoKfDxS4KAkr3wU7n7/rYUNJU1k7HWbhkCzt24wLZF+oPNl+S9+3PiX9gJOSjcuVmxUkJ4z+rPS3hhInCu6ZSGwzORrIU/zw3sFOlM5P3oAlnCPuI3XybX+RXtj3R9HmQe+5OH+6ISuC883TJGJ1E7+i6ABy3zJHav+ViOWxOiuxGnNSAa20hlGIe3NklEZL86quw5sBljuWo4qAdnEfLIamvGtHbdWOrhEM2rjTmXJCllyscXMwfsDUKvvFhu/dIt2zVE3scBeceQsPzffMAiXnGJNWs7kQmqY7Xz8hPSPkWP31JXyijK8nJshe1kK2taqQK1hb+C6is/Wanq67W25u29xX/AE9ratS4+Gt6+ocsKDVgKQGmt96ttwA12lo8DB0mayKz3zPy3RJv8IIdyq4Igmrpw4oonbpzGHEJXkizfB1KjbAdO8/X11HmFseFyv8c642WKWhoEV3916R707CrpsfWKzthjy/vjCXWnL/tvPHIO+qgn4w8NEt3z3e1SkbnmR0CK9TIJWH9JRscgv6T/mNTM4uD1atEuGpjhgXrwSbUAnJ7jOyoxtHIhjiZ92ahphMV3llwC21VOFQ9BVEKal4Lvc6GIcMw79zGQKaJGjJWwXspjOfjLUvQmHSNoeV2Yt55hsrsHLbkT6f5JWiR0IcmWlMrb7dImbFI9zR7gr+XgC4G7WUE+uU503Wly+29GrM90iuSmMq7NTchiUlbgEFkjeeAawWbGAEXBo3V12iXWeDNyQMk6TwT+PweaOw7MmiJ4047kydpC4vJYYgLMpBAvj8v2l0JsJ2jCn1wbldunCJb4TGmszXLG2pBxfB2pLHfUcyJpYg6yBeb/KXkxHmlnQaZyB7PKIRP0c4fjaMBiQ6YiYKhampV1ij73y2C4TEQUy7sGPcyGB2Hs5ile56tOfDFrxolj9u70dMYC+I420sWaR9f0RvToujeFDtXz6Z55Sn1gfopzRfPQZ+fbCAWCX3EXZ5pXOOP/8Y4R/7QKdJSvaThCTczob0b5oJwe0TDwgwpB5kp5eA6brKSsmTV7IWg5pEG9zvCUgmrOB/5dgml6MnmYtj+vLh+XQT/K017CTyf+zcntig5pH3g96BJ4PDsb0jMFus+H4da2sjk4J8LErWGmVaVstODgMhZzewE8NTHJjdRnjTw2nVOKCaRoy1TAifprLZgkayC2KP0tFHUzMQU3EeQHCZIjSc3b5zRxn/EMNulmtxtKwvi4EB789NRY+dubxxYJlg0zln/B4ls8yEnZwiKsb3iegt9DLqoEa5u/8FsHamg36uoxxcy1tU6TBsXSDD67tCdtbxSGcAY5o7DoWLu69+sid7vKc8daB/dSgQRd7nVdP6i4XEfj9+R1gFmQfoOFF1/Dgm21kWQbucgUMnNnA+NG+AFHXWnOtW9CoAxEfhGXszLoHNWsr/cjoZC/uawXV2H6X7rr35NuS1fhtfi9Hz4hXlbKT0NAp/y0XkPg0H7byiYJyIYLcoFIQnMy912hMHAu3jC4JTeB9iHh3b5BZO3wqSYe4cTHDoV1umd4AfEC9/f5p/8KAFA9EvHmHpjOL6qP0WTw0oeVwDOf/zb34mUwg+1K065EVZTEqainrm16v8lq49aFAvSX84IHIYoPTJM2RRm+Bf6G/v+eqC2saRGQ7aEa6dt7MTPr9yXReK0YgkTXplCpvIyaIMhA9jDXvEGw2MhX9zGtG7t4LpfsXZVOBH48KDO5jZxU6QhrxC49oxc19xWqh7HH7U11lv1CrNj6wy0sc/ZFvIayjLX/vmKUDWonVgch7U3lITZCIauNWdVywxwBcodPhQdutEJM+F1F5T5qB1pWiLMXlZkg7ViRKoMyYNcrFsxRonyFnpIrB9VdY10oaVLKR6BpGiBjXTUURakktu4l++w/ltaa+2uxN2KXVmSJLzzHNd+zSaZbXNkaHjFqGo+KtRCPFlTeEuYR37ANBoRemw+NOcXtLMYMFikgcBghNb6CVcp0P6OFsXW6K3E9o3NSl6lzu0Kg99FggollTX3FPLSPVMVVGUjbIEUZ9VrpJUmSjaSRuUfniFFjwaSkEccc6IIzSyOTVosslyNsr5L6HBsykOZ2XViql4mcF7/gAcXhJrQTuXKXJ3+ZqIzA2XbNA2CgpRgfINSPbXO37z6meoMrkdgna+iHjmmBIA2lHmc2qOv4eer/CTn4paOas4IXCLyIVV4jEx6lZGxaDja+uWXmOtxWN04Nl+6qilA3zJbN0i85nyS+6hU5avKCmfcOg2aatOGDqb8Vic++quoiUNtqwR5HbNksQx9em3ouhMkzG87cL6NLF54kED3y7zFIQOZxONb1Jdiev82wXWgc13euHlt33BDd7bwldRAlAi/ILGtKh8Fw6uQfZXFCVACPpCcpWeNYMB/57HKcDM7YlbvXosUgiVJqSKrqcnvsKHhMdEnZxRxLPNcyfUpfy/Mns1vrNNHvBG+W6yInbPAefEMuZsQ7rTd2A1TZFVChLKO6mBX+PixkNCHbbplcebzSjlZO3gPyU3wafG9s3o0g7JhhSOga8IfRJgKPDhmFwG+jNuL8DsCbKHD2t7sEl4cPBPqWZjDNzUHvtvzxf7aSS5IG9SwZDsC7mJeuwwfZxldiwXZGeLAQkwIDJyZWE5TQVEpyoC4Jeos44mBvIQv3rMOGpiN/Z7UqFKw6LmkviYHKZgQVapWaZRFZMt9XrK1L4h1xLjHC58vsTMiwLYE5QIk7kfWbYeHwvJUhcO9CoVdeOG4jeS7Opf3mFDcNi/eclrRqTwykJz6L4PfHIDKkZoaDXn8bj7c7pc10WWlhtq2tmIL56wJWcjD3QmqNvCnpY8AXIp6aX8hR2wSHrVSu6iYnpCJLYR2KghSjB6bO/MTncVElQ22BFjWCd3YmIAqpZI0EBVQ/M/EnYEsUHaOFYorONqY8JCj6VpFiqI7S+eJxj9CRm4AAruJkJN/8Yi8xs7Pw/+jC50jjaZXunZAx1mkh2nW/I4GEgdQtFn1PxS2yDXvHhm4uhqADPnwGdx6uR+atykem6CZq5jaSGphBwRxUiGWqgDWzmcfZbqhoBhi2Kg0WhJDRz+TkjO5icfOwKxTrxgCM/8DTua/EHA6z4RyiNpGcWaCf950wiMwErnSzE9OzafDrMU4DLG9tY/VIhQlTVNQLQKf202LDp9ZpPPX+vjGVslZKk2ueKqt6ftLrsalrnCrpMYKzB2CPqee2F9VWyVnpRISlYTlF1SaOgO+3AZG+++
Gp8bkM/of3mOi9IL1+8krd4hcb3iq+Tfww9YH3mMRd+2acBeWyiVef6DBEpIO+rl7Oi/4zY4+eoWpFrbw7K5RNfZ3GvyHmW2r1FdOevrcY2Xrdbl+g+JyoBBnZf3+qwz0K+t635WRHNi5zkabdUOs53NrXGQz7pajs8mb5IX0GlHttclz0MWRzKzSmpbviPtdesbAdDHLOb7UscuCdR423eqt0QjY8zr4om4V7gAF5dt+1PZiFaLm6etJ/3kab/4R+ungJBhlA4nhL0F5qtcfHXl9az9yMFhGZFwdHxH/UCXaG0cFL25w3ZZkIQWQyFrjllclN3ML63ftRuzLj6KAySQSlAsEF3oCvsKsT4/7+UUgJ5VwC8OLRLBNb8RD5CNoGAxrKPucIFroWZWnz0EJnnulCocuNg4VYhgS5nAM78MeRqIPJ7ztA4VxVkpc/XcUV+pMXBHJjWEv2JQdTKw9H6I8WetrzhyVQvk299io8ZtPplqZ3hlcmaVdLoH7Wg59838jHoF3BWIAFGhN0aisWiXhFn3rhceUfw/yesIdnjVDRMxBJW/8+X9Qu+PMt+eqs6RxAnin+fToR6wwSWV1Q94pfwMiLdcUDfL2D7g02lEvFnuEtSn82fERksqaczmPC/TlyzWElgkmQPaIVfZMw+r+gdVPL/6e9Il0i396YhLqVtWFzV4grDWaIXW95hJUlcZ2roqZpz7ioxd1K0AhIQFojnM8BAgPf+fxozw43kXGkI419vgrzvrMRhfyxPbkahmhbWZatmxjta3tb8Qaa94BJhYQAUGpDcLd9RFEQ2xQ1iJQD9LE9dQ0NNKM0dQXnwjlo11wtLvP1zKPaKj7h0+288hRPo2HrruB07rJEu3fNBQQODcJC77CUfeZ7padk2Xhc5o6kTYWdHXVAPQEBEAetV358kHx7xzIUekDWmIBVKsIHI7zP036QCwH/CxcZ2VFBgh5rSM7EiPrYTtwCQoxp8UF55eo9Ig/9HvQMt5oDVYm0yTZcNWsbWSvCUB2QVyAi0JpeGZwUFBoVnbDvV1AbzTwuJTkFepcdiC4vMuNiVUSnI7KE7/dy7XlN6zxlNbeubknXHXpnGM7Zv1sZYYsP8GPqfQQ/xn2SCDe1dH9xYnJPBiqHE1xQz2kFx+n8p4mOB4kNdE9IOIjlwBvb/IZF5Hvg+v/iWCPaVXzNDZR+wtMgROfdSumekVt4BZzQTMmJuGCDIWh5k4KUmzKbI63Lp7IEmqqqNUNaddpEf1HarrerMpNGnb2g11MYpbC1sBklSoCVDB6DOa7aagH3Tl6DDNOvD9N0429jYvZapKNNClHplf82QQfTat5j1bKAyt1Zta1yiMj5bvyNObruuEDiHVsqljtSbIpNmyrEhD8Ssp/XoO7zGVYhAiBx8O5HtSxn4pbIryrap3RirvY8Z38yRfE4XedKQW9fjauJ11e6ZchPdXi0JZ9bgcOKL+nxhTlKYN7zSbmUcfaQ5qOrLRr/CKxdTojDxLmu83HfsDOvnfvdwyqNvj+8HcCyAXbdMdpFnIsekmsrFaW45Ok8fPv2o6ym1Q6jwDFybEWAwYA3wtnuSbXJx7KnstZtZ/cs3Cc6lrjxoi7HFL7SrVUvYsy0r/2lUqOQMWiLXifpN/HeF0dP9/WkxnjH/WwSDe5mY2qpgnr +--- +# Source: bundler/templates/envConfigMap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + + name: chain-80001-env-configmap + namespace: tw-prod + labels: + + helm.sh/chart: chain-80001 + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + app.kubernetes.io/version: "v0.0.1" + app.kubernetes.io/managed-by: Helm +data: + MIN_RELAYER_COUNT: "3" + MAX_RELAYER_COUNT: "5" + FUNDING_BALANCE_THRESHOLD: "0.1" + FUNDING_RELAYER_AMOUNT: "0.2" +--- +# Source: bundler/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + + name: chain-80001-service + namespace: tw-prod + labels: + + helm.sh/chart: chain-80001 + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + app.kubernetes.io/version: "v0.0.1" + app.kubernetes.io/managed-by: Helm + annotations: + cloud.google.com/neg: '{"ingress": true}' +spec: + selector: + + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + ports: + - protocol: TCP + port: 3000 + targetPort: 3000 +--- +# Source: bundler/templates/hpa.yaml +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + # name: sample-app + + name: chain-80001-hpa + namespace: tw-prod + chainId: 80001 + labels: + + helm.sh/chart: chain-80001 + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + app.kubernetes.io/version: "v0.0.1" + app.kubernetes.io/managed-by: Helm + +spec: + scaleTargetRef: + # point the HPA at the sample application + # you created above + apiVersion: apps/v1 + kind: StatefulSet + name: chain-80001-statefulset + + # autoscale between 1 and 10 replicas + minReplicas: 1 + maxReplicas: 20 + metrics: + # use a "Pods" metric, which takes the average of the + # given metric across all pods controlled by the autoscaling 
target + - type: Pods + pods: + # use the metric that you used above: pods/http_requests + metric: + name: 80001-http-requests + # target 500 milli-requests per second, + # which is 1 request every two seconds + # http_requests_per_two_minute metric goes beyond 100, + # then you should set the averageValue for that metric + # to 100 (or 100000m, as 1 = 1000m). + target: + type: AverageValue + averageValue: 10 + + - type: Pods + pods: + metric: + name: 80001-cpu-usage + target: + type: AverageValue + averageValue: 500m # Adjust the averageValue as per your requirements. +--- +# Source: bundler/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + + name: chain-80001-statefulset + namespace: tw-prod + + labels: + + helm.sh/chart: chain-80001 + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + app.kubernetes.io/version: "v0.0.1" + app.kubernetes.io/managed-by: Helm + +spec: + serviceName: chain-80001-service + + replicas: 1 + + selector: + matchLabels: + + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + template: + metadata: + labels: + + app.kubernetes.io/name: chain-80001 + app.kubernetes.io/instance: bundler-80001 + app: chain-80001 + namespace: tw-prod + + annotations: + + + releaseTime: "2023-10-24 12:48:41Z" + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "3000" + + spec: + serviceAccountName: tw-prod-service-account + + initContainers: + + - name: secrets-init + image: doitintl/secrets-init:0.3.6 + command: + - sh + args: + - -c + - "cp /usr/local/bin/secrets-init /secrets-init/bin/" + volumeMounts: + - mountPath: /secrets-init/bin + name: secrets-init-volume + + - name: init-ordinal + image: busybox:1.28 + command: + - "sh" + - "-c" + - > + echo $(echo $POD_NAME | awk -F '-' '{print $NF}') > /etc/podinfo/ordinal_index; + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: podinfo + mountPath: /etc/podinfo + containers: + - name: chain-80001-pod + imagePullPolicy: "IfNotPresent" + image: us-docker.pkg.dev/prj-biconomy-prod-001/bundler/bundler:0.3-amd64 + ports: + - containerPort: 3000 + + command: + - "/secrets-init/bin/secrets-init" + args: + - "--provider=google" + - "/entrypoint.sh" + + env: + - name: CHAIN_ID + value: "80001" + - name: CONFIG_PASSPHRASE + value: gcp:secretmanager:projects/prj-biconomy-prod-001/secrets/tw-prod-80001-CONFIG-PASSPHRASE + + + envFrom: + - configMapRef: + name: chain-80001-env-configmap + + volumeMounts: + - mountPath: "/relayer-node/config.json.enc" + name: config-volume + subPath: "config.json.enc" + + - mountPath: /secrets-init/bin + name: secrets-init-volume + + - name: podinfo + mountPath: /etc/podinfo + resources: + requests: + memory: 512Mi + cpu: 200m + limits: + memory: 1Gi + cpu: 500m + livenessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 60 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + failureThreshold: 3 + + volumes: + - name: config-volume + configMap: + name: chain-80001-configmap + + - name: env-config-volume + configMap: + name: chain-80001-env-configmap + + - name: podinfo + emptyDir: {} + + - name: secrets-init-volume + emptyDir: { } + diff --git a/install-bundler/sample-chains.sh b/install-bundler/sample-chains.sh 
new file mode 100644 index 00000000..5308808a --- /dev/null +++ b/install-bundler/sample-chains.sh @@ -0,0 +1,38 @@ + +declare -A chain_avalanche=( + [name]='chain-43113' + [chainId]="43113" + [autoScalingThreshholdHTTPRequests]=15 + [autoScalingThreshholdCPU]=500m + [minRelayerCount]="3" + [maxRelayerCount]="5" + [fundingBalanceThreshold]="0.1" + [fundingRelayerAmount]="0.2" + [replica]=1 +) + +declare -A chain_mumbai=( + [name]='chain-80001' + [chainId]="80001" + [autoScalingThreshholdHTTPRequests]=10 + [autoScalingThreshholdCPU]=500m + [minRelayerCount]="3" + [maxRelayerCount]="5" + [fundingBalanceThreshold]="0.1" + [fundingRelayerAmount]="0.2" + [replica]=1 + +) + + +declare -A chain_polygon=( + [name]='chain-137' + [chainId]="137" + [autoScalingThreshholdHTTPRequests]=15 + [autoScalingThreshholdCPU]=700m + [minRelayerCount]="5" + [maxRelayerCount]="12" + [fundingBalanceThreshold]="0.1" + [fundingRelayerAmount]="0.3" + [replica]=1 +) diff --git a/install-bundler/setup-scripts/check-secrets.sh b/install-bundler/setup-scripts/check-secrets.sh new file mode 100644 index 00000000..75f4956d --- /dev/null +++ b/install-bundler/setup-scripts/check-secrets.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +PROJECT_ID=$1 +SECRET_NAME=$2 + + +# echo "9090biconew" | gcloud secrets versions add $SECRET_NAME --data-file=- +# Check if the secret exists in GCP Secret Manager +if gcloud secrets describe "$SECRET_NAME" --project="$PROJECT_ID" &> /dev/null; then + echo "The secret exists in GCP Secret Manager." +else + echo "The secret does not exist in GCP Secret Manager." +fi \ No newline at end of file diff --git a/install-bundler/setup-scripts/create-encrypted-config-secret.sh b/install-bundler/setup-scripts/create-encrypted-config-secret.sh new file mode 100644 index 00000000..4bc0c4ba --- /dev/null +++ b/install-bundler/setup-scripts/create-encrypted-config-secret.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +GCP_ENCRYPTED_CONFIG_SECRET=$1 + +# Check if the secret already exists +if gcloud secrets describe "$GCP_ENCRYPTED_CONFIG_SECRET" &>/dev/null; then + echo "Secret $GCP_ENCRYPTED_CONFIG_SECRET already exists." + read -rp "Do you want to overwrite? (yes/no): " RESPONSE + case $RESPONSE in + [yY] | [yY][eE][sS]) + # Ask the user for the file path + read -rp "Enter the file path for overwriting the secret: " FILEPATH + + # Check if the provided file path is valid + if [[ ! -f $FILEPATH ]]; then + echo "Error: Invalid file path." + exit 1 + fi + + # Update the existing secret + gcloud secrets versions add "$GCP_ENCRYPTED_CONFIG_SECRET" --data-file="$FILEPATH" + echo "Secret $GCP_ENCRYPTED_CONFIG_SECRET has been updated." + ;; + *) + echo "User chose not to overwrite. Continuing without updating the secret." + ;; + esac + +else + GIT_ROOT=$(git rev-parse --show-toplevel) + FILEPATH="${GIT_ROOT}/config.json.enc" + echo "GCP Secret $GCP_ENCRYPTED_CONFIG_SECRET doesn't exist." + echo "Creating it now from ${FILEPATH}" + + # Check if the provided file path is valid + if [[ ! -f $FILEPATH ]]; then + echo "Error: Invalid file path ${FILEPATH}" + exit 1 + fi + + # Create a new secret + gcloud secrets create "${GCP_ENCRYPTED_CONFIG_SECRET}" --data-file="${FILEPATH}" + echo "Secret ${GCP_ENCRYPTED_CONFIG_SECRET} has been created." +fi + +echo "GCP_ENCRYPTED_CONFIG_SECRET $GCP_ENCRYPTED_CONFIG_SECRET process completed."
\ No newline at end of file diff --git a/install-bundler/setup-scripts/create-gcp-role.sh b/install-bundler/setup-scripts/create-gcp-role.sh new file mode 100644 index 00000000..e0f91bbe --- /dev/null +++ b/install-bundler/setup-scripts/create-gcp-role.sh @@ -0,0 +1,22 @@ +#!/bin/bash +PROJECT_ID=$1 +GCP_IAM_ROLE=$2 +GCP_ENCRYPTED_CONFIG_SECRET=$3 +SECRET_NAME=$4 + + +echo "Creating GCP IAM role/service [$GCP_IAM_ROLE] account" +gcloud iam service-accounts create "${GCP_IAM_ROLE}" --display-name="Read secrets [$GCP_ENCRYPTED_CONFIG_SECRET] [$SECRET_NAME]" +echo "Creation of GCP IAM role/service [$GCP_IAM_ROLE] account completed" + +echo "Adding role binding of GCP IAM role/service [$GCP_IAM_ROLE] to GCP_ENCRYPTED_CONFIG_SECRET=[$GCP_ENCRYPTED_CONFIG_SECRET]" +gcloud secrets add-iam-policy-binding "${GCP_ENCRYPTED_CONFIG_SECRET}" \ --member="serviceAccount:$GCP_IAM_ROLE@$PROJECT_ID.iam.gserviceaccount.com" \ --role='roles/secretmanager.secretAccessor' +echo "Role binding of GCP IAM role/service [$GCP_IAM_ROLE] to GCP_ENCRYPTED_CONFIG_SECRET=[$GCP_ENCRYPTED_CONFIG_SECRET] completed" + +echo "Adding role binding of GCP IAM role/service [$GCP_IAM_ROLE] to SECRET_NAME=[$SECRET_NAME]" +gcloud secrets add-iam-policy-binding "${SECRET_NAME}" \ --member="serviceAccount:$GCP_IAM_ROLE@$PROJECT_ID.iam.gserviceaccount.com" \ --role='roles/secretmanager.secretAccessor' +echo "Role binding of GCP IAM role/service [$GCP_IAM_ROLE] to SECRET_NAME=[$SECRET_NAME] completed" \ No newline at end of file diff --git a/install-bundler/setup-scripts/create-secret.sh b/install-bundler/setup-scripts/create-secret.sh new file mode 100644 index 00000000..5c9dc84c --- /dev/null +++ b/install-bundler/setup-scripts/create-secret.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +SECRET_NAME=$1 + +# Check if the secret already exists +if gcloud secrets describe "$SECRET_NAME" &> /dev/null; then + echo "Secret $SECRET_NAME already exists." + # Ask for confirmation to update the secret + read -rp "Do you want to update the secret? (y/N): " CONFIRMATION + if [[ "$CONFIRMATION" =~ ^[Yy]$ ]]; then + # The user confirmed to update the secret + read -rsp "Enter the new password: " PASSWORD + echo # move to a new line + echo -n "$PASSWORD" | gcloud secrets versions add "$SECRET_NAME" --data-file=- + if [[ $? -eq 0 ]]; then + echo "Secret $SECRET_NAME updated successfully." + else + echo "Failed to update secret $SECRET_NAME." + fi + else + echo "Update canceled." + fi +else + echo "Secret $SECRET_NAME does not exist." + # Prompt for the password since we are creating a new secret + read -rsp "Enter the password: " PASSWORD + echo # move to a new line + echo -n "$PASSWORD" | gcloud secrets create "$SECRET_NAME" --data-file=- --replication-policy="automatic" + if [[ $? -eq 0 ]]; then + echo "Secret $SECRET_NAME created successfully." + else + echo "Failed to create secret $SECRET_NAME."
+ fi +fi diff --git a/install-bundler/setup-scripts/create-variables.sh b/install-bundler/setup-scripts/create-variables.sh new file mode 100644 index 00000000..260f8e28 --- /dev/null +++ b/install-bundler/setup-scripts/create-variables.sh @@ -0,0 +1,14 @@ + +CONFIG_FILE=$1 + +CURRENT_CONTEXT=$(kubectl config current-context) +CLUSTER_NAME=$(echo "$CURRENT_CONTEXT" | awk -F '_' '{print $NF}') +TRIMMED_CLUSTER_NAME=$(echo "$CLUSTER_NAME" | cut -c 1-8) +TRIMMED_NAMESPACE=$(echo "$NAMESPACE" | cut -c 1-10) + +GCP_IAM_ROLE="$TRIMMED_CLUSTER_NAME-$TRIMMED_NAMESPACE-bund" +GCP_ENCRYPTED_CONFIG_SECRET="$TRIMMED_CLUSTER_NAME-$TRIMMED_NAMESPACE-config" +GCP_PLAINTEXT_CONFIG_SECRET="$TRIMMED_CLUSTER_NAME-$TRIMMED_NAMESPACE-cfg-plain" +GCP_PLAIN_CONFIG_SECRET="$ENV-tw-bundler-plain-config" +SECRET_NAME="$TRIMMED_CLUSTER_NAME-$TRIMMED_NAMESPACE-passphrase" +KUBE_SERVICE_ACCOUNT="$TRIMMED_CLUSTER_NAME-$TRIMMED_NAMESPACE-$ENV-sa" \ No newline at end of file diff --git a/install-bundler/setup-scripts/gcp-role-binding.sh b/install-bundler/setup-scripts/gcp-role-binding.sh new file mode 100644 index 00000000..2269f657 --- /dev/null +++ b/install-bundler/setup-scripts/gcp-role-binding.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +NAMESPACE=$1 +PROJECT_ID=$2 +GCP_IAM_ROLE=$3 +SERVICE_ACCOUNT=$4 + +if kubectl get serviceaccount "$SERVICE_ACCOUNT" --namespace="$NAMESPACE" &> /dev/null; then + echo "$SERVICE_ACCOUNT in $NAMESPACE already exists. Skipping creation." +else + echo "Creating $SERVICE_ACCOUNT in $NAMESPACE" + kubectl create serviceaccount "$SERVICE_ACCOUNT" --namespace="$NAMESPACE" + echo "$SERVICE_ACCOUNT in $NAMESPACE created" +fi + +echo "Binding $GCP_IAM_ROLE with kubernetes $SERVICE_ACCOUNT for workloadIdentityUser in $NAMESPACE" +gcloud iam service-accounts add-iam-policy-binding "${GCP_IAM_ROLE}@${PROJECT_ID}.iam.gserviceaccount.com" \ --member="serviceAccount:${PROJECT_ID}.svc.id.goog[${NAMESPACE}/${SERVICE_ACCOUNT}]" \ --role='roles/iam.workloadIdentityUser' +echo "Binding Complete" + + +echo "Annotating $GCP_IAM_ROLE with kubernetes $SERVICE_ACCOUNT for workloadIdentityUser in $NAMESPACE" +kubectl annotate serviceaccount "$SERVICE_ACCOUNT" \ --namespace="$NAMESPACE" \ iam.gke.io/gcp-service-account="$GCP_IAM_ROLE@$PROJECT_ID.iam.gserviceaccount.com" + +echo "Annotation Complete" diff --git a/k8s/common/values.prod.yaml b/k8s/common/values.prod.yaml index 7f1d7ba9..326c2e94 100644 --- a/k8s/common/values.prod.yaml +++ b/k8s/common/values.prod.yaml @@ -10,7 +10,7 @@ secret: projectID: biconomy-prod passphrase: key: sdk-prod-relayer-node-service-passphrase - name: CONFIG_PASSPHRASE + name: BUNDLER_CONFIG_PASSPHRASE version: latest secret_plain: diff --git a/k8s/common/values.staging.yaml b/k8s/common/values.staging.yaml index 186149bc..7d6b2766 100644 --- a/k8s/common/values.staging.yaml +++ b/k8s/common/values.staging.yaml @@ -10,7 +10,7 @@ secret: projectID: biconomy-staging passphrase: key: sdk-staging-relayer-node-service-passphrase - name: CONFIG_PASSPHRASE + name: BUNDLER_CONFIG_PASSPHRASE version: latest secret_plain: diff --git a/k8s/relayer/templates/statefulset.yaml b/k8s/relayer/templates/statefulset.yaml index 40a01892..735fe01a 100644 --- a/k8s/relayer/templates/statefulset.yaml +++ b/k8s/relayer/templates/statefulset.yaml @@ -49,6 +49,7 @@ spec: containers: - name: {{ .Values.relayer.name }}{{ .Values.index }} image: {{ .Values.relayer.image }} + imagePullPolicy: "Always" ports: - containerPort: {{ .Values.relayer.port }} env: @@ -66,10 +67,10 @@ spec: - secretRef: name: {{ .Values.relayer.name
}}-passphrase volumeMounts: - - mountPath: "/relayer-node/config.json.enc" + - mountPath: "/home/nonroot/bundler/config.json.enc" name: secret-encrypted subPath: "config.json.enc" - - mountPath: "/relayer-node/config/{{ .Values.secret_plain.config.name }}" + - mountPath: "/home/nonroot/bundler/config/{{ .Values.secret_plain.config.name }}" name: secret-plain subPath: {{ .Values.secret_plain.config.name }} {{- if .Values.datadog.enable }} diff --git a/lightEntrypoint.sh b/lightEntrypoint.sh new file mode 100644 index 00000000..51018576 --- /dev/null +++ b/lightEntrypoint.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +# Set ORDINAL_INDEX from file +export NODE_PATH_INDEX=$(cat /etc/podinfo/ordinal_index) + +# Start Node.js application + +# Command to run the application +node server/dist/server/src/index.js \ No newline at end of file diff --git a/output.txt b/output.txt new file mode 100644 index 00000000..933b53f0 --- /dev/null +++ b/output.txt @@ -0,0 +1,2550 @@ +nodepath-m/44'/60'/0'/0/ , relayerIndex-0 , address-896aacdb5e0e280c39f61da7004f5f30993678c4 +nodepath-m/44'/60'/0'/0/ , relayerIndex-1 , address-ecdce66efc712bc26df264c3b6b9550821529e1b +nodepath-m/44'/60'/0'/0/ , relayerIndex-2 , address-10647ab5603b44258c6b4bfbcdcb2d74b2e7e645 +nodepath-m/44'/60'/0'/0/ , relayerIndex-3 , address-ac0d860f1a389aae7064b446163029a6c978a024 +nodepath-m/44'/60'/0'/0/ , relayerIndex-4 , address-639f1aa121b609505b54717d26a62cb49e7b4c16 +nodepath-m/44'/60'/0'/0/ , relayerIndex-5 , address-69bc988bf639e31626df3bc2164273128daa720f +nodepath-m/44'/60'/0'/0/ , relayerIndex-6 , address-ea5efdcd1f127d9536f611e75b05ce852aca4a3f +nodepath-m/44'/60'/0'/0/ , relayerIndex-7 , address-90259ccd7f8f6e2fa735e0a040cc7697234d54fb +nodepath-m/44'/60'/0'/0/ , relayerIndex-8 , address-a4d97741ad7ce428ddbcbb616c9d0e50e45c59b5 +nodepath-m/44'/60'/0'/0/ , relayerIndex-9 , address-b1414f0a9c02cef7901f3acc782779e8d16ddc03 +nodepath-m/44'/60'/0'/0/ , relayerIndex-10 , address-8fac67d5d53d97a67147fac0ef257291b8c353b7 +nodepath-m/44'/60'/0'/0/ , relayerIndex-11 , address-7e421aafd6ed11fb752f6515d0664439cecd0e28 +nodepath-m/44'/60'/0'/0/ , relayerIndex-12 , address-ed300adff8099a5d7edddaf4d04b4029c4f8d20d +nodepath-m/44'/60'/0'/0/ , relayerIndex-13 , address-e0de8ea837ec785e4a067dfe934ba4e726a18b0b +nodepath-m/44'/60'/0'/0/ , relayerIndex-14 , address-efcd727277c7393263414daea4dd30255421908b +nodepath-m/44'/60'/0'/0/ , relayerIndex-15 , address-e8f7e5b69fe956f0f383f6e24607cfbfc1af9e11 +nodepath-m/44'/60'/0'/0/ , relayerIndex-16 , address-48625b13e3dfba4f44a9316efa54744dc87c9457 +nodepath-m/44'/60'/0'/0/ , relayerIndex-17 , address-29908bd7c2dcb370ab9293de1db1b9538bd5dd4a +nodepath-m/44'/60'/0'/0/ , relayerIndex-18 , address-423fa0095ab3144275176c73859f4de96402d24d +nodepath-m/44'/60'/0'/0/ , relayerIndex-19 , address-9acc78a48f045267aae8e3070d4a1972cf9c14d7 +nodepath-m/44'/60'/0'/0/ , relayerIndex-20 , address-06623481ef8443541601906bfb3697e686b9af73 +nodepath-m/44'/60'/0'/0/ , relayerIndex-21 , address-9deee35d7b655fe6da126455de6f1adb264d0fb2 +nodepath-m/44'/60'/0'/0/ , relayerIndex-22 , address-6dfa6daf22f891a8be7230e73deb3a97e4ea021d +nodepath-m/44'/60'/0'/0/ , relayerIndex-23 , address-4b65ae28f847d010ca87e5d5a9e333d52e6dfa38 +nodepath-m/44'/60'/0'/0/ , relayerIndex-24 , address-4adfc4d3f70a0e82b3d63c5efaa5e14418664d3e +nodepath-m/44'/60'/0'/0/ , relayerIndex-25 , address-c5b9fb66b7b0fea2b1ac4a99fe392eb7b463401b +nodepath-m/44'/60'/0'/0/ , relayerIndex-26 , address-f71ed55e3ba72b1915d01f3157b1c36ebe003b56 +nodepath-m/44'/60'/0'/0/ , 
relayerIndex-27 , address-e127920e3600bbd749c8f7ccea6417e02b39b6fe +nodepath-m/44'/60'/0'/0/ , relayerIndex-28 , address-a30b26162fd969e3cffced88fa646bf8e427c4ea +nodepath-m/44'/60'/0'/0/ , relayerIndex-29 , address-0e86ec55b250e01b647b81de66d5fc6f02c8502d +nodepath-m/44'/60'/0'/0/ , relayerIndex-30 , address-5086888e460b34a03eff1bb63ecc1ac498c37216 +nodepath-m/44'/60'/0'/0/ , relayerIndex-31 , address-f1716ef5ab2fc6a12d7fbeb9750a0f2b2b3a1c7e +nodepath-m/44'/60'/0'/0/ , relayerIndex-32 , address-522a32f83dd0315aed7925a940412282389c732d +nodepath-m/44'/60'/0'/0/ , relayerIndex-33 , address-2f770d7c70d760aa5729ea434785d5afbf688f3c +nodepath-m/44'/60'/0'/0/ , relayerIndex-34 , address-64325510295557bc90bad3ff4538d5e33d36c99a +nodepath-m/44'/60'/0'/0/ , relayerIndex-35 , address-06e9318b0c2998a70a36cc1d7886336b3381b35b +nodepath-m/44'/60'/0'/0/ , relayerIndex-36 , address-ec615cd005a43681c3a40da9a63053202c57cac7 +nodepath-m/44'/60'/0'/0/ , relayerIndex-37 , address-822fc09f24e52dbf7a0c8e8ef77e8b28117451cb +nodepath-m/44'/60'/0'/0/ , relayerIndex-38 , address-8c11c032e81096701ecac63104b1e972e5c7f788 +nodepath-m/44'/60'/0'/0/ , relayerIndex-39 , address-887b53237b664bbed07174f2a6104f1a26e7211e +nodepath-m/44'/60'/0'/0/ , relayerIndex-40 , address-d6b13fcd7ca8ac80bac4481d3f45e8822413ce71 +nodepath-m/44'/60'/0'/0/ , relayerIndex-41 , address-cfa3e71504c1dc008668bf9932d38ae2f7ee1d98 +nodepath-m/44'/60'/0'/0/ , relayerIndex-42 , address-3bb10d5e8c1c1da3e3f1830a32b1a5d47196a904 +nodepath-m/44'/60'/0'/0/ , relayerIndex-43 , address-73034a8ea10a84d17d965c7868758e11b16a21cd +nodepath-m/44'/60'/0'/0/ , relayerIndex-44 , address-0995844545aa1720f9b1a53934a82c477bedfe2f +nodepath-m/44'/60'/0'/0/ , relayerIndex-45 , address-469166b115fdfdb9b23ac1ecc42b1e87f1869848 +nodepath-m/44'/60'/0'/0/ , relayerIndex-46 , address-2ce88c97df8bbd94e99cbb7f39268496fe9f084b +nodepath-m/44'/60'/0'/0/ , relayerIndex-47 , address-ae7bc34c0c15eb7a0e5529c10dc6b990d7d609af +nodepath-m/44'/60'/0'/0/ , relayerIndex-48 , address-d708237e346fef1d359b1acdc8de38d47b6a0d0b +nodepath-m/44'/60'/0'/0/ , relayerIndex-49 , address-919a03fcb390619db2821d0277f45e2b3f88f720 +nodepath-m/44'/60'/0'/1/ , relayerIndex-0 , address-75c033055cd4f144d51b59b35dae02d91be03e74 +nodepath-m/44'/60'/0'/1/ , relayerIndex-1 , address-9fa16120f63ac171f834a63da11c7612f4a123a4 +nodepath-m/44'/60'/0'/1/ , relayerIndex-2 , address-95a35bc49ebcfed3567f61810f05831101e19a6a +nodepath-m/44'/60'/0'/1/ , relayerIndex-3 , address-3605ee9b0819caa8b7f34dcb2d74ade708103bad +nodepath-m/44'/60'/0'/1/ , relayerIndex-4 , address-94f0942a8ea9b00f10f7c3a3a036d49d1dc6d9c8 +nodepath-m/44'/60'/0'/1/ , relayerIndex-5 , address-2155e5842f39add695cd505b7ca4c742bf428042 +nodepath-m/44'/60'/0'/1/ , relayerIndex-6 , address-46634a3c869ed69565773ac0d99c17ea53486672 +nodepath-m/44'/60'/0'/1/ , relayerIndex-7 , address-642339eb27fee9fe6474592ed22810821542efba +nodepath-m/44'/60'/0'/1/ , relayerIndex-8 , address-6ce5a7a3e3bc82a8538ac76b8585478011eb3479 +nodepath-m/44'/60'/0'/1/ , relayerIndex-9 , address-59e11bea4e6c72b12397da2e3a717155d9af75e1 +nodepath-m/44'/60'/0'/1/ , relayerIndex-10 , address-5c2dc9d7c9f9b089aa0fec5f25a4d38a4c7ab453 +nodepath-m/44'/60'/0'/1/ , relayerIndex-11 , address-303be31e6ec970fcf17093a0f8b491d76c345363 +nodepath-m/44'/60'/0'/1/ , relayerIndex-12 , address-8980f55284dbc0b54c981873860bcb6b9131836a +nodepath-m/44'/60'/0'/1/ , relayerIndex-13 , address-e3fd70d274d5b1ca77332da27d93e9974695454b +nodepath-m/44'/60'/0'/1/ , relayerIndex-14 , 
address-66d5a047815af6a429097558836980bf34f01ede +nodepath-m/44'/60'/0'/1/ , relayerIndex-15 , address-1587d14edf9b3bd375f789d3b5f510a6d11cfb11 +nodepath-m/44'/60'/0'/1/ , relayerIndex-16 , address-55b0f45ee51c0c04d75ad807def13932a3b2b408 +nodepath-m/44'/60'/0'/1/ , relayerIndex-17 , address-34598a974b9c1481c18504030b350f721fce6d66 +nodepath-m/44'/60'/0'/1/ , relayerIndex-18 , address-145113f5140d907a8cc357dab99f52c908636baf +nodepath-m/44'/60'/0'/1/ , relayerIndex-19 , address-2451b388dd2d895ed0e63991406cc3857ff7e01b +nodepath-m/44'/60'/0'/1/ , relayerIndex-20 , address-9ca9459143c9e4791e7bf8b201959989edabdfad +nodepath-m/44'/60'/0'/1/ , relayerIndex-21 , address-0a9cde236393ba649eec002ce0bd4784bc3355ef +nodepath-m/44'/60'/0'/1/ , relayerIndex-22 , address-eef7666cf8361592eb7e11ede2cf9688ae7a2493 +nodepath-m/44'/60'/0'/1/ , relayerIndex-23 , address-1a638d0c457d4332455a9411a5cfe61d1677c5b8 +nodepath-m/44'/60'/0'/1/ , relayerIndex-24 , address-b5b0ca38ba769114b97c4c2fd2361c8ea766ad68 +nodepath-m/44'/60'/0'/1/ , relayerIndex-25 , address-08de7d34e6df539983956f7b3452cc723d166dbd +nodepath-m/44'/60'/0'/1/ , relayerIndex-26 , address-1a9b7c08d3a4369455b86cb038bec0bc9300af43 +nodepath-m/44'/60'/0'/1/ , relayerIndex-27 , address-fad16550baac2501f886308f9bb032eafc9c11c8 +nodepath-m/44'/60'/0'/1/ , relayerIndex-28 , address-021b66514701983c0cd58c732cb3264815be94b8 +nodepath-m/44'/60'/0'/1/ , relayerIndex-29 , address-d9703df7a67a2faab5bb0c3ebb505014386524dd +nodepath-m/44'/60'/0'/1/ , relayerIndex-30 , address-01b1b7d13f4e04500f25edaef69161e80c4c42b5 +nodepath-m/44'/60'/0'/1/ , relayerIndex-31 , address-6d5c4cc7994883dfb3b3c91a7816429a86f9c34f +nodepath-m/44'/60'/0'/1/ , relayerIndex-32 , address-ac405ae3a4b2e1e614454da7deb3be51139243b8 +nodepath-m/44'/60'/0'/1/ , relayerIndex-33 , address-3aabe8cddcebdfb1e09b1342b233b3eb6aefd9fa +nodepath-m/44'/60'/0'/1/ , relayerIndex-34 , address-85c221534727ef84c4e092dbf2ff29ecb4ed522f +nodepath-m/44'/60'/0'/1/ , relayerIndex-35 , address-615264de168aefb904fdc7f22c89e20fcabdc209 +nodepath-m/44'/60'/0'/1/ , relayerIndex-36 , address-ce8780c30f2ad567f27f3e379853010d5aa9b663 +nodepath-m/44'/60'/0'/1/ , relayerIndex-37 , address-3df84597484aeeb0d87ef1dd4b6070aca7028860 +nodepath-m/44'/60'/0'/1/ , relayerIndex-38 , address-8a51152900f24f00c9022556ec28869198513d00 +nodepath-m/44'/60'/0'/1/ , relayerIndex-39 , address-4879e335837a645e5d7bd88c544b4fb65320688f +nodepath-m/44'/60'/0'/1/ , relayerIndex-40 , address-e04db87c32ff2e29df9fc701d6988a63a91a934e +nodepath-m/44'/60'/0'/1/ , relayerIndex-41 , address-f9f536936ab10a6fb6160afd2c875dbd4ba0ba1c +nodepath-m/44'/60'/0'/1/ , relayerIndex-42 , address-0e09387478acd285420c1e87ef0f7eb9c9595c7e +nodepath-m/44'/60'/0'/1/ , relayerIndex-43 , address-5c589af3362086d0d184f6af9deefd197885a118 +nodepath-m/44'/60'/0'/1/ , relayerIndex-44 , address-1775e6693276e262dcef1ad435f530b3c35ad8b0 +nodepath-m/44'/60'/0'/1/ , relayerIndex-45 , address-ae0cc79ffbaea1e7181f00855249e60a34b11956 +nodepath-m/44'/60'/0'/1/ , relayerIndex-46 , address-23fa59f7f936b6ee0aec43ec21dc79f14358a306 +nodepath-m/44'/60'/0'/1/ , relayerIndex-47 , address-e821b2ad91baf1477615e0f0e1da6ea181a6bc11 +nodepath-m/44'/60'/0'/1/ , relayerIndex-48 , address-809c46e61bf057c3d0bb4f9a4931d78de18d9d69 +nodepath-m/44'/60'/0'/1/ , relayerIndex-49 , address-db03fb9f66c42f13821142bc64de83398d6c56d9 +nodepath-m/44'/60'/0'/2/ , relayerIndex-0 , address-5ebb52f9c7a863321c604e961110b13344838858 +nodepath-m/44'/60'/0'/2/ , relayerIndex-1 , 
address-4b56e03826fe1c4ae440275a93dc8b25babe19d3 +nodepath-m/44'/60'/0'/2/ , relayerIndex-2 , address-0ab2a99288a2c43bac1a60f5bf11a54dbc321314 +nodepath-m/44'/60'/0'/2/ , relayerIndex-3 , address-8ef2911616a500a524907df3a468b19ced8799b0 +nodepath-m/44'/60'/0'/2/ , relayerIndex-4 , address-7896f7ae7a2eda9ecde3b98d7f0f5704720a1a30 +nodepath-m/44'/60'/0'/2/ , relayerIndex-5 , address-1971653a861c5ed51c1cb526c42c3240368797f9 +nodepath-m/44'/60'/0'/2/ , relayerIndex-6 , address-9282da40d7aa02e40efe1421b85cd3e31b35d97d +nodepath-m/44'/60'/0'/2/ , relayerIndex-7 , address-5ef2fe68a25e450fa8da07005d4a541a218f8876 +nodepath-m/44'/60'/0'/2/ , relayerIndex-8 , address-ce3829e21a39f0d31ec51f573f1378798dc2faa5 +nodepath-m/44'/60'/0'/2/ , relayerIndex-9 , address-80874d0204b52f98a9dd75e0a835f40398c2fcc6 +nodepath-m/44'/60'/0'/2/ , relayerIndex-10 , address-6cb01e0c8e4a3e06b4e78ab07ebc0f81f80dfbf3 +nodepath-m/44'/60'/0'/2/ , relayerIndex-11 , address-b4d47499159f72844476eb0f8d4c54e1629d8daf +nodepath-m/44'/60'/0'/2/ , relayerIndex-12 , address-ec7de89d30354abc1ebc422a6e17ec1fb361c1ca +nodepath-m/44'/60'/0'/2/ , relayerIndex-13 , address-44a4f174d2d6192875188c32b192b0621f0198de +nodepath-m/44'/60'/0'/2/ , relayerIndex-14 , address-63885a68c7b00b3ad005a1298e26d2af33e37f5c +nodepath-m/44'/60'/0'/2/ , relayerIndex-15 , address-f9b1f73ea37714f65fd5ae6eaf4d01e203daaa0f +nodepath-m/44'/60'/0'/2/ , relayerIndex-16 , address-dcce9cbaa5f595ff9767b0442adcc741d763d64b +nodepath-m/44'/60'/0'/2/ , relayerIndex-17 , address-cecdb729aa3281e974a7e41019da7c214dadbd16 +nodepath-m/44'/60'/0'/2/ , relayerIndex-18 , address-46edc6fd695a1ad17ad6184c4c159764b2cfad6d +nodepath-m/44'/60'/0'/2/ , relayerIndex-19 , address-85db9709721f3a51c6640f9ee4aba8d123c68e10 +nodepath-m/44'/60'/0'/2/ , relayerIndex-20 , address-a5fa8a6837aa51010106c1f2c65592ebf74deed0 +nodepath-m/44'/60'/0'/2/ , relayerIndex-21 , address-9fec3254a8efff71a2e5f53a9dc3060a963ff973 +nodepath-m/44'/60'/0'/2/ , relayerIndex-22 , address-433facccdeb41e99ff0a0fd4509458a5b02bb239 +nodepath-m/44'/60'/0'/2/ , relayerIndex-23 , address-0123066b684e2b534c82137436d844b2e7c1b298 +nodepath-m/44'/60'/0'/2/ , relayerIndex-24 , address-a990f11ac344f7b154ec8b464b15202e2f0d13a9 +nodepath-m/44'/60'/0'/2/ , relayerIndex-25 , address-575b92b245094d02e7b32670c42ff226e834f516 +nodepath-m/44'/60'/0'/2/ , relayerIndex-26 , address-787275ea711e4dd7456557ae25f8a04e3e10393b +nodepath-m/44'/60'/0'/2/ , relayerIndex-27 , address-86983b9793773a8547ba66212ad61c7802aac263 +nodepath-m/44'/60'/0'/2/ , relayerIndex-28 , address-ae9a0fb42d88d5a5081c3f81fabbf3e79552a1e5 +nodepath-m/44'/60'/0'/2/ , relayerIndex-29 , address-cd61a72da06b9d5c28a72fad03573219cc6ede76 +nodepath-m/44'/60'/0'/2/ , relayerIndex-30 , address-06b38ae87e77a833de50076e116205567fbc7f79 +nodepath-m/44'/60'/0'/2/ , relayerIndex-31 , address-71116de6d1536d438fddbe12ab204c14d45403cb +nodepath-m/44'/60'/0'/2/ , relayerIndex-32 , address-468d921e0cfde53eb57bad987797b6fb1bea1090 +nodepath-m/44'/60'/0'/2/ , relayerIndex-33 , address-a326771d642d400945403673c582a2d1ce958a95 +nodepath-m/44'/60'/0'/2/ , relayerIndex-34 , address-f8954728cc984dd54229c33babdcaba412ff43f1 +nodepath-m/44'/60'/0'/2/ , relayerIndex-35 , address-086d6819c7ba94e965c074f8c3df7664646a66e5 +nodepath-m/44'/60'/0'/2/ , relayerIndex-36 , address-f3d21d679772f6c9b71ee1f7666ab8ba3e39a8ab +nodepath-m/44'/60'/0'/2/ , relayerIndex-37 , address-b51bac3bbe6be024e05d3430edb486252dc49701 +nodepath-m/44'/60'/0'/2/ , relayerIndex-38 , 
address-4b70631f924e9368c72017d8c51750adac4a10b8 +nodepath-m/44'/60'/0'/2/ , relayerIndex-39 , address-bca23ede9d5fe129e2733240332cc84b4f542814 +nodepath-m/44'/60'/0'/2/ , relayerIndex-40 , address-8ceddfc455c33332dcc1b7dda63d0c29eaec0887 +nodepath-m/44'/60'/0'/2/ , relayerIndex-41 , address-b1cba1a11fa30d8da80e8bf20d72dbea46c0c74f +nodepath-m/44'/60'/0'/2/ , relayerIndex-42 , address-b78c094f5cb12f1bec7cd8779d5d8fd0faa1205e +nodepath-m/44'/60'/0'/2/ , relayerIndex-43 , address-ba7d3b2b59850d0d52c52e18a3313cceef62d5de +nodepath-m/44'/60'/0'/2/ , relayerIndex-44 , address-5502d623c8073e53f3a92a01f77619765de37256 +nodepath-m/44'/60'/0'/2/ , relayerIndex-45 , address-83ee2513758e21bdd76f1e59b85341f4cc4ce4b9 +nodepath-m/44'/60'/0'/2/ , relayerIndex-46 , address-011626a427a13a3262b33fcd4eefa6f9a6b9d87d +nodepath-m/44'/60'/0'/2/ , relayerIndex-47 , address-e671c578deb3cf22aa821640d8696e5f7432ea73 +nodepath-m/44'/60'/0'/2/ , relayerIndex-48 , address-1b36773d51e5934cb5cae5301066ddad337989df +nodepath-m/44'/60'/0'/2/ , relayerIndex-49 , address-316e9dbc3d680ecd9b05d9ff054f802cc2cf01cf +nodepath-m/44'/60'/0'/3/ , relayerIndex-0 , address-2c149fb3f79f3d33dfadb565ad221c882747985e +nodepath-m/44'/60'/0'/3/ , relayerIndex-1 , address-9b62221407af06f3420d68dc61134733f17b5e85 +nodepath-m/44'/60'/0'/3/ , relayerIndex-2 , address-553c65d72a7669729a55f8cfdda227eb7a5545f0 +nodepath-m/44'/60'/0'/3/ , relayerIndex-3 , address-d4d079e9a790ba72887a3c3bda8e432284c17427 +nodepath-m/44'/60'/0'/3/ , relayerIndex-4 , address-34f83c6093efd79038998ea1ff19e0a549762e67 +nodepath-m/44'/60'/0'/3/ , relayerIndex-5 , address-bf6899985b042bf3bff489ba14f2a7d86667ba0e +nodepath-m/44'/60'/0'/3/ , relayerIndex-6 , address-1f173847adfa285ca704e7dd421a01a2dc2b852f +nodepath-m/44'/60'/0'/3/ , relayerIndex-7 , address-51e9099205d0256f7b104658edd03111709c5522 +nodepath-m/44'/60'/0'/3/ , relayerIndex-8 , address-da7ee976822af6636fe746b1dce39bf8dc1fb6dc +nodepath-m/44'/60'/0'/3/ , relayerIndex-9 , address-e028a76f0708bb4a6a928391598731a00b1f9b68 +nodepath-m/44'/60'/0'/3/ , relayerIndex-10 , address-651513fd218d051b28c20e31055ac172af623446 +nodepath-m/44'/60'/0'/3/ , relayerIndex-11 , address-c8a4f98176f0550ed2a80adef860c916cd15a18c +nodepath-m/44'/60'/0'/3/ , relayerIndex-12 , address-16ddaaf59375ecfca27bc94d9d52892cb97b0a60 +nodepath-m/44'/60'/0'/3/ , relayerIndex-13 , address-2ff0b4bd0b14782fff947816a900617b01100049 +nodepath-m/44'/60'/0'/3/ , relayerIndex-14 , address-f9464d9cb85185e463ceb61db35f864bf3940103 +nodepath-m/44'/60'/0'/3/ , relayerIndex-15 , address-009515d9715b2d4ad8e97cbb742271adbebac9cb +nodepath-m/44'/60'/0'/3/ , relayerIndex-16 , address-9a5da9326ebbf1bf28b294326872ad07d98374fa +nodepath-m/44'/60'/0'/3/ , relayerIndex-17 , address-43c043a1c5984b1a1527493b333021ff88949902 +nodepath-m/44'/60'/0'/3/ , relayerIndex-18 , address-1f57ef497ca16035cb6ff0e11aeea038192e0a6c +nodepath-m/44'/60'/0'/3/ , relayerIndex-19 , address-0df1e3516f72b6836cdc6da24aad6e1de7d31bc8 +nodepath-m/44'/60'/0'/3/ , relayerIndex-20 , address-c0108faa7065b831bb9d419cedf5d10f7644d76f +nodepath-m/44'/60'/0'/3/ , relayerIndex-21 , address-f3334558f324b64196a74171cd0675a3abff1239 +nodepath-m/44'/60'/0'/3/ , relayerIndex-22 , address-e645ea1013b028eb59e3eaccc8fc711a2734d3c5 +nodepath-m/44'/60'/0'/3/ , relayerIndex-23 , address-dc7a7beebd8b7e318e61e5d9c90b901c822ed0ac +nodepath-m/44'/60'/0'/3/ , relayerIndex-24 , address-f1fae5f39352c861c041fa8c90c640b569523006 +nodepath-m/44'/60'/0'/3/ , relayerIndex-25 , address-1c6e6aa323c8da07abf66122103c78b30fa574b5 
+nodepath-m/44'/60'/0'/3/ , relayerIndex-26 , address-587ca57c4e19887a44190026dc97dd2f67d67d24 +nodepath-m/44'/60'/0'/3/ , relayerIndex-27 , address-cd6a889b0bc988b8c10799097541213e49ebe19e +nodepath-m/44'/60'/0'/3/ , relayerIndex-28 , address-e5da5adfe7191fdb84ac4ab49b661958f50f1e21 +nodepath-m/44'/60'/0'/3/ , relayerIndex-29 , address-965ba079ac44ba577c5081932366d87aac2b5b35 +nodepath-m/44'/60'/0'/3/ , relayerIndex-30 , address-081708ab403709178d283a86a9d72236072a229e +nodepath-m/44'/60'/0'/3/ , relayerIndex-31 , address-d77b2652ca86ba5dea885be6eab432a2623a1ddc +nodepath-m/44'/60'/0'/3/ , relayerIndex-32 , address-f500c2e5b8cb31c4dd4266985573fd4cad0ac8f8 +nodepath-m/44'/60'/0'/3/ , relayerIndex-33 , address-272eb77fc31a1ffaff943fc25dd6baabe57005f2 +nodepath-m/44'/60'/0'/3/ , relayerIndex-34 , address-66d314dc7353e997c2c798ba5e3683753d5380c1 +nodepath-m/44'/60'/0'/3/ , relayerIndex-35 , address-155da39baf6724ea5f0855839629bfbf591f54ff +nodepath-m/44'/60'/0'/3/ , relayerIndex-36 , address-e35d7474e9dd9948506aae2cd7c74ed316802fb7 +nodepath-m/44'/60'/0'/3/ , relayerIndex-37 , address-a5904670cd736b352d3ed568ce6a7218f854fdaa +nodepath-m/44'/60'/0'/3/ , relayerIndex-38 , address-7b82d9926521ced5812fb802960a0ccfa054893c +nodepath-m/44'/60'/0'/3/ , relayerIndex-39 , address-98c983ad7ab9b6c723033ee3a2d9f1701dff95f8 +nodepath-m/44'/60'/0'/3/ , relayerIndex-40 , address-cd1f15d5446b1a4e59af65c9ba3ef74c0dfb7b0d +nodepath-m/44'/60'/0'/3/ , relayerIndex-41 , address-b773363f2c0f43f28f6b6ebfdc5a0917de024f98 +nodepath-m/44'/60'/0'/3/ , relayerIndex-42 , address-e408b5c8beb5577e7ff04143a80280c4c1a94a73 +nodepath-m/44'/60'/0'/3/ , relayerIndex-43 , address-9708476a1efcea56ff38966f69245df00520cb2a +nodepath-m/44'/60'/0'/3/ , relayerIndex-44 , address-2b785329fad3e8395191d2578983d4e9f46f2c48 +nodepath-m/44'/60'/0'/3/ , relayerIndex-45 , address-7e31a0e69e1f9c50706858953212c593ed62f74a +nodepath-m/44'/60'/0'/3/ , relayerIndex-46 , address-98d3a548cbdcc9ed0807657fd26a823ba84cba88 +nodepath-m/44'/60'/0'/3/ , relayerIndex-47 , address-10ca8cb061d02d91548a7c12a96857758af190e7 +nodepath-m/44'/60'/0'/3/ , relayerIndex-48 , address-098e4a937cbf3c79d82f03ee488a93760c7d0335 +nodepath-m/44'/60'/0'/3/ , relayerIndex-49 , address-8e66d474bc8b3b793a80b02eef0546e8a8fd4037 +nodepath-m/44'/60'/0'/4/ , relayerIndex-0 , address-d5b01c1d87365213fa88a3a9635813ad97b86bf7 +nodepath-m/44'/60'/0'/4/ , relayerIndex-1 , address-e27dc76c935e5b2014ff1b218b1bcc9a98d66f5e +nodepath-m/44'/60'/0'/4/ , relayerIndex-2 , address-9096f6d6d01b2dc7fa975d091a4d63f9cbc45a36 +nodepath-m/44'/60'/0'/4/ , relayerIndex-3 , address-a30b95cc189a2074a404b28c41e73d6a7ae2d64d +nodepath-m/44'/60'/0'/4/ , relayerIndex-4 , address-2beeb07c2bb51caf3bb61df96c1639a9c1172f63 +nodepath-m/44'/60'/0'/4/ , relayerIndex-5 , address-3287796b08e8063c1f1d57f3a9ab598cdbd8fc33 +nodepath-m/44'/60'/0'/4/ , relayerIndex-6 , address-16585e0abfd40d571d9ee73e18389b495878ea47 +nodepath-m/44'/60'/0'/4/ , relayerIndex-7 , address-3814a86f2bf20cf98dd1c2d30941b947883651ff +nodepath-m/44'/60'/0'/4/ , relayerIndex-8 , address-78be442c4c4e0e824e9494c5c604f8144030b204 +nodepath-m/44'/60'/0'/4/ , relayerIndex-9 , address-6a9f72629fc496e50105e6e1cf307407659f6494 +nodepath-m/44'/60'/0'/4/ , relayerIndex-10 , address-a1ba7846ae6059917166752dda062c1b41b7a758 +nodepath-m/44'/60'/0'/4/ , relayerIndex-11 , address-d0f5dfcfdfc1595c7d82297feefa21655fc07314 +nodepath-m/44'/60'/0'/4/ , relayerIndex-12 , address-a053da24c4a6968482cbe399064de509a3bbdec2 +nodepath-m/44'/60'/0'/4/ , relayerIndex-13 , 
address-5bcf5bc2bf90e2b486f8bd9104bc423d0de9a25e +nodepath-m/44'/60'/0'/4/ , relayerIndex-14 , address-ca719c11ffebf1d32045e07ce7a36f59660fb9b1 +nodepath-m/44'/60'/0'/4/ , relayerIndex-15 , address-51e40d7efd6587f1b1dc18b98f91f07388d44559 +nodepath-m/44'/60'/0'/4/ , relayerIndex-16 , address-7d224228dd8650bdfb887db645babd8c3cb4753c +nodepath-m/44'/60'/0'/4/ , relayerIndex-17 , address-f1d9f2cd603bb0c89ff7335abcb7389853d29e52 +nodepath-m/44'/60'/0'/4/ , relayerIndex-18 , address-249fad7e929ee64eae8c43b07a593ac12fe327c4 +nodepath-m/44'/60'/0'/4/ , relayerIndex-19 , address-92562e8f2964867aa30b1d1e4fee09d0d37ef25a +nodepath-m/44'/60'/0'/4/ , relayerIndex-20 , address-3fdf498842208ee5a1fb6dd0b9072b0d47c2bf46 +nodepath-m/44'/60'/0'/4/ , relayerIndex-21 , address-ee78787591f2a6b4f68983e8463fd6cea1e71ff6 +nodepath-m/44'/60'/0'/4/ , relayerIndex-22 , address-820ba0e40d981837ef37c72fdeb4131fe1b085aa +nodepath-m/44'/60'/0'/4/ , relayerIndex-23 , address-3e49eeaf10b2a15f3e6b18830b65e1d4ac36fb21 +nodepath-m/44'/60'/0'/4/ , relayerIndex-24 , address-87b06c209558054d286d35d59101ed9fcd4947c9 +nodepath-m/44'/60'/0'/4/ , relayerIndex-25 , address-b91b0884fb5b49cd3124318774c7e5d9604f648d +nodepath-m/44'/60'/0'/4/ , relayerIndex-26 , address-43801b1b84d96378eeee490e03f4b257ac3da94d +nodepath-m/44'/60'/0'/4/ , relayerIndex-27 , address-5854b0803fafe65a026a08ab7d0e705404631b3b +nodepath-m/44'/60'/0'/4/ , relayerIndex-28 , address-ff63535636cadecd9f179574600cf4c9aeda3138 +nodepath-m/44'/60'/0'/4/ , relayerIndex-29 , address-9c4f9810b742ba34dc8fc1741a8f92f2dd29c067 +nodepath-m/44'/60'/0'/4/ , relayerIndex-30 , address-36da853283450c9594913c10adaddb9386362f35 +nodepath-m/44'/60'/0'/4/ , relayerIndex-31 , address-0a3a10da5131a1bc2c38eb5ad5c03d8960a8d6cb +nodepath-m/44'/60'/0'/4/ , relayerIndex-32 , address-aaf27e225fd06deb93cd208b92a60a1dbcf05451 +nodepath-m/44'/60'/0'/4/ , relayerIndex-33 , address-f81d73de9801f32918394b85da1ba983336121fa +nodepath-m/44'/60'/0'/4/ , relayerIndex-34 , address-e918dbc11c7922b646dafff9e86afbe9a95b3997 +nodepath-m/44'/60'/0'/4/ , relayerIndex-35 , address-69d2662d73afa7244ad9e7c70826845085951d9f +nodepath-m/44'/60'/0'/4/ , relayerIndex-36 , address-4332c81f499661a1227ee6b45932f59d6605fa5c +nodepath-m/44'/60'/0'/4/ , relayerIndex-37 , address-92fed236fb745614873513353ed6dd9747409f64 +nodepath-m/44'/60'/0'/4/ , relayerIndex-38 , address-714d8a512dda67d7146ce22ecb6ebd435c84680b +nodepath-m/44'/60'/0'/4/ , relayerIndex-39 , address-fdd532b16f2c40510027cc3f21181c01dcc33f2c +nodepath-m/44'/60'/0'/4/ , relayerIndex-40 , address-676646ac1e0cfb772f7e54cb6e1043c3967901b6 +nodepath-m/44'/60'/0'/4/ , relayerIndex-41 , address-da4faa929f74a476a343897ab48a60a2d9a47ee5 +nodepath-m/44'/60'/0'/4/ , relayerIndex-42 , address-54600e48c25d80efc4fd4f38c4b7da36b59cbc09 +nodepath-m/44'/60'/0'/4/ , relayerIndex-43 , address-965b10cd20ae1ce1789ed6fc96d617b6b9d65b74 +nodepath-m/44'/60'/0'/4/ , relayerIndex-44 , address-cf44e669b0d19c904e452d620fedbe495a03026c +nodepath-m/44'/60'/0'/4/ , relayerIndex-45 , address-1f63c27a83e469e4a41510b32f72817cd63fb3a9 +nodepath-m/44'/60'/0'/4/ , relayerIndex-46 , address-14ab2fa46f9a8add3579a8ace0865710eaed7009 +nodepath-m/44'/60'/0'/4/ , relayerIndex-47 , address-ad9f40bd2a44c1541636541808c087aca81a32d6 +nodepath-m/44'/60'/0'/4/ , relayerIndex-48 , address-5602d42f160008c03bfdaf7be6b88bb233193e55 +nodepath-m/44'/60'/0'/4/ , relayerIndex-49 , address-2f2e6ee3334e2edda7f6e6a029c75ba17b43a2dc +nodepath-m/44'/60'/0'/5/ , relayerIndex-0 , 
address-6bb3c82eaffc54962d6ca9ef674b4a6121fc6992 +nodepath-m/44'/60'/0'/5/ , relayerIndex-1 , address-a415cf24c12e318a4be5c7820aade29bcb6109d5 +nodepath-m/44'/60'/0'/5/ , relayerIndex-2 , address-a4d206e6a7b9a2c8e4a225e535972ee5da0e564c +nodepath-m/44'/60'/0'/5/ , relayerIndex-3 , address-99fa2851c0fe362a4a563d6419279b1dde3d7344 +nodepath-m/44'/60'/0'/5/ , relayerIndex-4 , address-59f0eac4390a97b2e1c5a1e0cf67a7ccec8486a6 +nodepath-m/44'/60'/0'/5/ , relayerIndex-5 , address-23883e7c75a631f665003276d732cfebc3589765 +nodepath-m/44'/60'/0'/5/ , relayerIndex-6 , address-eed2f97ef5ff6e01649788d2d11c2bcad9a1e096 +nodepath-m/44'/60'/0'/5/ , relayerIndex-7 , address-c0ca22a60fc1f6b91f39efd72c003a9c75bc2d0c +nodepath-m/44'/60'/0'/5/ , relayerIndex-8 , address-9fdbb64b79293404660c356ff563599c01c23038 +nodepath-m/44'/60'/0'/5/ , relayerIndex-9 , address-cef2f0444e52ae3069fee255fcb6e206ce82c12a +nodepath-m/44'/60'/0'/5/ , relayerIndex-10 , address-5f16237556f59fbb9dfa573ad7225a53a9601657 +nodepath-m/44'/60'/0'/5/ , relayerIndex-11 , address-cfec3b958b651bae8cca9459594e2295d8c5aae9 +nodepath-m/44'/60'/0'/5/ , relayerIndex-12 , address-67fb80c7edba37e348e608f9b9fd2f43433842e8 +nodepath-m/44'/60'/0'/5/ , relayerIndex-13 , address-56c7ef4755ba27ac4cb2a8d6f33267097914fc85 +nodepath-m/44'/60'/0'/5/ , relayerIndex-14 , address-c1ab105c62b8b935877758c5f0f9d86f11bcd5d6 +nodepath-m/44'/60'/0'/5/ , relayerIndex-15 , address-7ea94f9403d87a44a4021bd2b3cad32347a11120 +nodepath-m/44'/60'/0'/5/ , relayerIndex-16 , address-7aedc34aebeec7a17252300cd668a53ce64f83f3 +nodepath-m/44'/60'/0'/5/ , relayerIndex-17 , address-a496636087ae5b4102ca92cea3e3baea8b2cb6b4 +nodepath-m/44'/60'/0'/5/ , relayerIndex-18 , address-e62e330ed2947329d5054ae1bd6c7c1aaf5b8638 +nodepath-m/44'/60'/0'/5/ , relayerIndex-19 , address-20ae52c46e0eaaafbd1f0e4368033daf837b20d8 +nodepath-m/44'/60'/0'/5/ , relayerIndex-20 , address-24213ece5a4019c0024be72ab5ae8c8365165266 +nodepath-m/44'/60'/0'/5/ , relayerIndex-21 , address-2a5816d14f920dfb385ee6fd6a1836e96b802916 +nodepath-m/44'/60'/0'/5/ , relayerIndex-22 , address-3b026d97c244d89776cafe9322e5b4a4a3368d6a +nodepath-m/44'/60'/0'/5/ , relayerIndex-23 , address-223719c79a19ecb91e2e345e6dc515537c9e8b73 +nodepath-m/44'/60'/0'/5/ , relayerIndex-24 , address-e1ef60522df291b283c36e59b05f45eab337f0ab +nodepath-m/44'/60'/0'/5/ , relayerIndex-25 , address-79448ea42ff3961ba275b348f3acf6d3502eb69b +nodepath-m/44'/60'/0'/5/ , relayerIndex-26 , address-bfb74396bab2b60fae716346b416cd8afbc0d8f5 +nodepath-m/44'/60'/0'/5/ , relayerIndex-27 , address-ffb088d2b3bc0f0ac5796b1d1fbd9f9560057df7 +nodepath-m/44'/60'/0'/5/ , relayerIndex-28 , address-bd03cf30f2c1222ca67daf672d128b53fb69568c +nodepath-m/44'/60'/0'/5/ , relayerIndex-29 , address-9231da10d4dd2d3c8fe244641ac9cb1a8af32f98 +nodepath-m/44'/60'/0'/5/ , relayerIndex-30 , address-94d88a9cea4629dfc5ad37a23ee0a77b19b72d83 +nodepath-m/44'/60'/0'/5/ , relayerIndex-31 , address-c7454694d2460084976e3fc1dec8d983ff5e5fff +nodepath-m/44'/60'/0'/5/ , relayerIndex-32 , address-afe64194d92b7d4f40faba053b537f583d2169b4 +nodepath-m/44'/60'/0'/5/ , relayerIndex-33 , address-9a35dcf59c588df862f81c0cc674c63eb5bc4f7e +nodepath-m/44'/60'/0'/5/ , relayerIndex-34 , address-1e42571b3d58715b58afc570b451a122a5baf881 +nodepath-m/44'/60'/0'/5/ , relayerIndex-35 , address-602e136802d4a9cdb0cff2f845feb6d0142c4a41 +nodepath-m/44'/60'/0'/5/ , relayerIndex-36 , address-36269ce524b5c06a4570c40f8d453986dc67de61 +nodepath-m/44'/60'/0'/5/ , relayerIndex-37 , address-9094c518fffd60a8ea69668229983f776d674f6d 
+nodepath-m/44'/60'/0'/5/ , relayerIndex-38 , address-0b2723a69845c69dfb2f0ea06ddcdb62042e5c85 +nodepath-m/44'/60'/0'/5/ , relayerIndex-39 , address-eee77c8655dc042e16dff23597730dad6db81f3f +nodepath-m/44'/60'/0'/5/ , relayerIndex-40 , address-e909b33c5082172eb5376ce61bf7a499e47b56f9 +nodepath-m/44'/60'/0'/5/ , relayerIndex-41 , address-37412c8ddb09e3b3ecc71c4e00fd0cec4c062d68 +nodepath-m/44'/60'/0'/5/ , relayerIndex-42 , address-e6b55ae6c6ef43118f9f75e8d49ef41d10f5c1b5 +nodepath-m/44'/60'/0'/5/ , relayerIndex-43 , address-73ad7fafeee09a93c1e2837f07db5635cba02b05 +nodepath-m/44'/60'/0'/5/ , relayerIndex-44 , address-08024106f4e5ee50ea7f632463a55e1f7330179c +nodepath-m/44'/60'/0'/5/ , relayerIndex-45 , address-eb2acfb0cabda85b6cd5997e6395cee33e31e601 +nodepath-m/44'/60'/0'/5/ , relayerIndex-46 , address-cecf614e49abe8af7b2a088915269ea60380568e +nodepath-m/44'/60'/0'/5/ , relayerIndex-47 , address-dc14d44e076132c0715cd4a7aae50ae16323ec64 +nodepath-m/44'/60'/0'/5/ , relayerIndex-48 , address-eeb6db07fc16ae8ecc2a549e350de6131968cae5 +nodepath-m/44'/60'/0'/5/ , relayerIndex-49 , address-d1a7b31684adc71e269b334bfd5ed17b3e235576 +nodepath-m/44'/60'/0'/6/ , relayerIndex-0 , address-82015d402b8dd78fd671ec2b33f700a6bae7799f +nodepath-m/44'/60'/0'/6/ , relayerIndex-1 , address-86e633337f79734a67b3783231aab69515e2bb98 +nodepath-m/44'/60'/0'/6/ , relayerIndex-2 , address-bed413da6cf09958d4e334570cc7895b5397cc4f +nodepath-m/44'/60'/0'/6/ , relayerIndex-3 , address-672a4fb6a4d46e1ec9851ce4e53d1ae026759d08 +nodepath-m/44'/60'/0'/6/ , relayerIndex-4 , address-80fc438eb73504734e71223617354fcb0591aa81 +nodepath-m/44'/60'/0'/6/ , relayerIndex-5 , address-d5b588638ec7eaa03933a4c128454663b496eba9 +nodepath-m/44'/60'/0'/6/ , relayerIndex-6 , address-e43210a973c17106e3d29286cba380dc6a21b5e2 +nodepath-m/44'/60'/0'/6/ , relayerIndex-7 , address-61eacb13f8a9096a0f093f52719f629ee36334bd +nodepath-m/44'/60'/0'/6/ , relayerIndex-8 , address-b028b04d7db23767497ee4c5aa908562b4f331b6 +nodepath-m/44'/60'/0'/6/ , relayerIndex-9 , address-91252054bd80470690d1e78abd3aa8d381ea4c14 +nodepath-m/44'/60'/0'/6/ , relayerIndex-10 , address-84a5c11e0d51c566dec08183331e1445d6e84106 +nodepath-m/44'/60'/0'/6/ , relayerIndex-11 , address-952ec1cca6fc642b836f63e67bba1de978e8ecf7 +nodepath-m/44'/60'/0'/6/ , relayerIndex-12 , address-ec1fe07e0ab536278f54a957fd3ce5383a687f82 +nodepath-m/44'/60'/0'/6/ , relayerIndex-13 , address-d16e1af51b576909ef5adfb4d88c86962d305a9e +nodepath-m/44'/60'/0'/6/ , relayerIndex-14 , address-14b14afbafe7651d64418ea829d489a287563890 +nodepath-m/44'/60'/0'/6/ , relayerIndex-15 , address-0cd38ef9bc46fddc3d636d1710d41c93290c4f6b +nodepath-m/44'/60'/0'/6/ , relayerIndex-16 , address-fd354869e13d4c7638e1d49f10ef62a3f50929d5 +nodepath-m/44'/60'/0'/6/ , relayerIndex-17 , address-a1ac93c5f4de32b1996ca5b719d2d7ce85a52056 +nodepath-m/44'/60'/0'/6/ , relayerIndex-18 , address-899ff3eadcd5c0acea052760362b38053d33a867 +nodepath-m/44'/60'/0'/6/ , relayerIndex-19 , address-fe732ca624b8540264161dc385308db8fb2ac2a9 +nodepath-m/44'/60'/0'/6/ , relayerIndex-20 , address-6f0404cfa93093bdbecb67d4b3a4763d6d502101 +nodepath-m/44'/60'/0'/6/ , relayerIndex-21 , address-239ab36ee0b6647158d77a424314774ee738f3de +nodepath-m/44'/60'/0'/6/ , relayerIndex-22 , address-49bc25fd957e2397cb4ed883c5dd2970ec7037c2 +nodepath-m/44'/60'/0'/6/ , relayerIndex-23 , address-8cd8f5759be675a57af9870571f0edebb1f6ba1d +nodepath-m/44'/60'/0'/6/ , relayerIndex-24 , address-4deac4e2dd3ff64a3ae97b45a5fc1ce2d23395af +nodepath-m/44'/60'/0'/6/ , relayerIndex-25 , 
address-c775324812634cd8b185cbec1935388644431bb4 +nodepath-m/44'/60'/0'/6/ , relayerIndex-26 , address-badddde00124e453861fbfc9ea39e93bd30fdf88 +nodepath-m/44'/60'/0'/6/ , relayerIndex-27 , address-4da28bd12ab8327f548d2927c7e39a8e53c4436a +nodepath-m/44'/60'/0'/6/ , relayerIndex-28 , address-ef9684cc5a992ad1c1296975eea6c230f413eb51 +nodepath-m/44'/60'/0'/6/ , relayerIndex-29 , address-8df53b0735fcd397b4b8c2bd37e87b561f0e27b3 +nodepath-m/44'/60'/0'/6/ , relayerIndex-30 , address-9edc7ad18e97e1009fe8f5a453d79cda766c575b +nodepath-m/44'/60'/0'/6/ , relayerIndex-31 , address-40ae74a1c0ec044753c3161da1504203faed94cb +nodepath-m/44'/60'/0'/6/ , relayerIndex-32 , address-d7ace3aa360cdcd81e69aa70079b5a44dce0011b +nodepath-m/44'/60'/0'/6/ , relayerIndex-33 , address-2970b46ba5a0f14ad11f5677d8dd48aa8a4768d2 +nodepath-m/44'/60'/0'/6/ , relayerIndex-34 , address-99ea0dbb2eb2c75701804c21230e53697e5944df +nodepath-m/44'/60'/0'/6/ , relayerIndex-35 , address-04e76818e8a7f6e15f5559dd2a80ee05d2c879ab +nodepath-m/44'/60'/0'/6/ , relayerIndex-36 , address-fdf4a0d1d379c94423f6e0aea2968d4c459cafba +nodepath-m/44'/60'/0'/6/ , relayerIndex-37 , address-692fd055f7911df9e4ab0eec338671eb320d1a71 +nodepath-m/44'/60'/0'/6/ , relayerIndex-38 , address-baf8ce3da44f8221f439e3d16c8281bd86633a56 +nodepath-m/44'/60'/0'/6/ , relayerIndex-39 , address-3e5efacc8ea639f31edde3056c4b52dd5f69542c +nodepath-m/44'/60'/0'/6/ , relayerIndex-40 , address-c445890503ddd7c8cd99052398abbbc87cdd7c83 +nodepath-m/44'/60'/0'/6/ , relayerIndex-41 , address-c8cdbfcaf127e7aa2fe0a307ea8e88f7a5c36333 +nodepath-m/44'/60'/0'/6/ , relayerIndex-42 , address-47c95f762364f0c1f40c907ba38cde79d11908e9 +nodepath-m/44'/60'/0'/6/ , relayerIndex-43 , address-0a9eef0f3fccd3d44c1fd24fb0f4d959889c9396 +nodepath-m/44'/60'/0'/6/ , relayerIndex-44 , address-69be907ca73a29ed9a5d49681d549669a0467a3c +nodepath-m/44'/60'/0'/6/ , relayerIndex-45 , address-02211791258ebfe14e45e58b5e482be6c7255f11 +nodepath-m/44'/60'/0'/6/ , relayerIndex-46 , address-80d5e8654aa533855b185c984b2821cb9c43b21a +nodepath-m/44'/60'/0'/6/ , relayerIndex-47 , address-a2d1779733727ab7abc12c0eda6b84323f621b9e +nodepath-m/44'/60'/0'/6/ , relayerIndex-48 , address-a75b973848be43476aa3b872a9eda253713f3eff +nodepath-m/44'/60'/0'/6/ , relayerIndex-49 , address-44f7741bef85828325f4e01b05fd6b099d671733 +nodepath-m/44'/60'/0'/7/ , relayerIndex-0 , address-9b785767fba919d6de533d109ff2052c89a4365d +nodepath-m/44'/60'/0'/7/ , relayerIndex-1 , address-7e04e43765daf3e598ce49b290b26bf45025d7b1 +nodepath-m/44'/60'/0'/7/ , relayerIndex-2 , address-4178e6213155bf04a117550c9892d01e3d1b7768 +nodepath-m/44'/60'/0'/7/ , relayerIndex-3 , address-60d2a7962a78eac1b45f32ab01976d7ede487856 +nodepath-m/44'/60'/0'/7/ , relayerIndex-4 , address-3442ece99122a6f2de634cc374ca1a15ee7ce5f1 +nodepath-m/44'/60'/0'/7/ , relayerIndex-5 , address-c7f56ae100acb29f91b09ec945b5b22aced4f664 +nodepath-m/44'/60'/0'/7/ , relayerIndex-6 , address-d2eb5798bb27ff66d042b009f7c0b97772110d23 +nodepath-m/44'/60'/0'/7/ , relayerIndex-7 , address-26a8467f0cdc8c9524554fcc233fc79a6773f901 +nodepath-m/44'/60'/0'/7/ , relayerIndex-8 , address-65b7bb48024a03af743806992b205ad3ec5a0328 +nodepath-m/44'/60'/0'/7/ , relayerIndex-9 , address-d865dca386d0b8cf61eb46d1e38eb413a0f4bedd +nodepath-m/44'/60'/0'/7/ , relayerIndex-10 , address-ec4469cda4f54cc58e0ca3f18e28e8661fff928c +nodepath-m/44'/60'/0'/7/ , relayerIndex-11 , address-9820ff77a33ce44cf32a059b2443f0108e00fa56 +nodepath-m/44'/60'/0'/7/ , relayerIndex-12 , address-c1d0e7f70de92c305fdf65059402e820025df80e 
+nodepath-m/44'/60'/0'/7/ , relayerIndex-13 , address-2b5649c95ee11a4f9eb665315057386016fea042 +nodepath-m/44'/60'/0'/7/ , relayerIndex-14 , address-e00789c98754d0fa54b0aaf3aeb1efc46d4d328f +nodepath-m/44'/60'/0'/7/ , relayerIndex-15 , address-e2fe51c1e2b091e903acf744c5e9598659a7cf8c +nodepath-m/44'/60'/0'/7/ , relayerIndex-16 , address-e636fd8b5891aa1f3ba626805e153f5c6388e2d9 +nodepath-m/44'/60'/0'/7/ , relayerIndex-17 , address-44086512e39fe3a724a4f59e1b808afabf7470d3 +nodepath-m/44'/60'/0'/7/ , relayerIndex-18 , address-357038331de0dfcd3bb674772418dc57f0780066 +nodepath-m/44'/60'/0'/7/ , relayerIndex-19 , address-3d2c297dc6437981e44ede777252248ca8bc6608 +nodepath-m/44'/60'/0'/7/ , relayerIndex-20 , address-15a1af99ad9bc4695d0361934ee271f0f78b2ae3 +nodepath-m/44'/60'/0'/7/ , relayerIndex-21 , address-99b995ae15c7ad5d8da012e529a41f4115a4a9d5 +nodepath-m/44'/60'/0'/7/ , relayerIndex-22 , address-425df5364e0dcaf10576164158bb31d2a25ae609 +nodepath-m/44'/60'/0'/7/ , relayerIndex-23 , address-f7270589729c68e50704a6676c004c1fb46c96f8 +nodepath-m/44'/60'/0'/7/ , relayerIndex-24 , address-55a664e2654bb1aa9d7359f87450dd42b70d9a19 +nodepath-m/44'/60'/0'/7/ , relayerIndex-25 , address-ee7121e50896bf8ab1f40a9e4906fcb22f709de0 +nodepath-m/44'/60'/0'/7/ , relayerIndex-26 , address-a95d73d12916d82e20601f8dcc9d4d4f177c4c1d +nodepath-m/44'/60'/0'/7/ , relayerIndex-27 , address-0978d07c3c15eab8557562747d82f74b19596b2f +nodepath-m/44'/60'/0'/7/ , relayerIndex-28 , address-ccf8b0541f57da71ff786fdbd71b1270b4281b41 +nodepath-m/44'/60'/0'/7/ , relayerIndex-29 , address-ab2cadbc1d3afc33b48e8cda80832b9793cf52c5 +nodepath-m/44'/60'/0'/7/ , relayerIndex-30 , address-8f84ba67494becb2a28d001718083587fcebae02 +nodepath-m/44'/60'/0'/7/ , relayerIndex-31 , address-c7acf1d7afbe96a79e56083aca74be88a96273c6 +nodepath-m/44'/60'/0'/7/ , relayerIndex-32 , address-24c1c4d7b3061242dae5982bb99f5e9ed4896401 +nodepath-m/44'/60'/0'/7/ , relayerIndex-33 , address-3808e18c5eedd7353ac0f2e173fb755402523d4d +nodepath-m/44'/60'/0'/7/ , relayerIndex-34 , address-3190e9fb211bcd8cc96b252b29c551d5594f4344 +nodepath-m/44'/60'/0'/7/ , relayerIndex-35 , address-4fac9bed97f3cd0fe41bcd479640a5c058a20e48 +nodepath-m/44'/60'/0'/7/ , relayerIndex-36 , address-0a028319e97023cf663b627f1529229e290eb25a +nodepath-m/44'/60'/0'/7/ , relayerIndex-37 , address-332d930f5d3a0f955b2bd3c874f2817ce148f31f +nodepath-m/44'/60'/0'/7/ , relayerIndex-38 , address-7012a67cd347cefa439acdb7d86c75fae60a7e59 +nodepath-m/44'/60'/0'/7/ , relayerIndex-39 , address-9f781e0a6325cf0993ffdf4add8d37bbf175fd3b +nodepath-m/44'/60'/0'/7/ , relayerIndex-40 , address-7234172d3031bb5ed2e376167cc9a25561c48b36 +nodepath-m/44'/60'/0'/7/ , relayerIndex-41 , address-e590a5e56b8bffa2423d15d514caf4a082115c16 +nodepath-m/44'/60'/0'/7/ , relayerIndex-42 , address-5ac6e2006e2b220eed419c5e23b6400a3a939db3 +nodepath-m/44'/60'/0'/7/ , relayerIndex-43 , address-be8351972ba293e1a5c56e3436a3e11967b002c2 +nodepath-m/44'/60'/0'/7/ , relayerIndex-44 , address-5eecdc1ae3764e88606c0e31a944ffb4e240ff9d +nodepath-m/44'/60'/0'/7/ , relayerIndex-45 , address-76469affe10780ca49646b5faea2322b37bed03f +nodepath-m/44'/60'/0'/7/ , relayerIndex-46 , address-c85accf387fe381438b76552e55828f99052172c +nodepath-m/44'/60'/0'/7/ , relayerIndex-47 , address-5a7b347b87377016516f043497cf7da8406f2df2 +nodepath-m/44'/60'/0'/7/ , relayerIndex-48 , address-5ab5a3582f0620b2a3c4eb6f1779dde864ef4c36 +nodepath-m/44'/60'/0'/7/ , relayerIndex-49 , address-ff0f7ee7205c2f896c5ba4c0a3e45e67f7225a79 +nodepath-m/44'/60'/0'/8/ , 
relayerIndex-0 , address-ee1ff5db7efb7ae70ff905c6bc0924d4310843d6 +nodepath-m/44'/60'/0'/8/ , relayerIndex-1 , address-3eaab522a9f44d057ffd99d82a0397159e766861 +nodepath-m/44'/60'/0'/8/ , relayerIndex-2 , address-1c9a970fa2a0ceeb2f8a8181277903ee4faf65db +nodepath-m/44'/60'/0'/8/ , relayerIndex-3 , address-555bd8e98eac8ec8a93788a4a36cb050a0eb8674 +nodepath-m/44'/60'/0'/8/ , relayerIndex-4 , address-938fb1215b294db9a14b07bef8e1d75dc2495c05 +nodepath-m/44'/60'/0'/8/ , relayerIndex-5 , address-dd0698d7068d347331be56db4633347a0158e96f +nodepath-m/44'/60'/0'/8/ , relayerIndex-6 , address-2262bb3a6004fd2c57542cade908481c7d446ab3 +nodepath-m/44'/60'/0'/8/ , relayerIndex-7 , address-12f1d94bbd564ff4123fb7b8540fa442b9fb33eb +nodepath-m/44'/60'/0'/8/ , relayerIndex-8 , address-5221114156c87ba8873e5101194d338126869e93 +nodepath-m/44'/60'/0'/8/ , relayerIndex-9 , address-2ef99c451db71a0efb7dfb799937bc0632961c48 +nodepath-m/44'/60'/0'/8/ , relayerIndex-10 , address-f1c68652faae0ecf5ff2758af629e0f6107494fb +nodepath-m/44'/60'/0'/8/ , relayerIndex-11 , address-abc44e6a5c66cbdb9f7ac6d1e9bfe7c82e571cb2 +nodepath-m/44'/60'/0'/8/ , relayerIndex-12 , address-02b74acbbedcd028fcf7619fbc2e7ccc691cdcfb +nodepath-m/44'/60'/0'/8/ , relayerIndex-13 , address-98b76d9907fe1d14fc4f4e4bc0c420b1fffa800a +nodepath-m/44'/60'/0'/8/ , relayerIndex-14 , address-52566dad98c960095c9851f9b1c7fef3f0c33ffd +nodepath-m/44'/60'/0'/8/ , relayerIndex-15 , address-57a792653e03f4d5cbeebd7e16a8fb5028fef49f +nodepath-m/44'/60'/0'/8/ , relayerIndex-16 , address-e965f6d2f8e62b7c86acddaf7f0313d15fba6fa0 +nodepath-m/44'/60'/0'/8/ , relayerIndex-17 , address-3b2ed02f8bcf2ea16345b32732dc8ff71af60cfa +nodepath-m/44'/60'/0'/8/ , relayerIndex-18 , address-56901a98f1b5773b401d244632e50845fd6af433 +nodepath-m/44'/60'/0'/8/ , relayerIndex-19 , address-8c96f0c2d914ea7350cb2369a2f691071243787b +nodepath-m/44'/60'/0'/8/ , relayerIndex-20 , address-7d050e9849c83c77f64be1468485b3b6e615507c +nodepath-m/44'/60'/0'/8/ , relayerIndex-21 , address-7c9c6e7b36fd5369f0a4f15cacc60432df7ccd2b +nodepath-m/44'/60'/0'/8/ , relayerIndex-22 , address-6897806e895afd51ad943b95e9108b11eaf39629 +nodepath-m/44'/60'/0'/8/ , relayerIndex-23 , address-01648ca8c23b92bda94f42a4c0c8061336e7c4d8 +nodepath-m/44'/60'/0'/8/ , relayerIndex-24 , address-5e8872302dafbc69d22e730602c0bf7f66749b68 +nodepath-m/44'/60'/0'/8/ , relayerIndex-25 , address-3af42510fbcf24e1958bd98c0bcf2220049089ad +nodepath-m/44'/60'/0'/8/ , relayerIndex-26 , address-7e4a3b5a05d11f54bedb5c3a35eb6ce895fffe48 +nodepath-m/44'/60'/0'/8/ , relayerIndex-27 , address-b7b00b17746931917907e09a5d43c9912bd0beac +nodepath-m/44'/60'/0'/8/ , relayerIndex-28 , address-1cf62a294d33e350f1841afe685a740ad9f17c37 +nodepath-m/44'/60'/0'/8/ , relayerIndex-29 , address-513db99cbc477aaf0348426d6331536422063d6a +nodepath-m/44'/60'/0'/8/ , relayerIndex-30 , address-f0517afa1362a7beebc8a8f7ad7a0ad4a59d1c35 +nodepath-m/44'/60'/0'/8/ , relayerIndex-31 , address-745cad361e8a4f74aa4d3bba11a658fde8d03982 +nodepath-m/44'/60'/0'/8/ , relayerIndex-32 , address-7f9fedd15e87b897c6a2e961c8ced0242d026939 +nodepath-m/44'/60'/0'/8/ , relayerIndex-33 , address-1c1eb22b438196cf4a495e6b744163f0980371dd +nodepath-m/44'/60'/0'/8/ , relayerIndex-34 , address-24775e7500df2285863b3d66821cc1330fd0515f +nodepath-m/44'/60'/0'/8/ , relayerIndex-35 , address-50e4b0ebfcaac2c48182f85bdac82fa64433705e +nodepath-m/44'/60'/0'/8/ , relayerIndex-36 , address-e4354b61836013b8c68d21a17a482c590d9bce3a +nodepath-m/44'/60'/0'/8/ , relayerIndex-37 , 
address-5e914742f183e4e125af48e40dbb8eee01c20f00 +nodepath-m/44'/60'/0'/8/ , relayerIndex-38 , address-d4ae9c672473e129d5ff9a2c4aecf49702ee2827 +nodepath-m/44'/60'/0'/8/ , relayerIndex-39 , address-99f065a686da681fcf5c137c782c65c1726885df +nodepath-m/44'/60'/0'/8/ , relayerIndex-40 , address-f029853ea5f2200bc1b51fbe8c294b8f3aaebaf5 +nodepath-m/44'/60'/0'/8/ , relayerIndex-41 , address-a61483da40eb142b13968af25ef4a7c9e1e3bec8 +nodepath-m/44'/60'/0'/8/ , relayerIndex-42 , address-49997539c469848031221251891fc21e277fabc3 +nodepath-m/44'/60'/0'/8/ , relayerIndex-43 , address-6277dedf5dd1d7fd5f28f73d0af0ff902bffd486 +nodepath-m/44'/60'/0'/8/ , relayerIndex-44 , address-9d9eff4cf0420d16c74865c9a207b601d0506992 +nodepath-m/44'/60'/0'/8/ , relayerIndex-45 , address-743d937e99cc54dbc9827e6cff0daf439ac31854 +nodepath-m/44'/60'/0'/8/ , relayerIndex-46 , address-afa58bc4157d38183c94a618ee366a0d8f7c850a +nodepath-m/44'/60'/0'/8/ , relayerIndex-47 , address-f5720e65af27b08b1030e8303183bfd18b8b2060 +nodepath-m/44'/60'/0'/8/ , relayerIndex-48 , address-1cbce9e7cfec22c540b2c8e57fee5d043893b36b +nodepath-m/44'/60'/0'/8/ , relayerIndex-49 , address-c8e12bd6a18d79198790c599f0bde8a777b569da +nodepath-m/44'/60'/0'/9/ , relayerIndex-0 , address-471418990d1f89ec36ba286fd979472658c947ff +nodepath-m/44'/60'/0'/9/ , relayerIndex-1 , address-c8e22a6063215a0c4c6a31c1aedee40dfedb3784 +nodepath-m/44'/60'/0'/9/ , relayerIndex-2 , address-af9367956111ab51088f9304c0290d54acbe7d4f +nodepath-m/44'/60'/0'/9/ , relayerIndex-3 , address-18f1a8eca0b9185b7ae2d89e3f266a95ee638a1f +nodepath-m/44'/60'/0'/9/ , relayerIndex-4 , address-8483eb2ea7ab676ec337d237e5b178e7642b33c7 +nodepath-m/44'/60'/0'/9/ , relayerIndex-5 , address-d6a24352a6797cf97f128aced691dc31fb71aec3 +nodepath-m/44'/60'/0'/9/ , relayerIndex-6 , address-c73fd99dfa94cd3b93f2639bcc11aa4b94557214 +nodepath-m/44'/60'/0'/9/ , relayerIndex-7 , address-c003fe51d358353335cecc87388dd24d7100921e +nodepath-m/44'/60'/0'/9/ , relayerIndex-8 , address-5c77884b2534e5cc1059861136fbf8240c6aa4fb +nodepath-m/44'/60'/0'/9/ , relayerIndex-9 , address-e48e82064f859562989046431ae88e4189020c94 +nodepath-m/44'/60'/0'/9/ , relayerIndex-10 , address-9325987de5e0990e95f93ac68c26d2ecc4db6e14 +nodepath-m/44'/60'/0'/9/ , relayerIndex-11 , address-9abb00aafec1fb5bcf92e6e1410e307ed621b5c2 +nodepath-m/44'/60'/0'/9/ , relayerIndex-12 , address-b5d08199409ffc49f1c328b6e9a473ca7e7cffc1 +nodepath-m/44'/60'/0'/9/ , relayerIndex-13 , address-7c9720a35859e4340429363838203639878023ae +nodepath-m/44'/60'/0'/9/ , relayerIndex-14 , address-2960e02aa046e431e66958c6e5276d8a51c2df3f +nodepath-m/44'/60'/0'/9/ , relayerIndex-15 , address-a215197b805dbf9ff4c8042caae985f14a2a9545 +nodepath-m/44'/60'/0'/9/ , relayerIndex-16 , address-5abf6e9e900926936e2634b5dd5488f6c71dfa6f +nodepath-m/44'/60'/0'/9/ , relayerIndex-17 , address-d8fd28d03f0c319e906747671a18a67f3305c408 +nodepath-m/44'/60'/0'/9/ , relayerIndex-18 , address-ea4e32db7961097134acac11f4e4cb3cc6f4995e +nodepath-m/44'/60'/0'/9/ , relayerIndex-19 , address-b3f5ebd451a7865288ee0cb4bd563a6aa094fd7a +nodepath-m/44'/60'/0'/9/ , relayerIndex-20 , address-e2d4a63c5c3e7dcfd64896596eea5f7d8d52a119 +nodepath-m/44'/60'/0'/9/ , relayerIndex-21 , address-942314ff90b1aaf3a0b4cd52bb4e6dffa6e1a9dc +nodepath-m/44'/60'/0'/9/ , relayerIndex-22 , address-977838a866523851a2dc21ea3f59e180f40e29f0 +nodepath-m/44'/60'/0'/9/ , relayerIndex-23 , address-1b03d40cfbbfff40971c3233493ca10277018574 +nodepath-m/44'/60'/0'/9/ , relayerIndex-24 , address-89895c7074bb1a0e14bb72b6d4516d919d1fbc44 
+nodepath-m/44'/60'/0'/9/ , relayerIndex-25 , address-bba7b3f8538d987f4307a1c60685f2c49df94a50 +nodepath-m/44'/60'/0'/9/ , relayerIndex-26 , address-7902b7ef32fad3facdf719937b52bb6f80754e18 +nodepath-m/44'/60'/0'/9/ , relayerIndex-27 , address-617a95d7b9e6c9fd28d99ad40853a34827508399 +nodepath-m/44'/60'/0'/9/ , relayerIndex-28 , address-1247117d6c52d3532bf6068714e507d47b128466 +nodepath-m/44'/60'/0'/9/ , relayerIndex-29 , address-2dae84cda2719273900945fe39a32e68b03925b6 +nodepath-m/44'/60'/0'/9/ , relayerIndex-30 , address-287d2cbdd6a3d0311908fa2644709ea7c7953505 +nodepath-m/44'/60'/0'/9/ , relayerIndex-31 , address-f088b37db04cb3c2ef1e0acf5439fb15346697ad +nodepath-m/44'/60'/0'/9/ , relayerIndex-32 , address-513f94867a4d2dfb976b34a3084c6d873bd2ea6e +nodepath-m/44'/60'/0'/9/ , relayerIndex-33 , address-015bda52e413ed6ebe1312797d8d229eb775344f +nodepath-m/44'/60'/0'/9/ , relayerIndex-34 , address-11b56b07980fa201a4248d8e6cdc8a46299d61a2 +nodepath-m/44'/60'/0'/9/ , relayerIndex-35 , address-2f0a6aafbddbe9146d3ced46423fb1cd5c121f41 +nodepath-m/44'/60'/0'/9/ , relayerIndex-36 , address-008ac853ced3939e0bcf510c7e7df18f447f17d7 +nodepath-m/44'/60'/0'/9/ , relayerIndex-37 , address-ac379e77274e24a1381d035948d14c92bdcc701b +nodepath-m/44'/60'/0'/9/ , relayerIndex-38 , address-cb46aa218b0408a18a8edbff486df0fa15b6bd31 +nodepath-m/44'/60'/0'/9/ , relayerIndex-39 , address-d4eb17a9b62e37a82ec018a70a8d037761ceaee6 +nodepath-m/44'/60'/0'/9/ , relayerIndex-40 , address-51ad9832578fc791de2202ed96ec298b7bae59c5 +nodepath-m/44'/60'/0'/9/ , relayerIndex-41 , address-58dfcfe5175fe83a370b24b3ef7543ac3b89fc19 +nodepath-m/44'/60'/0'/9/ , relayerIndex-42 , address-8ed6b80a8beadef193d161c0aae71d59ee682849 +nodepath-m/44'/60'/0'/9/ , relayerIndex-43 , address-c1ce28fbd97787225d4dd7c202d2903400bbcee4 +nodepath-m/44'/60'/0'/9/ , relayerIndex-44 , address-eb4380066609133d01f79448c56bee59f42968d7 +nodepath-m/44'/60'/0'/9/ , relayerIndex-45 , address-273a956f2ca4200291df41460a900d7f063b0e13 +nodepath-m/44'/60'/0'/9/ , relayerIndex-46 , address-f6ff4679de63f4899d7e47360e108833a72ce698 +nodepath-m/44'/60'/0'/9/ , relayerIndex-47 , address-42a42b14caec2b18b0508f83cce6712c7e29393a +nodepath-m/44'/60'/0'/9/ , relayerIndex-48 , address-928bf0d900f1b5882e469adf532e07f64b7d0149 +nodepath-m/44'/60'/0'/9/ , relayerIndex-49 , address-265c3fd0bf4c4d4a932c25e5cabda32de1da089d +nodepath-m/44'/60'/0'/10/ , relayerIndex-0 , address-160fa7adbeb20fd0df5fd41e0a84d80fe9b7b86c +nodepath-m/44'/60'/0'/10/ , relayerIndex-1 , address-24f473d7b9f70c11dfb7807bba5a1176a034ba7e +nodepath-m/44'/60'/0'/10/ , relayerIndex-2 , address-abefa8c1708bd67269626dd160b9f204e482ec90 +nodepath-m/44'/60'/0'/10/ , relayerIndex-3 , address-8243f22bddbb6373a46a0e944331f56bebbed3a2 +nodepath-m/44'/60'/0'/10/ , relayerIndex-4 , address-cd8880f8a80a79c734b24f54b0763e3ffdc6b906 +nodepath-m/44'/60'/0'/10/ , relayerIndex-5 , address-9f5d8163e6df1aa9fddadfa7b8b20915e0a9f17a +nodepath-m/44'/60'/0'/10/ , relayerIndex-6 , address-433e3f40498d2d2d470ca12fe4e7c4a79d81deb5 +nodepath-m/44'/60'/0'/10/ , relayerIndex-7 , address-c002e467c78e9594bfe1420925697a780a878ab3 +nodepath-m/44'/60'/0'/10/ , relayerIndex-8 , address-c2b9c26ad769b48b3277b55d040eaad15e61d621 +nodepath-m/44'/60'/0'/10/ , relayerIndex-9 , address-2ab3a0607b97ff9c320648d488c713eb94b0a588 +nodepath-m/44'/60'/0'/10/ , relayerIndex-10 , address-c1c8bb317f43c168848a7589e536c8afca0a9c9f +nodepath-m/44'/60'/0'/10/ , relayerIndex-11 , address-6cca008e909f9dd1cc5e6cccf3f808a4217a80c0 +nodepath-m/44'/60'/0'/10/ , 
relayerIndex-12 , address-ffc5d1fc36c32d60f51f95e34f3c83cc41a1167d +nodepath-m/44'/60'/0'/10/ , relayerIndex-13 , address-37504af839a4c8dbeb178c8b825947c5238c246a +nodepath-m/44'/60'/0'/10/ , relayerIndex-14 , address-012f880b35317c6fb08b2311b698717dc51fc1cc +nodepath-m/44'/60'/0'/10/ , relayerIndex-15 , address-a30d96b915f78720567f864850b155c6be4f45d5 +nodepath-m/44'/60'/0'/10/ , relayerIndex-16 , address-d751bc9bce9c99b0cd046c7975dc1b59ff84ca18 +nodepath-m/44'/60'/0'/10/ , relayerIndex-17 , address-3de39c115e39d68dae2eeca5672db3f8fe0811b4 +nodepath-m/44'/60'/0'/10/ , relayerIndex-18 , address-6470bb5a55dc4703a346a2085c0c1a37c27e335d +nodepath-m/44'/60'/0'/10/ , relayerIndex-19 , address-2c7e1fb69ce03d49f4f4eb6a257acb51362eccb2 +nodepath-m/44'/60'/0'/10/ , relayerIndex-20 , address-06fb3a2e9774cfa483245afafeb3b2337fbee33f +nodepath-m/44'/60'/0'/10/ , relayerIndex-21 , address-1c193929a1dc5ed2108af29c31a95ea20237f370 +nodepath-m/44'/60'/0'/10/ , relayerIndex-22 , address-6642af909b5edc95f74bba72b27b0786551804ec +nodepath-m/44'/60'/0'/10/ , relayerIndex-23 , address-66c6ff3362e92a8945401adbfba2c5906412853a +nodepath-m/44'/60'/0'/10/ , relayerIndex-24 , address-64696b9194d460cb276f0326757ec087e64402be +nodepath-m/44'/60'/0'/10/ , relayerIndex-25 , address-1bec46ed15459978215a2073c9ffd95f43d66660 +nodepath-m/44'/60'/0'/10/ , relayerIndex-26 , address-8dc837acb143f8a8e6dc961d6688970cbac7680c +nodepath-m/44'/60'/0'/10/ , relayerIndex-27 , address-d7a2abd99379486a63a6b29365e5a2246fce24f5 +nodepath-m/44'/60'/0'/10/ , relayerIndex-28 , address-06239d04a5ee21923016c6441a6be9e27d2c0fb3 +nodepath-m/44'/60'/0'/10/ , relayerIndex-29 , address-5ebbf2dd80d7701df4fbabc05659ad57621c6e0b +nodepath-m/44'/60'/0'/10/ , relayerIndex-30 , address-3dbebec5b9bd2b4b2240fff169c5582d806ba943 +nodepath-m/44'/60'/0'/10/ , relayerIndex-31 , address-f1b03dcce0a2e591e380c8e3c8432c670aabb49e +nodepath-m/44'/60'/0'/10/ , relayerIndex-32 , address-2b99dcaa9c3dde4003a9e0c23dc4dbfd1b21982b +nodepath-m/44'/60'/0'/10/ , relayerIndex-33 , address-88f9ff96a1f4c3e1672ede66d7317ce0ab6c9713 +nodepath-m/44'/60'/0'/10/ , relayerIndex-34 , address-90707dda5b85432acd4a18988152bb2ef4f3478f +nodepath-m/44'/60'/0'/10/ , relayerIndex-35 , address-fd96a1bbaa8414d3fede70a936e5b02a4033afcb +nodepath-m/44'/60'/0'/10/ , relayerIndex-36 , address-27827910c14835d03b74981693b5df92367263d3 +nodepath-m/44'/60'/0'/10/ , relayerIndex-37 , address-cbafcd44877cb225aef952d5295fe67d17969d87 +nodepath-m/44'/60'/0'/10/ , relayerIndex-38 , address-09169e9ec98a31f384dfa9787d9cde26841a0aae +nodepath-m/44'/60'/0'/10/ , relayerIndex-39 , address-b3e090ee300f02bbb2b61c0fb3e1d1ef916b94fe +nodepath-m/44'/60'/0'/10/ , relayerIndex-40 , address-915afce9b693fbb925b51c2a23c6442b9f8bcf56 +nodepath-m/44'/60'/0'/10/ , relayerIndex-41 , address-212a00e063dd7cab36f4fddc969b01f94408fbb8 +nodepath-m/44'/60'/0'/10/ , relayerIndex-42 , address-f13af5a760382cccdbea2f97adf6f117d70b15ac +nodepath-m/44'/60'/0'/10/ , relayerIndex-43 , address-42202be6a6f39394202f2e282d625163e4be682b +nodepath-m/44'/60'/0'/10/ , relayerIndex-44 , address-20635a491eb107aff92b625db79137d11ffb1c79 +nodepath-m/44'/60'/0'/10/ , relayerIndex-45 , address-1563fca9797dcf50c55f4987014fff4e96d37078 +nodepath-m/44'/60'/0'/10/ , relayerIndex-46 , address-79209afee7c7ca161530ca698708f4f0008cb33e +nodepath-m/44'/60'/0'/10/ , relayerIndex-47 , address-49e56c623ecfe915bc7fd2d1932e9e964a620e55 +nodepath-m/44'/60'/0'/10/ , relayerIndex-48 , address-fd5a31b9b9da404666709f7cd3442b55a2ea5eb8 +nodepath-m/44'/60'/0'/10/ , 
relayerIndex-49 , address-6f9497e23417bc3e02609bf9b85308f4645420ef +nodepath-m/44'/60'/0'/11/ , relayerIndex-0 , address-e37fb176d8d8d8ba15b29373ebbe249bef0de434 +nodepath-m/44'/60'/0'/11/ , relayerIndex-1 , address-bcc73f8ffb877726151824cdbe14c31c69d4cc74 +nodepath-m/44'/60'/0'/11/ , relayerIndex-2 , address-f52e620afb73313932e51b9991bc6891d6b63586 +nodepath-m/44'/60'/0'/11/ , relayerIndex-3 , address-d5b87d15299b518f208a06f3fe976d9f9bec520c +nodepath-m/44'/60'/0'/11/ , relayerIndex-4 , address-547f116bc4fd6e3beef9d9c5a20b3a6181b8948d +nodepath-m/44'/60'/0'/11/ , relayerIndex-5 , address-131582b0e5667b7ac00aee576b7d3aa1fa2d0873 +nodepath-m/44'/60'/0'/11/ , relayerIndex-6 , address-683650af887c5f2221348fea8958339ff6cedf41 +nodepath-m/44'/60'/0'/11/ , relayerIndex-7 , address-e68d7e42cfb6ef0f50ba9c5a4f48213e4f8ae0b8 +nodepath-m/44'/60'/0'/11/ , relayerIndex-8 , address-318723a6ee7c06a037e6cd6b041d4bb84e8902ea +nodepath-m/44'/60'/0'/11/ , relayerIndex-9 , address-ecbc25809e39ef37abe75338a6558227edefab5e +nodepath-m/44'/60'/0'/11/ , relayerIndex-10 , address-af942120a5b468286d09b5d2108f90a4563c9bd7 +nodepath-m/44'/60'/0'/11/ , relayerIndex-11 , address-20b1d521117a7400aaefc71b897730ad18f2aa05 +nodepath-m/44'/60'/0'/11/ , relayerIndex-12 , address-5458bde63c8dbfda04c08d9dfe3ed5af09ccca88 +nodepath-m/44'/60'/0'/11/ , relayerIndex-13 , address-2d8f616bffbb02c01b2145a403b653a00e1c269c +nodepath-m/44'/60'/0'/11/ , relayerIndex-14 , address-3782cc1c4c3c19d0d764877f5f51e0a24d24ef4b +nodepath-m/44'/60'/0'/11/ , relayerIndex-15 , address-d626324da3a03e10e916126cc011ca4204bf8eea +nodepath-m/44'/60'/0'/11/ , relayerIndex-16 , address-37f460c8c804e3d7ab7affdee46ce882bd91dda2 +nodepath-m/44'/60'/0'/11/ , relayerIndex-17 , address-ea13acbf8569964e0376580dff856b9bbd6ee01a +nodepath-m/44'/60'/0'/11/ , relayerIndex-18 , address-ebc91b55b18df0a6eea71fe1ea0045b2ed442eee +nodepath-m/44'/60'/0'/11/ , relayerIndex-19 , address-27ae084dc9d28a7f8d1d3359adc519a1c2b92cae +nodepath-m/44'/60'/0'/11/ , relayerIndex-20 , address-2ae35c6f66189d8c5ccdb7e8f44c20604caa7448 +nodepath-m/44'/60'/0'/11/ , relayerIndex-21 , address-8de680baa3116025c0ce1cf99566e41cf227fcdf +nodepath-m/44'/60'/0'/11/ , relayerIndex-22 , address-0c03f4ef3065d93af970a47ae78d721dcf70785c +nodepath-m/44'/60'/0'/11/ , relayerIndex-23 , address-fd9d721a972dbbb148a467b7a4ae4158bbe81314 +nodepath-m/44'/60'/0'/11/ , relayerIndex-24 , address-fde85009ad37353420e965a5ed541b7af7b56493 +nodepath-m/44'/60'/0'/11/ , relayerIndex-25 , address-4c289014e7970ba888189da53c6530eeef51e109 +nodepath-m/44'/60'/0'/11/ , relayerIndex-26 , address-41fa67a678f1941af87f22466cef100688846dc5 +nodepath-m/44'/60'/0'/11/ , relayerIndex-27 , address-6eb4255678406695ee878b34794ac85ad4ac40ca +nodepath-m/44'/60'/0'/11/ , relayerIndex-28 , address-a1664e49cfd6749a55eb22eb6855c06528686f1c +nodepath-m/44'/60'/0'/11/ , relayerIndex-29 , address-35a4f0cf80d365f198c27ed77b6e23f69dace03c +nodepath-m/44'/60'/0'/11/ , relayerIndex-30 , address-67faeb2086ef3d3854d034c61ce8b283d2b3da3c +nodepath-m/44'/60'/0'/11/ , relayerIndex-31 , address-3265df4150dd13042a5f4c71844158c85aca94af +nodepath-m/44'/60'/0'/11/ , relayerIndex-32 , address-a9d7d7d91412903b2ba536a6b8d113508aae5529 +nodepath-m/44'/60'/0'/11/ , relayerIndex-33 , address-c8993229c435d16c894442ebebc14c8070470b56 +nodepath-m/44'/60'/0'/11/ , relayerIndex-34 , address-5293b50b9d8960227f2a606f7c2c47f42a2f5607 +nodepath-m/44'/60'/0'/11/ , relayerIndex-35 , address-43511bc65c9a33b2ca3bc349383313a3bda96c65 +nodepath-m/44'/60'/0'/11/ , 
relayerIndex-36 , address-3ea372542de4c3330319bf448a53532583a3ef8c +nodepath-m/44'/60'/0'/11/ , relayerIndex-37 , address-5a9da78158027a07bdc1886ab114b4909271389f +nodepath-m/44'/60'/0'/11/ , relayerIndex-38 , address-e6681c911dbd204f789f02476f899d4e0697cc6a +nodepath-m/44'/60'/0'/11/ , relayerIndex-39 , address-dfa1b968ba92db48c4d5ceb7fe40e8c0f88c5575 +nodepath-m/44'/60'/0'/11/ , relayerIndex-40 , address-bd78a165bda7895ea553c326e7cc26ece15caec1 +nodepath-m/44'/60'/0'/11/ , relayerIndex-41 , address-a2ffb4d309645122b5961dbebc838f7387fa4724 +nodepath-m/44'/60'/0'/11/ , relayerIndex-42 , address-c186b0e6d81bf9a93b600ef4b934759e5ca71b06 +nodepath-m/44'/60'/0'/11/ , relayerIndex-43 , address-82e3df29baf7d37c9ba94378631bb54dacbe5899 +nodepath-m/44'/60'/0'/11/ , relayerIndex-44 , address-f71a3dd8b148dd1c208685f9c8206d6337db3083 +nodepath-m/44'/60'/0'/11/ , relayerIndex-45 , address-8458487cb1967ccdbc48ed1b6896cc16c76f11cb +nodepath-m/44'/60'/0'/11/ , relayerIndex-46 , address-c9f099a10ec4428f0c1fb4163ef80f3ada9e1faf +nodepath-m/44'/60'/0'/11/ , relayerIndex-47 , address-4e19a2f48cf672c79e57cfb1d611a892224f0583 +nodepath-m/44'/60'/0'/11/ , relayerIndex-48 , address-10ca74761a0a9e65922b48daab8cc33c148a6b15 +nodepath-m/44'/60'/0'/11/ , relayerIndex-49 , address-2728528f8f82ab58c3171fa9d231e9c0e2a5384d +nodepath-m/44'/60'/0'/12/ , relayerIndex-0 , address-b421c287878cdd22ff60b6bd8cea1cb4f7e73445 +nodepath-m/44'/60'/0'/12/ , relayerIndex-1 , address-6c7c8dae05be5f82fca93d498bf12c7905e16d2d +nodepath-m/44'/60'/0'/12/ , relayerIndex-2 , address-8c0620c071da5f4a9a96270c4e8245d84ed766e6 +nodepath-m/44'/60'/0'/12/ , relayerIndex-3 , address-a5a38e42ec7a1c92dab347ad81824963041e7df3 +nodepath-m/44'/60'/0'/12/ , relayerIndex-4 , address-48b016cbca7bf795c688ad47c46cac3e2168272e +nodepath-m/44'/60'/0'/12/ , relayerIndex-5 , address-daa66092c1ef641a22b31210860a8d9d9da7fcc0 +nodepath-m/44'/60'/0'/12/ , relayerIndex-6 , address-73e79334fef40bc0732680182f94224fa373807f +nodepath-m/44'/60'/0'/12/ , relayerIndex-7 , address-54d6aa2e7b247f086bcaa5089e75d3dd10468c91 +nodepath-m/44'/60'/0'/12/ , relayerIndex-8 , address-d3bec3988489354609fe9e10536fb615e3c0c886 +nodepath-m/44'/60'/0'/12/ , relayerIndex-9 , address-c189a6b5f8fea6f3504e32d15b66e2bc9bcb5108 +nodepath-m/44'/60'/0'/12/ , relayerIndex-10 , address-7ba2dc2f371de4ab93c20676c148adbee16f874b +nodepath-m/44'/60'/0'/12/ , relayerIndex-11 , address-176d9c8165b7cb78c43cc77497045f6f778c7169 +nodepath-m/44'/60'/0'/12/ , relayerIndex-12 , address-ea624fb0baf0afe6f149c7f5429af35c592d9b80 +nodepath-m/44'/60'/0'/12/ , relayerIndex-13 , address-ccaa1b3226b1c5dc4b153c6b2565ed1a263c26ad +nodepath-m/44'/60'/0'/12/ , relayerIndex-14 , address-9905024c8ee0ed1b5f39dc53f337f2bcc8bde364 +nodepath-m/44'/60'/0'/12/ , relayerIndex-15 , address-6bf2754eccf54eb3eedfbd3c67ffbf073de84502 +nodepath-m/44'/60'/0'/12/ , relayerIndex-16 , address-e6ed3a97bfee335374486090584f1349af30b209 +nodepath-m/44'/60'/0'/12/ , relayerIndex-17 , address-88d8841952f332ff2ed83e60b43739eb00670323 +nodepath-m/44'/60'/0'/12/ , relayerIndex-18 , address-6627e1c432ae1094480f741736f538af72653aa7 +nodepath-m/44'/60'/0'/12/ , relayerIndex-19 , address-9c2b13f00ffb71888ec310b6e21564f44698fcbd +nodepath-m/44'/60'/0'/12/ , relayerIndex-20 , address-cea4a44dd6dfd33d8bb89a3c9290fe31e28ab713 +nodepath-m/44'/60'/0'/12/ , relayerIndex-21 , address-8ee8d233ecfb004de133eb61aaf055b2c92b12c8 +nodepath-m/44'/60'/0'/12/ , relayerIndex-22 , address-aa91c0c773adb704ecb014d228a4cfacc279f03c +nodepath-m/44'/60'/0'/12/ , 
relayerIndex-23 , address-3b3cebb57e3794f397ef463a4c8365a58997f842 +nodepath-m/44'/60'/0'/12/ , relayerIndex-24 , address-d2c4c6e94eb934f509da50270f5eeb0ac0807cf5 +nodepath-m/44'/60'/0'/12/ , relayerIndex-25 , address-b003d085f15acee980459a408c8f8f0d05ab2d0d +nodepath-m/44'/60'/0'/12/ , relayerIndex-26 , address-cbb2e1998ca954115cecefe7945ea865f17de99a +nodepath-m/44'/60'/0'/12/ , relayerIndex-27 , address-a8250ffa41d30a71052d7187a1561c5810c7aa7b +nodepath-m/44'/60'/0'/12/ , relayerIndex-28 , address-30ba7b5f8accb75d75cac643fd18d12f10009028 +nodepath-m/44'/60'/0'/12/ , relayerIndex-29 , address-f65533961461fdb16a9b6531be49371fee8a43b5 +nodepath-m/44'/60'/0'/12/ , relayerIndex-30 , address-3514cad181af60bc1239ad5221dd6775b011a19e +nodepath-m/44'/60'/0'/12/ , relayerIndex-31 , address-b5f30ae92bfbba22d8541bbc7cf87e35cacafc70 +nodepath-m/44'/60'/0'/12/ , relayerIndex-32 , address-11cb670868a99ff8b8eb35d1525a30643e1161e7 +nodepath-m/44'/60'/0'/12/ , relayerIndex-33 , address-bb39c32fecd9f068a34dc7ac1cfd535a6c84c6d8 +nodepath-m/44'/60'/0'/12/ , relayerIndex-34 , address-bcf1881311d42e60ebfaa349cd770730b0be97ee +nodepath-m/44'/60'/0'/12/ , relayerIndex-35 , address-305bb91c89c0e2ec71a7502bf21a66a3a81d65b9 +nodepath-m/44'/60'/0'/12/ , relayerIndex-36 , address-605d286bd942a1de0f639fc9ed4e85fae84c8ef0 +nodepath-m/44'/60'/0'/12/ , relayerIndex-37 , address-097dde30c4a6b7f54f57cbc5e2a8467f64d54c8c +nodepath-m/44'/60'/0'/12/ , relayerIndex-38 , address-cbac69e0e0e3659523c8ceeca91468c4bc4460d7 +nodepath-m/44'/60'/0'/12/ , relayerIndex-39 , address-d39c76ea928a14c1873e4db46a80bc1ea9ac81dc +nodepath-m/44'/60'/0'/12/ , relayerIndex-40 , address-146435ae02a6b1af5e64ddf2b28100e0bddd071d +nodepath-m/44'/60'/0'/12/ , relayerIndex-41 , address-642d1584a4e5662e9e4e5d09c07ba002be837c53 +nodepath-m/44'/60'/0'/12/ , relayerIndex-42 , address-564753267b0c4d7dc6a14b285330de1cd3bb016b +nodepath-m/44'/60'/0'/12/ , relayerIndex-43 , address-e54178a3a99bab199bb59382e98cb3af684c8eba +nodepath-m/44'/60'/0'/12/ , relayerIndex-44 , address-f61bc4fb37cda8e39e9c4f0f9a416875422052a9 +nodepath-m/44'/60'/0'/12/ , relayerIndex-45 , address-de2d46c8c28d2fa34eafcf57e4a6e66bb7cc5785 +nodepath-m/44'/60'/0'/12/ , relayerIndex-46 , address-e6a73f028d10cf194b09ce0ffaf5963f47c54563 +nodepath-m/44'/60'/0'/12/ , relayerIndex-47 , address-9f26b06d462bf754e03984fde5a277c393c4a419 +nodepath-m/44'/60'/0'/12/ , relayerIndex-48 , address-33f917e25ad227faef4120c9465d8ca42016a36c +nodepath-m/44'/60'/0'/12/ , relayerIndex-49 , address-71e256ff84a7013914df36e763a253115abc7b99 +nodepath-m/44'/60'/0'/13/ , relayerIndex-0 , address-89cb5f55dbec7a008589d04959719032842c41aa +nodepath-m/44'/60'/0'/13/ , relayerIndex-1 , address-4bae3650a0e591b73463389364c3803878c8bb03 +nodepath-m/44'/60'/0'/13/ , relayerIndex-2 , address-a1a586bf8c8878422d6f67abc65ea47fd7721b6f +nodepath-m/44'/60'/0'/13/ , relayerIndex-3 , address-365e2f57275323aa8f79a442111b3af3d42703cd +nodepath-m/44'/60'/0'/13/ , relayerIndex-4 , address-341c616edd92fe8113265c6e43e16b97e12b1c50 +nodepath-m/44'/60'/0'/13/ , relayerIndex-5 , address-3c552206a47b46182a8a10548fde8b567b12ba1e +nodepath-m/44'/60'/0'/13/ , relayerIndex-6 , address-0c7a53b14c1cf9d8e08359db70ce8579323b2694 +nodepath-m/44'/60'/0'/13/ , relayerIndex-7 , address-075edb8527dc603564e3bb4ab0ebaac05b0397cc +nodepath-m/44'/60'/0'/13/ , relayerIndex-8 , address-c49f3bf868e55f049729204607b310f588b53cff +nodepath-m/44'/60'/0'/13/ , relayerIndex-9 , address-f8451117f95b97aabf790d50a3b5744bd9e7ac5c +nodepath-m/44'/60'/0'/13/ , 
relayerIndex-10 , address-5da99c49842b4563648c3f2c3b23af97d511456f +nodepath-m/44'/60'/0'/13/ , relayerIndex-11 , address-13f920c4b632232d01753db4a2ef58879cc4e0bd +nodepath-m/44'/60'/0'/13/ , relayerIndex-12 , address-caa849863dbfc595d8073f724e9c35b4839d62b6 +nodepath-m/44'/60'/0'/13/ , relayerIndex-13 , address-25601506164764139864eba2019cf612ce48f387 +nodepath-m/44'/60'/0'/13/ , relayerIndex-14 , address-816aa071b171e0a98b7c38bedd70bb159ccd09f9 +nodepath-m/44'/60'/0'/13/ , relayerIndex-15 , address-76f8033dfaf42b55c6fbd686e025fafba29c68af +nodepath-m/44'/60'/0'/13/ , relayerIndex-16 , address-7a987acebfd37888e85c7fceed6792d5d532c68b +nodepath-m/44'/60'/0'/13/ , relayerIndex-17 , address-1e1bf874e63e25fd836a8f314452931330b61ed8 +nodepath-m/44'/60'/0'/13/ , relayerIndex-18 , address-c636de593b75e8c19aba2b58bdc64bd839fd35ee +nodepath-m/44'/60'/0'/13/ , relayerIndex-19 , address-4f6964611ccdb23961ccb09da74f3094c7297208 +nodepath-m/44'/60'/0'/13/ , relayerIndex-20 , address-117fad240d0b0c022c939520f3be60768e904df0 +nodepath-m/44'/60'/0'/13/ , relayerIndex-21 , address-2f253bcfe0037fb13bbbffd0a20da204258fc558 +nodepath-m/44'/60'/0'/13/ , relayerIndex-22 , address-83b43032b40b05d0be16b5f1203ed492d94b1f99 +nodepath-m/44'/60'/0'/13/ , relayerIndex-23 , address-14889d0bb0f4f84b2b8cfa9dba39b7f988a5c2ab +nodepath-m/44'/60'/0'/13/ , relayerIndex-24 , address-edb097f081824b0af170959f3e4e968981e4248e +nodepath-m/44'/60'/0'/13/ , relayerIndex-25 , address-2bca7d2c35fdfa4b3b54c30a3d80fc4eb7e853fe +nodepath-m/44'/60'/0'/13/ , relayerIndex-26 , address-7fef247418197a226e0ec8fb4c21285d5492c5aa +nodepath-m/44'/60'/0'/13/ , relayerIndex-27 , address-4568d9032cd39f7bbe9a02944ff91d0e8aaa6c6a +nodepath-m/44'/60'/0'/13/ , relayerIndex-28 , address-1b994ad6b8959f0f0d86cbb5c46b6e61f5b6d538 +nodepath-m/44'/60'/0'/13/ , relayerIndex-29 , address-cf47e363093db91420262dd7c6b5d82e30888d66 +nodepath-m/44'/60'/0'/13/ , relayerIndex-30 , address-5af13f06bca8c695e880c9308fd3c47fb00d3e17 +nodepath-m/44'/60'/0'/13/ , relayerIndex-31 , address-18c6418e433568a50010ebc8f15fa41c039021ce +nodepath-m/44'/60'/0'/13/ , relayerIndex-32 , address-864832a14d5819ea4cee089a24702df45bb33216 +nodepath-m/44'/60'/0'/13/ , relayerIndex-33 , address-16adf81b5b42fe2e80344237df3a64d5387e795d +nodepath-m/44'/60'/0'/13/ , relayerIndex-34 , address-7184af45ed3a5d02b9981c74a9be1452472ceeb5 +nodepath-m/44'/60'/0'/13/ , relayerIndex-35 , address-24077c3a905f891732f212c6f2d7fad413c8c884 +nodepath-m/44'/60'/0'/13/ , relayerIndex-36 , address-6027b72cb0c61e0d1f5df15e95a49020c6eb763d +nodepath-m/44'/60'/0'/13/ , relayerIndex-37 , address-86ef4b1677a526d2bb9cf65b89d6c6d75e8ac883 +nodepath-m/44'/60'/0'/13/ , relayerIndex-38 , address-fcf6e67d85f606640578063066dd248e9b237cbf +nodepath-m/44'/60'/0'/13/ , relayerIndex-39 , address-843e5a33976cf10cfe1a1d90ccfec310a6bc7758 +nodepath-m/44'/60'/0'/13/ , relayerIndex-40 , address-2916521d3d4d089f0c55de25bc0edc57e4e05f5c +nodepath-m/44'/60'/0'/13/ , relayerIndex-41 , address-13f5ce484c26c25791cf7b10949cde6aac6458a0 +nodepath-m/44'/60'/0'/13/ , relayerIndex-42 , address-3f37dcea38a5532148940d7f4abde67faa3eda3b +nodepath-m/44'/60'/0'/13/ , relayerIndex-43 , address-2b4cd616f7ff8b8ba7b50c267ba0ab8ee37402f5 +nodepath-m/44'/60'/0'/13/ , relayerIndex-44 , address-d326fd58aa6a26de76949d57d46c7434d24434e7 +nodepath-m/44'/60'/0'/13/ , relayerIndex-45 , address-d4151f75ba9f2c872b14e72ab4486b91291dab6f +nodepath-m/44'/60'/0'/13/ , relayerIndex-46 , address-f3ed477d3ded4c224c591ab9d96a6b9dad85359f +nodepath-m/44'/60'/0'/13/ , 
relayerIndex-47 , address-06e0d5a35070fd3b95ccfa88e68d9c40d57a07c5 +nodepath-m/44'/60'/0'/13/ , relayerIndex-48 , address-074b8c37e6cd8df2f7fb8d3037efa2e6182f775e +nodepath-m/44'/60'/0'/13/ , relayerIndex-49 , address-8520f7f1c5871a5410b874942bff238982c3311e +nodepath-m/44'/60'/0'/14/ , relayerIndex-0 , address-49e626074fca7a5e1f05f61a6dd5654bc64a36de +nodepath-m/44'/60'/0'/14/ , relayerIndex-1 , address-d75fb35d5755c1b65752d61c6b4a2514e57776de +nodepath-m/44'/60'/0'/14/ , relayerIndex-2 , address-6dff5867c5340efb845359d12683922d9735ab33 +nodepath-m/44'/60'/0'/14/ , relayerIndex-3 , address-37b50129601b377262cd5117ab2019837b46162c +nodepath-m/44'/60'/0'/14/ , relayerIndex-4 , address-50b0842a179a43492c35ab8272a1efa30c5ea971 +nodepath-m/44'/60'/0'/14/ , relayerIndex-5 , address-577c335063fb98d8b6316add9515f9cccc6594c9 +nodepath-m/44'/60'/0'/14/ , relayerIndex-6 , address-c49bbec787677c3ecff1b15b7b201a7a6382301d +nodepath-m/44'/60'/0'/14/ , relayerIndex-7 , address-719bd8a091b361eae5dd4230eac9475dafb689ff +nodepath-m/44'/60'/0'/14/ , relayerIndex-8 , address-ae39b923683aca9d1143a5e27e379acd6537dcc3 +nodepath-m/44'/60'/0'/14/ , relayerIndex-9 , address-d460f40517c02725edefb37686fed02abc5c7e05 +nodepath-m/44'/60'/0'/14/ , relayerIndex-10 , address-38a63dac0a6af61618202adecebe787682775854 +nodepath-m/44'/60'/0'/14/ , relayerIndex-11 , address-307d3dc118f0a12b607de2e021a0507a11f7e9fa +nodepath-m/44'/60'/0'/14/ , relayerIndex-12 , address-036afab115346dfccc8916858fdedcaec8a5ae7c +nodepath-m/44'/60'/0'/14/ , relayerIndex-13 , address-44d91953edfcbc539e9507c6fbb5c93100c47def +nodepath-m/44'/60'/0'/14/ , relayerIndex-14 , address-fde1645758120ae9e17fb4018ff0db7fbcf32c94 +nodepath-m/44'/60'/0'/14/ , relayerIndex-15 , address-eed8600215700749c4c61036ddb24e04ae19e865 +nodepath-m/44'/60'/0'/14/ , relayerIndex-16 , address-835c7b7faccf90cb3785a695373fd8ebea234e5b +nodepath-m/44'/60'/0'/14/ , relayerIndex-17 , address-7ab22d9831b511fc496600476c86f9a8ab820a33 +nodepath-m/44'/60'/0'/14/ , relayerIndex-18 , address-643ab6b11f17f6b5c099586af37b6570e59eb68e +nodepath-m/44'/60'/0'/14/ , relayerIndex-19 , address-07bd8a33e05eaf10b529d0f340b72a40a529bf22 +nodepath-m/44'/60'/0'/14/ , relayerIndex-20 , address-d7a394ce3dac1453487970ea9bf23f8f6dd796a4 +nodepath-m/44'/60'/0'/14/ , relayerIndex-21 , address-16dd757786a5114e131a94d0491b6c15c964cadb +nodepath-m/44'/60'/0'/14/ , relayerIndex-22 , address-c2ffd2f39549a481869ccb69576869a6f567ddae +nodepath-m/44'/60'/0'/14/ , relayerIndex-23 , address-1e071883305c334529927ba9aa8f5ba15bf8bc4d +nodepath-m/44'/60'/0'/14/ , relayerIndex-24 , address-a10b5fe7e06f1a7d29fac32c0343165264f0e6f6 +nodepath-m/44'/60'/0'/14/ , relayerIndex-25 , address-94dbf5f8c3586abb6b7f1c2d552f814041c728dc +nodepath-m/44'/60'/0'/14/ , relayerIndex-26 , address-cfe5ac927913983a47b26507aabf9ee869f56bd6 +nodepath-m/44'/60'/0'/14/ , relayerIndex-27 , address-4c2109b3debae54307db0176af3c277432d77d83 +nodepath-m/44'/60'/0'/14/ , relayerIndex-28 , address-d909dc133c45c34d88d3f06426246e8c2a732e2b +nodepath-m/44'/60'/0'/14/ , relayerIndex-29 , address-4f879d7817511eb72fe2b309c65a14b16a0930fa +nodepath-m/44'/60'/0'/14/ , relayerIndex-30 , address-64857df21c470690b41a63b89afa04e7ee1935cb +nodepath-m/44'/60'/0'/14/ , relayerIndex-31 , address-299e269c2745c02d157503e6899c984dfa827106 +nodepath-m/44'/60'/0'/14/ , relayerIndex-32 , address-c2b0540d718bf46c5be7e5e9f6b2719d003fa059 +nodepath-m/44'/60'/0'/14/ , relayerIndex-33 , address-00e32b910635b16f1ae0dcb7b2a5479cef0a9371 +nodepath-m/44'/60'/0'/14/ , 
relayerIndex-34 , address-9a3f734f2a149e74dcd0726c3c75067c2a03c795 +nodepath-m/44'/60'/0'/14/ , relayerIndex-35 , address-8d273f6e1826e092914ef12176142ca2aa79c2d2 +nodepath-m/44'/60'/0'/14/ , relayerIndex-36 , address-62c3a363e05f669e52b816e3a11dc033649dc3ef +nodepath-m/44'/60'/0'/14/ , relayerIndex-37 , address-5fd375dfbd3020c1a60d4879aeedefc2449c3ab1 +nodepath-m/44'/60'/0'/14/ , relayerIndex-38 , address-acbde69d1383400964f5068ab79f7b465b984c66 +nodepath-m/44'/60'/0'/14/ , relayerIndex-39 , address-b85b29d3201945581a3b2eedc5891cb8a987406b +nodepath-m/44'/60'/0'/14/ , relayerIndex-40 , address-c0a2016c0070c334f3ea4efbf649a4ef613aa392 +nodepath-m/44'/60'/0'/14/ , relayerIndex-41 , address-4ac79c73277306eba8667803886be192cddb750e +nodepath-m/44'/60'/0'/14/ , relayerIndex-42 , address-ed7c1fce512f9b957f5eec074ec337e279faf70d +nodepath-m/44'/60'/0'/14/ , relayerIndex-43 , address-e0d1b0a8a5cfc6ad9c78dba9a987da8be7e9d233 +nodepath-m/44'/60'/0'/14/ , relayerIndex-44 , address-c7804222fd55d8e6cbbbd02418c379053aaf6997 +nodepath-m/44'/60'/0'/14/ , relayerIndex-45 , address-08777b667708403f44dfa474c7e47a82a7851086 +nodepath-m/44'/60'/0'/14/ , relayerIndex-46 , address-17a335073fa19557551758b53d52ff34e2086a88 +nodepath-m/44'/60'/0'/14/ , relayerIndex-47 , address-411c4f3fc08849e497357efcc35bd90d6bf58efe +nodepath-m/44'/60'/0'/14/ , relayerIndex-48 , address-c4a5448ddea3e851571bd42f3a7bced2bccec536 +nodepath-m/44'/60'/0'/14/ , relayerIndex-49 , address-eff3d824ec1eff49b05b045d56add1e0eb1ea895 +nodepath-m/44'/60'/0'/15/ , relayerIndex-0 , address-03869736286cb9f75ecc5628b1c0e2bb1ecf4201 +nodepath-m/44'/60'/0'/15/ , relayerIndex-1 , address-f2ce9ec48c07ad0665efca31e8e8efc8f755d89b +nodepath-m/44'/60'/0'/15/ , relayerIndex-2 , address-00f3388c887d58b766d6078e70effdb893d6219d +nodepath-m/44'/60'/0'/15/ , relayerIndex-3 , address-409802c54dccdef434044b02f65067428da38d92 +nodepath-m/44'/60'/0'/15/ , relayerIndex-4 , address-3ed11fab649c85b34d4c48875fec75fd567de93a +nodepath-m/44'/60'/0'/15/ , relayerIndex-5 , address-5410a2dc4b8f11b52c7b46b72c5740b93765037f +nodepath-m/44'/60'/0'/15/ , relayerIndex-6 , address-d3f7bc0c223b6035e2cdb00cdf40159eadad8458 +nodepath-m/44'/60'/0'/15/ , relayerIndex-7 , address-c9b9f98dd2a94c2690a5014b82e1569cd626879a +nodepath-m/44'/60'/0'/15/ , relayerIndex-8 , address-cc5a3134cb8e98de6dc570bdfc44e83fc9a5d9cc +nodepath-m/44'/60'/0'/15/ , relayerIndex-9 , address-4668fc5d478747fa13f7702d7f4d8690dfaf70ce +nodepath-m/44'/60'/0'/15/ , relayerIndex-10 , address-7f2b783f3d88020beb7252ced259cde87af01a03 +nodepath-m/44'/60'/0'/15/ , relayerIndex-11 , address-cdc1c1a357862c9e5ca981744dbb32702601f4cf +nodepath-m/44'/60'/0'/15/ , relayerIndex-12 , address-93c395fc64a7eac2501f94bbf03fa6f0010a9133 +nodepath-m/44'/60'/0'/15/ , relayerIndex-13 , address-25ef19f8ec51842f45affb5be0c468e83c0c2f5d +nodepath-m/44'/60'/0'/15/ , relayerIndex-14 , address-b419077117c3135f27eaf176c0f895d3e6fa68c9 +nodepath-m/44'/60'/0'/15/ , relayerIndex-15 , address-79278d8b92d9472eb09d985bc529790de911d808 +nodepath-m/44'/60'/0'/15/ , relayerIndex-16 , address-5e3849baae02ca796d652fb26e81233863ba5e34 +nodepath-m/44'/60'/0'/15/ , relayerIndex-17 , address-bb8bb7e24bc019c83c6b3209c144150aaf72bbf8 +nodepath-m/44'/60'/0'/15/ , relayerIndex-18 , address-c5eb8a1ecf49391444a0d53cd0f6139fc1f076dd +nodepath-m/44'/60'/0'/15/ , relayerIndex-19 , address-bf671daef1b0007577ca83aaed9711f3cf624e5b +nodepath-m/44'/60'/0'/15/ , relayerIndex-20 , address-586b883671a58cdbf44cf10bb7257d3792aa8ac9 +nodepath-m/44'/60'/0'/15/ , 
relayerIndex-21 , address-c5cd793f2d66b692b8ec0fb1b99f4ce06fd388e2 +nodepath-m/44'/60'/0'/15/ , relayerIndex-22 , address-a7bc6fe691557132088d8f54a11f27bd35e1a157 +nodepath-m/44'/60'/0'/15/ , relayerIndex-23 , address-0342241b3241b1e796bea225c4e0195653669761 +nodepath-m/44'/60'/0'/15/ , relayerIndex-24 , address-fd1949e9c7ae9423345b29b9af4573f5146ceb23 +nodepath-m/44'/60'/0'/15/ , relayerIndex-25 , address-f93ccdaa84e3757ce671b97db26cd6405a6d9905 +nodepath-m/44'/60'/0'/15/ , relayerIndex-26 , address-6fb027a15940f1cf2e4f44dab0b4735366de27fb +nodepath-m/44'/60'/0'/15/ , relayerIndex-27 , address-731d851d10480ac5a3084a48e7a4a87e94c7302c +nodepath-m/44'/60'/0'/15/ , relayerIndex-28 , address-25cb5e7a715d2de37d43aa20f2d5a423eef5c26d +nodepath-m/44'/60'/0'/15/ , relayerIndex-29 , address-c88cb31264fd28ce8390581bfb6aa5f84d32d274 +nodepath-m/44'/60'/0'/15/ , relayerIndex-30 , address-a2ef5714b4971a544bc911d1783069169c480dc3 +nodepath-m/44'/60'/0'/15/ , relayerIndex-31 , address-d5a20feac9e438a47200bc679f531f3e5dcc111a +nodepath-m/44'/60'/0'/15/ , relayerIndex-32 , address-69d14e2fb41e4c91f25ec363cee7f8eda53cd632 +nodepath-m/44'/60'/0'/15/ , relayerIndex-33 , address-3b64aeacac76ef083efa51bcb5ca9335dd7e5e05 +nodepath-m/44'/60'/0'/15/ , relayerIndex-34 , address-66e8079380c01fa246db91cfb1d0574611c16a89 +nodepath-m/44'/60'/0'/15/ , relayerIndex-35 , address-6b36d8351b8d3a4d25e40c2ca01de8559b29db4c +nodepath-m/44'/60'/0'/15/ , relayerIndex-36 , address-b34a8940fc0627ced7c72e28a0204c402389d192 +nodepath-m/44'/60'/0'/15/ , relayerIndex-37 , address-d024bc7fa2d40f02d711787b0a33e19bb54b715e +nodepath-m/44'/60'/0'/15/ , relayerIndex-38 , address-6beb02d861e3be85eb9cdcec4ab65166b282074d +nodepath-m/44'/60'/0'/15/ , relayerIndex-39 , address-2c0dc4346cdb2858445ab3b9125616bdb224d89d +nodepath-m/44'/60'/0'/15/ , relayerIndex-40 , address-dd2de3e609f5a64925933e0e23ca339c04a2a027 +nodepath-m/44'/60'/0'/15/ , relayerIndex-41 , address-6162e70abdc93cb90368109d154b849fbe77d1b9 +nodepath-m/44'/60'/0'/15/ , relayerIndex-42 , address-8e646fd3ebd1b97a904370a0f5ca423b49db6d3f +nodepath-m/44'/60'/0'/15/ , relayerIndex-43 , address-827187e3980f8e1c76d9b754f4b3918ce2659330 +nodepath-m/44'/60'/0'/15/ , relayerIndex-44 , address-7f6e4838275866b2225fd2e90a7e5ed8f18645b2 +nodepath-m/44'/60'/0'/15/ , relayerIndex-45 , address-8b85ba23424f1d688c75bacf4ad6d026383bf85a +nodepath-m/44'/60'/0'/15/ , relayerIndex-46 , address-6e475560ab0a95286a937aef58a3bbccacaf1a93 +nodepath-m/44'/60'/0'/15/ , relayerIndex-47 , address-7d1176ce1935815084c48baec37144c9ad75debb +nodepath-m/44'/60'/0'/15/ , relayerIndex-48 , address-45290719c7655ebec5bbe252037a553836a69ed4 +nodepath-m/44'/60'/0'/15/ , relayerIndex-49 , address-1411b878b09856938abab4e87fd14657f1bf4004 +nodepath-m/44'/60'/0'/16/ , relayerIndex-0 , address-77ceae7b7f48bf1f6e3ef91db17a9daad148ba49 +nodepath-m/44'/60'/0'/16/ , relayerIndex-1 , address-c3b4b6df9760101d644578b656419776a5f12b0e +nodepath-m/44'/60'/0'/16/ , relayerIndex-2 , address-62883f2ec3e96b0c4ebe4eab95686dd5b90fc000 +nodepath-m/44'/60'/0'/16/ , relayerIndex-3 , address-de44ba2059d32bc4ce58d919bd0f1f8906718c62 +nodepath-m/44'/60'/0'/16/ , relayerIndex-4 , address-3a31c45c20f2576d26b3074c912883266ff9bbe5 +nodepath-m/44'/60'/0'/16/ , relayerIndex-5 , address-2ea354a6318e1f81f9a02ef8be3f87c59a466166 +nodepath-m/44'/60'/0'/16/ , relayerIndex-6 , address-cdfcb88c3fd091f515790a7c8a173668f2694cf6 +nodepath-m/44'/60'/0'/16/ , relayerIndex-7 , address-ee71b0dbcff0cfabe6e6d28297ba387012ac807d +nodepath-m/44'/60'/0'/16/ , 
relayerIndex-8 , address-d1d2fa76fc3555b71f9d47fa9392f541a3280438 +nodepath-m/44'/60'/0'/16/ , relayerIndex-9 , address-9a6043f63820bee53e7945835360d04d68ede1c8 +nodepath-m/44'/60'/0'/16/ , relayerIndex-10 , address-3185d07cc29bbf6c2db5c0cdc13bf730964a0c2e +nodepath-m/44'/60'/0'/16/ , relayerIndex-11 , address-fa98c96c7093b71ca145808ec74c998c6e124a22 +nodepath-m/44'/60'/0'/16/ , relayerIndex-12 , address-0140c0347c72ea59044bc5ff04590947e6c5e08b +nodepath-m/44'/60'/0'/16/ , relayerIndex-13 , address-8fff01924d92773634d5f5f1b7ffd2a7973bf97a +nodepath-m/44'/60'/0'/16/ , relayerIndex-14 , address-92a3aefe0ec174ba2b452614c509d6310e88fc1f +nodepath-m/44'/60'/0'/16/ , relayerIndex-15 , address-aaf570a7bfca616b3be59f7822b7aa79afbc0f32 +nodepath-m/44'/60'/0'/16/ , relayerIndex-16 , address-d0d1b9c0c53b9f4f9862f0f4a4785f07a117d9f1 +nodepath-m/44'/60'/0'/16/ , relayerIndex-17 , address-07fe689c5b4b65696ff7b268f566492ef87c9965 +nodepath-m/44'/60'/0'/16/ , relayerIndex-18 , address-c0e5eeb115d00bbad5bd01170fa296acef4e329c +nodepath-m/44'/60'/0'/16/ , relayerIndex-19 , address-21dc2137056da73d56030440c9a6c0412c6da4ee +nodepath-m/44'/60'/0'/16/ , relayerIndex-20 , address-cad90e0d967c189b9809a609f30049bfafeda5d4 +nodepath-m/44'/60'/0'/16/ , relayerIndex-21 , address-af65bf0c3cd4aff241ea4a168a1372cb6a823cd0 +nodepath-m/44'/60'/0'/16/ , relayerIndex-22 , address-00f849ac62485fe7d4ce0ac6f5d181d5ee071978 +nodepath-m/44'/60'/0'/16/ , relayerIndex-23 , address-cc7f27fde373a9343ef115f768e9b7272cc45751 +nodepath-m/44'/60'/0'/16/ , relayerIndex-24 , address-50d8d6ac50a4ca5e7327f6a3c57d0b552e518cd7 +nodepath-m/44'/60'/0'/16/ , relayerIndex-25 , address-7cd793278eb9c28f66f4cefede3403769cd44923 +nodepath-m/44'/60'/0'/16/ , relayerIndex-26 , address-a309fc28724048e0d87f8614ec08d176d9761f4d +nodepath-m/44'/60'/0'/16/ , relayerIndex-27 , address-cd7e7e5ae02f9e903fd177460ce40663fd85e883 +nodepath-m/44'/60'/0'/16/ , relayerIndex-28 , address-3840a5afa10b70674c79152bd0742a42d127b6b0 +nodepath-m/44'/60'/0'/16/ , relayerIndex-29 , address-44407381d43a855b2235c63316a70832976ae72f +nodepath-m/44'/60'/0'/16/ , relayerIndex-30 , address-f928e9cd2562939d1c5b495c6637f992e3a66799 +nodepath-m/44'/60'/0'/16/ , relayerIndex-31 , address-6f88f01b81f67f6736877c76a371eb43d0ad8731 +nodepath-m/44'/60'/0'/16/ , relayerIndex-32 , address-ead71b2a5ffd2d04e82804b4fcd9cd55fce70316 +nodepath-m/44'/60'/0'/16/ , relayerIndex-33 , address-21a7d3b2442cfcfcacad01c9e80e25e8b4d9a975 +nodepath-m/44'/60'/0'/16/ , relayerIndex-34 , address-076295f11ce453f1a0f3d49d1683fba3805fb8e4 +nodepath-m/44'/60'/0'/16/ , relayerIndex-35 , address-7ffea87c7f8940880a91ac2f966177a5c57c1289 +nodepath-m/44'/60'/0'/16/ , relayerIndex-36 , address-545085c19122d4e80b4fc39a65804cf9e85e8b9e +nodepath-m/44'/60'/0'/16/ , relayerIndex-37 , address-2ebce983876322d7327ebdf06fed7e1499d3ce93 +nodepath-m/44'/60'/0'/16/ , relayerIndex-38 , address-b0fd5b131a349d4be5361153bfefcbe3126921bd +nodepath-m/44'/60'/0'/16/ , relayerIndex-39 , address-03d6f0290943311b145f02c1d975dd06393cbd46 +nodepath-m/44'/60'/0'/16/ , relayerIndex-40 , address-ca25bab3e9e4923b93f98d42fd234258d724ee31 +nodepath-m/44'/60'/0'/16/ , relayerIndex-41 , address-d04b9825db98863a0f057ac55e02fb2bf5d61673 +nodepath-m/44'/60'/0'/16/ , relayerIndex-42 , address-9ce0c3abe2580873eae24fa8bb81ac73a7f091b5 +nodepath-m/44'/60'/0'/16/ , relayerIndex-43 , address-ed4484ba71a9234abc1ec9fe374ee51471f244c4 +nodepath-m/44'/60'/0'/16/ , relayerIndex-44 , address-c82e2e27f44bd425f20e7589129a621c53d9622b +nodepath-m/44'/60'/0'/16/ , 
relayerIndex-45 , address-a05a25f052f99b3c521388418a3814237c646f9f +nodepath-m/44'/60'/0'/16/ , relayerIndex-46 , address-23f8516d30812761b383ffafa5422f46ffc71b07 +nodepath-m/44'/60'/0'/16/ , relayerIndex-47 , address-ec3c65db2058ac93d22bcf70c6f97a19a23ee691 +nodepath-m/44'/60'/0'/16/ , relayerIndex-48 , address-901f2099c530e5822c7940a0eae8b2bbcf0162d6 +nodepath-m/44'/60'/0'/16/ , relayerIndex-49 , address-6b876d8e9ce4283c33e4b5d15d9dd4d40c9c6cc3 +nodepath-m/44'/60'/0'/17/ , relayerIndex-0 , address-4a08b8daf3482138e5a1a586f49bbd16a86c97f0 +nodepath-m/44'/60'/0'/17/ , relayerIndex-1 , address-8bfcb48c0ef152311235034744dc3ffc68d29ddf +nodepath-m/44'/60'/0'/17/ , relayerIndex-2 , address-b8c03c7c601c98226778e47d5bb6bb7039176358 +nodepath-m/44'/60'/0'/17/ , relayerIndex-3 , address-5811cb9220d220b6235ef3847f549e5f93be8959 +nodepath-m/44'/60'/0'/17/ , relayerIndex-4 , address-103598ee7956ae1b8c4fc26eae81b4a5d4ed6ca3 +nodepath-m/44'/60'/0'/17/ , relayerIndex-5 , address-8e7770648be6f9b3d2518d6db13c953f5c643bf0 +nodepath-m/44'/60'/0'/17/ , relayerIndex-6 , address-eab6c170a9c64f1bb8ef3fd36406453ab5260d95 +nodepath-m/44'/60'/0'/17/ , relayerIndex-7 , address-d35c3cb0dbfbb6753e90c72fac90267b9705eff7 +nodepath-m/44'/60'/0'/17/ , relayerIndex-8 , address-110a43a2fb5e091470411b5094cd262c5e06b83b +nodepath-m/44'/60'/0'/17/ , relayerIndex-9 , address-f9264964dcf2ee2b860b0c1cd73bf529cf2c0dba +nodepath-m/44'/60'/0'/17/ , relayerIndex-10 , address-fa37f37507d5775afb57cd16a96b6d5b0ed2bffd +nodepath-m/44'/60'/0'/17/ , relayerIndex-11 , address-edb95a1fe6bdd047eab8893d92b804f3a9435c6b +nodepath-m/44'/60'/0'/17/ , relayerIndex-12 , address-135d891f2ecc35fa68b5332e6f94bb1069534533 +nodepath-m/44'/60'/0'/17/ , relayerIndex-13 , address-ec0933a89dcd2730a575d70311f220f66ec0df43 +nodepath-m/44'/60'/0'/17/ , relayerIndex-14 , address-9a1aa7cc66a041a50288331d700a9397958ac56d +nodepath-m/44'/60'/0'/17/ , relayerIndex-15 , address-d1f578761f7b39d8e9af392b4207b90575913afe +nodepath-m/44'/60'/0'/17/ , relayerIndex-16 , address-6e4d4a6a2fa9da96e80f8db62408525d9edaeda4 +nodepath-m/44'/60'/0'/17/ , relayerIndex-17 , address-b02740ccfbb72832719f19680b2734ef6e68a470 +nodepath-m/44'/60'/0'/17/ , relayerIndex-18 , address-4c0f3bf27d34b67e4fa9638d540c5ee4144ccda1 +nodepath-m/44'/60'/0'/17/ , relayerIndex-19 , address-95d40c25c2ea362d14850418263705480ba1e86e +nodepath-m/44'/60'/0'/17/ , relayerIndex-20 , address-3102f5e4475c6a0d4748b4382bbb631224c9a00d +nodepath-m/44'/60'/0'/17/ , relayerIndex-21 , address-60dc246e039d0b0fb4ad0fe30c5e2cbccb8e44bf +nodepath-m/44'/60'/0'/17/ , relayerIndex-22 , address-2f6dd987983da0e76a54db23b688d8b4fe6a1de5 +nodepath-m/44'/60'/0'/17/ , relayerIndex-23 , address-31193f08504b035e7f006085e5e4398bad0a5438 +nodepath-m/44'/60'/0'/17/ , relayerIndex-24 , address-ddb0837d62ff2cd009ef1e65070af66060be548d +nodepath-m/44'/60'/0'/17/ , relayerIndex-25 , address-621240421e9da330644aad16582eb658c7234bad +nodepath-m/44'/60'/0'/17/ , relayerIndex-26 , address-a97c44a46f80246740c77da84d968bada45e6eb8 +nodepath-m/44'/60'/0'/17/ , relayerIndex-27 , address-5ce120720c66eb74fc1175234fdf372f4150c874 +nodepath-m/44'/60'/0'/17/ , relayerIndex-28 , address-1763ae652e79227c0d04745059cd8a02e387d93c +nodepath-m/44'/60'/0'/17/ , relayerIndex-29 , address-fcfe5d04ac1edf961cae1b4bb8037532f9b92536 +nodepath-m/44'/60'/0'/17/ , relayerIndex-30 , address-90b038076237f0125abc3e1329f4a4dd700d7843 +nodepath-m/44'/60'/0'/17/ , relayerIndex-31 , address-a068deccb8a8e326e1b6b1d512c5fabff593c1c6 +nodepath-m/44'/60'/0'/17/ , 
relayerIndex-32 , address-afbd0871c819e9f217d89b4044e47b731f358649 +nodepath-m/44'/60'/0'/17/ , relayerIndex-33 , address-5cc65775f93e58e9dd4634e85d4ed1f8e4a74cdd +nodepath-m/44'/60'/0'/17/ , relayerIndex-34 , address-904a370ed6faf2ab35c936eb3fb7287ea1a0aaa6 +nodepath-m/44'/60'/0'/17/ , relayerIndex-35 , address-ce292e801c6d76a64582cc4abfcdbebaa89c8e09 +nodepath-m/44'/60'/0'/17/ , relayerIndex-36 , address-cb3cca79cb2932156854ea39a8ca4cdc819b1f5d +nodepath-m/44'/60'/0'/17/ , relayerIndex-37 , address-46c285bcdc0ac8d392447ec74e32d05fdfe063dc +nodepath-m/44'/60'/0'/17/ , relayerIndex-38 , address-944f76409788b981b2c6f29909d6e0d3274294ea +nodepath-m/44'/60'/0'/17/ , relayerIndex-39 , address-9439fb7c5c22e6c89a20fcd57814074595a4b981 +nodepath-m/44'/60'/0'/17/ , relayerIndex-40 , address-3d1c8058f49c750503bd065db63553e2de96e3ae +nodepath-m/44'/60'/0'/17/ , relayerIndex-41 , address-f63cd344cc05e7ff56503deab983864521d34b4c +nodepath-m/44'/60'/0'/17/ , relayerIndex-42 , address-cf3cb3cd2ee353296a3348677572cd51ac9b9a7c +nodepath-m/44'/60'/0'/17/ , relayerIndex-43 , address-eaa18e99f2dd66c1b6bfc5be095d19b27ba560c5 +nodepath-m/44'/60'/0'/17/ , relayerIndex-44 , address-30b0fc41e732e37015efe81760788808db3e8564 +nodepath-m/44'/60'/0'/17/ , relayerIndex-45 , address-43b86af6957657982873b2571a6e519913211047 +nodepath-m/44'/60'/0'/17/ , relayerIndex-46 , address-47bdec4b284c9b9a2d340a75d0c5a4fe11ac467e +nodepath-m/44'/60'/0'/17/ , relayerIndex-47 , address-37bc5d2c8281d2ef071064cd1ad651ecad805769 +nodepath-m/44'/60'/0'/17/ , relayerIndex-48 , address-1a56b0b6c98d31cf73346fb623acf999f3aefc2c +nodepath-m/44'/60'/0'/17/ , relayerIndex-49 , address-83841d08b1aec0352b90b77f75231fcd2cd3928c +nodepath-m/44'/60'/0'/18/ , relayerIndex-0 , address-8bf4e5e3253d50931cad885015e503ed951b8c4b +nodepath-m/44'/60'/0'/18/ , relayerIndex-1 , address-488fc28cdccd85c23aa3fc77fea92ae659b6370a +nodepath-m/44'/60'/0'/18/ , relayerIndex-2 , address-b6112aecf78ffea10f324430e2512d2fbcf2a76a +nodepath-m/44'/60'/0'/18/ , relayerIndex-3 , address-93faea7b94e064405a0324591703d31f1625716b +nodepath-m/44'/60'/0'/18/ , relayerIndex-4 , address-bec03d6293ddba1b3c0b2c40032c98819270d265 +nodepath-m/44'/60'/0'/18/ , relayerIndex-5 , address-4d67aefee839cd62b5f95e60e8eeaec8b609e9f4 +nodepath-m/44'/60'/0'/18/ , relayerIndex-6 , address-f5730b6fd36f1023298f7fc0e9259fc5521d885f +nodepath-m/44'/60'/0'/18/ , relayerIndex-7 , address-efb0f40f393d5353cf47efa431b2863f5e4e2e73 +nodepath-m/44'/60'/0'/18/ , relayerIndex-8 , address-c007271e6149682384900795950e803d549bf2c2 +nodepath-m/44'/60'/0'/18/ , relayerIndex-9 , address-ee0757a046cadb953b975817b79f7c81c6819f54 +nodepath-m/44'/60'/0'/18/ , relayerIndex-10 , address-96fdad98579c0b43dd5b0d67db1efcd10264df90 +nodepath-m/44'/60'/0'/18/ , relayerIndex-11 , address-c215cd7f60cd0bdcf50a102aaf2392a68235f8e6 +nodepath-m/44'/60'/0'/18/ , relayerIndex-12 , address-e54d08f8cd7596023c5a37827ce1d1df415ddc0e +nodepath-m/44'/60'/0'/18/ , relayerIndex-13 , address-e2c0123d962c5ae30cb66f4b8f343dfc7eb62e69 +nodepath-m/44'/60'/0'/18/ , relayerIndex-14 , address-1997cc0ff9660be3bc65fb4e3c9ad79b33a37656 +nodepath-m/44'/60'/0'/18/ , relayerIndex-15 , address-971a7ad05636a2f3a6e5593c92be617d1bff7aeb +nodepath-m/44'/60'/0'/18/ , relayerIndex-16 , address-eed0f20edcebf55df3489d2f8588c333658d17c7 +nodepath-m/44'/60'/0'/18/ , relayerIndex-17 , address-c3cafc3ada78ee48020d1ad2643306d44a143ff4 +nodepath-m/44'/60'/0'/18/ , relayerIndex-18 , address-9efce952abe5309cf3d2ceae3c4b953a709c142f +nodepath-m/44'/60'/0'/18/ , 
relayerIndex-19 , address-a5b5effdcc84692d3a1c7653d17d0af470d87324 +nodepath-m/44'/60'/0'/18/ , relayerIndex-20 , address-0ef934c64f0218051a54445993970b0f0e0e0f55 +nodepath-m/44'/60'/0'/18/ , relayerIndex-21 , address-c9de8a6fafa4db7478ce1d0a29660b9e70123c40 +nodepath-m/44'/60'/0'/18/ , relayerIndex-22 , address-9039caf6904e5fff62bbf3b691c4f926eaed6ea6 +nodepath-m/44'/60'/0'/18/ , relayerIndex-23 , address-3cf1058b48e8c6a9de8ab63578386461f42775d5 +nodepath-m/44'/60'/0'/18/ , relayerIndex-24 , address-3fedadc4fe0e3bb9f79dc79f98b394b1cd0317db +nodepath-m/44'/60'/0'/18/ , relayerIndex-25 , address-25b0889f9418ddc79a727b740dc2ec049ea9e7f9 +nodepath-m/44'/60'/0'/18/ , relayerIndex-26 , address-aba896b1fce1cb6a1b48f26291df1f605ae9472c +nodepath-m/44'/60'/0'/18/ , relayerIndex-27 , address-379c8df367a560c5ddba063ef9f50178126174d3 +nodepath-m/44'/60'/0'/18/ , relayerIndex-28 , address-fd77351908d3560714225bb2c0e9d3d3738e8679 +nodepath-m/44'/60'/0'/18/ , relayerIndex-29 , address-0fe3f05ac4244d1f98bc4e3b99e4f18fa2cd2e4d +nodepath-m/44'/60'/0'/18/ , relayerIndex-30 , address-52157e03c43b56bbe7cbd77f230c2200fe85b91b +nodepath-m/44'/60'/0'/18/ , relayerIndex-31 , address-01f980e121b8c705b55cc1ffed7d4a3de625fefc +nodepath-m/44'/60'/0'/18/ , relayerIndex-32 , address-ecb928e89a596e3497ed789211630ec657f0d443 +nodepath-m/44'/60'/0'/18/ , relayerIndex-33 , address-175881c9addba9d730bef55670267bb6af006059 +nodepath-m/44'/60'/0'/18/ , relayerIndex-34 , address-51d0a3a47a9176c51cdc2f2e329c466b55dc9e69 +nodepath-m/44'/60'/0'/18/ , relayerIndex-35 , address-b9e438882d013dc28adb955be7019cf8a1fc556b +nodepath-m/44'/60'/0'/18/ , relayerIndex-36 , address-45a6cb1a3f4759bacb222b95a4bda6b757577a58 +nodepath-m/44'/60'/0'/18/ , relayerIndex-37 , address-fd493fee70c91c5a5aba85f39de2395d3ae69223 +nodepath-m/44'/60'/0'/18/ , relayerIndex-38 , address-b4ef5a374689a4789fa02131229c48fb392dec1f +nodepath-m/44'/60'/0'/18/ , relayerIndex-39 , address-9ae465c06296d9ce44670449d8e18c52784c7a7b +nodepath-m/44'/60'/0'/18/ , relayerIndex-40 , address-36b31d1f549f51eb7005652b5e9da8bbbc3518f9 +nodepath-m/44'/60'/0'/18/ , relayerIndex-41 , address-f3cd1c1ba48bf955fae5eff727d20c1494e8536f +nodepath-m/44'/60'/0'/18/ , relayerIndex-42 , address-73af07c1f1ce27abca2b6a91c05ccc97cbef09b8 +nodepath-m/44'/60'/0'/18/ , relayerIndex-43 , address-0fbf68ed83e8b56d0cc1e7607f18f21aba0d89f5 +nodepath-m/44'/60'/0'/18/ , relayerIndex-44 , address-db31868943552f793198ee5b92e9dcc57704b8af +nodepath-m/44'/60'/0'/18/ , relayerIndex-45 , address-d06bc65fda16211f57efb91f19de12adec235b74 +nodepath-m/44'/60'/0'/18/ , relayerIndex-46 , address-fe42ec87720f2046f79f277f952838583d0dda0d +nodepath-m/44'/60'/0'/18/ , relayerIndex-47 , address-de575fa0bc8877f9e2944abcd08f36d8c6361b7c +nodepath-m/44'/60'/0'/18/ , relayerIndex-48 , address-729092bdad3dda3fe4eb9106691e9d97170369f3 +nodepath-m/44'/60'/0'/18/ , relayerIndex-49 , address-d239a7cf07cab364a5e0fb5a0de7242b51ee1e6b +nodepath-m/44'/60'/0'/19/ , relayerIndex-0 , address-a22aba79f4349f7acba6f6381370341247cd22f7 +nodepath-m/44'/60'/0'/19/ , relayerIndex-1 , address-e4cd707bc539887ea81c518d34fc7eafca8622d6 +nodepath-m/44'/60'/0'/19/ , relayerIndex-2 , address-f70bb95866f96b9d9257ccd803ffa631eda4bc4e +nodepath-m/44'/60'/0'/19/ , relayerIndex-3 , address-066eeb7c071b1f2d335afa63c4f1f9e27dc6460d +nodepath-m/44'/60'/0'/19/ , relayerIndex-4 , address-6970e44bf3e6d20197ba8c0feb12f8f43a15e90c +nodepath-m/44'/60'/0'/19/ , relayerIndex-5 , address-9c30000538348154e3edbcf4aefd0e5b1e296341 +nodepath-m/44'/60'/0'/19/ , 
relayerIndex-6 , address-f32c98e9f98943e37aed420c90b88659728a67b9 +nodepath-m/44'/60'/0'/19/ , relayerIndex-7 , address-69ac8d482e89ed2c6bd4d31dc0a4a0992ff3a7e1 +nodepath-m/44'/60'/0'/19/ , relayerIndex-8 , address-4c96e503c4306887f9cc83ab7fef6d5ced879f8d +nodepath-m/44'/60'/0'/19/ , relayerIndex-9 , address-41a65f0e2c90d2ccb97a58496a3faca56ec87a35 +nodepath-m/44'/60'/0'/19/ , relayerIndex-10 , address-757d94a50fc808ace1010811918535037f081a74 +nodepath-m/44'/60'/0'/19/ , relayerIndex-11 , address-8a46713a93a80570fed9daa52c4a28c6cca80220 +nodepath-m/44'/60'/0'/19/ , relayerIndex-12 , address-f9c44ffbdd35fbf0ad10b5b6ab0d0888e79fb11c +nodepath-m/44'/60'/0'/19/ , relayerIndex-13 , address-096ece18a00b577e639830ab65b06c91652ce231 +nodepath-m/44'/60'/0'/19/ , relayerIndex-14 , address-0c820fcd6a95787275f6c068bbba45cfa654e90f +nodepath-m/44'/60'/0'/19/ , relayerIndex-15 , address-e0695f7021ae972ee8bd5b5f5c901ef4f9a40038 +nodepath-m/44'/60'/0'/19/ , relayerIndex-16 , address-be571929a448e00846e49386d441bac31572809d +nodepath-m/44'/60'/0'/19/ , relayerIndex-17 , address-5d48e978dd3004b3fdc1fa67f0f74162cbeeedb2 +nodepath-m/44'/60'/0'/19/ , relayerIndex-18 , address-869ea934a378ac8da1469ca20fd2dd6800869bd0 +nodepath-m/44'/60'/0'/19/ , relayerIndex-19 , address-50cacc5d63a388f3f0f6d3d5c806f849f7d5e352 +nodepath-m/44'/60'/0'/19/ , relayerIndex-20 , address-c2acbbd5ac6ab16dd9a0bf1cfb30e2cbd18e8678 +nodepath-m/44'/60'/0'/19/ , relayerIndex-21 , address-e2819a767f588a4d036f16c97c6d67d328d67390 +nodepath-m/44'/60'/0'/19/ , relayerIndex-22 , address-57589f7b399dd59edb42286b594c436e3f3e7006 +nodepath-m/44'/60'/0'/19/ , relayerIndex-23 , address-1077eb46d2429017c8f65326d8b5f4ab3988cfff +nodepath-m/44'/60'/0'/19/ , relayerIndex-24 , address-c4779fe2b4c60d4e51591a49b23e71f9ecdc441d +nodepath-m/44'/60'/0'/19/ , relayerIndex-25 , address-6322fa6043c0c9fe0ae3454e560caa70b5d5f205 +nodepath-m/44'/60'/0'/19/ , relayerIndex-26 , address-f6268e19bb1e2a8fd5f5e7567c315e9537efd854 +nodepath-m/44'/60'/0'/19/ , relayerIndex-27 , address-c67ae1ee39a4a4833ef3f28396df29d2cd05aa50 +nodepath-m/44'/60'/0'/19/ , relayerIndex-28 , address-50a87a91669bb8e1436512a85ad9197a254caf23 +nodepath-m/44'/60'/0'/19/ , relayerIndex-29 , address-1d4b9e462d9fc6548fe1b61eac56bae8921b2f16 +nodepath-m/44'/60'/0'/19/ , relayerIndex-30 , address-400745f4d52e3d677841fc13ec8042803eba69c8 +nodepath-m/44'/60'/0'/19/ , relayerIndex-31 , address-cd77afc1e9adb30167e6a762493b2a079bc3c597 +nodepath-m/44'/60'/0'/19/ , relayerIndex-32 , address-e0dd736eea7369e1d346a134ef66e0e67dc847eb +nodepath-m/44'/60'/0'/19/ , relayerIndex-33 , address-cadc1cdf7c8c78c8f2d40b49618a05b1907160a1 +nodepath-m/44'/60'/0'/19/ , relayerIndex-34 , address-9585992048b3c7488fd6591f73e0c617d134ea55 +nodepath-m/44'/60'/0'/19/ , relayerIndex-35 , address-bf8ea310452b2665a84ed48b61f050843e7e5360 +nodepath-m/44'/60'/0'/19/ , relayerIndex-36 , address-656aed57cbae7c0d0922cb678ef95e53f495b0ae +nodepath-m/44'/60'/0'/19/ , relayerIndex-37 , address-971430e6465694a5334c184c427c46867f03f443 +nodepath-m/44'/60'/0'/19/ , relayerIndex-38 , address-984a2fb53970818e34c0bcc090da80793cc941c6 +nodepath-m/44'/60'/0'/19/ , relayerIndex-39 , address-ffe70e85c478926f279e86330ef28ef55e1283d7 +nodepath-m/44'/60'/0'/19/ , relayerIndex-40 , address-a55789f9ae5ded5b1f371c6c7ddce39d25f69d80 +nodepath-m/44'/60'/0'/19/ , relayerIndex-41 , address-89e7e614919226e0b19217d4a942a3a8a97ebee6 +nodepath-m/44'/60'/0'/19/ , relayerIndex-42 , address-bf5a70ec102f185afae20d1e8509a94d01f04bb8 +nodepath-m/44'/60'/0'/19/ , 
relayerIndex-43 , address-eba07e42bb6a463bea88c8f4dcc304dd37e82b70 +nodepath-m/44'/60'/0'/19/ , relayerIndex-44 , address-facfef032a69f84071989af306bd5969201d8cbf +nodepath-m/44'/60'/0'/19/ , relayerIndex-45 , address-f5e49c239a13fa8de55425dd3db94e57cebd369a +nodepath-m/44'/60'/0'/19/ , relayerIndex-46 , address-83cb86e33a71a989852d2b968d8f9efe74c5d596 +nodepath-m/44'/60'/0'/19/ , relayerIndex-47 , address-9535ead5d63765224118baf2f62fa80f31172d35 +nodepath-m/44'/60'/0'/19/ , relayerIndex-48 , address-8ae8649d7e31724d29372cf9943fcfbd9daf66c3 +nodepath-m/44'/60'/0'/19/ , relayerIndex-49 , address-5a29aa7f41aa9096e3ed3ddf344847ad7dc6ba36 +nodepath-m/44'/60'/0'/20/ , relayerIndex-0 , address-96b5927869e767330beef87232ec99471eeae420 +nodepath-m/44'/60'/0'/20/ , relayerIndex-1 , address-9b5fe9a29fe4b88514242b2600421ab2a03ef20e +nodepath-m/44'/60'/0'/20/ , relayerIndex-2 , address-4cbff7e8038ba9d91f69e260607593f4cfe92a7e +nodepath-m/44'/60'/0'/20/ , relayerIndex-3 , address-eb1b07120a4a8840784020e651185a259b5018aa +nodepath-m/44'/60'/0'/20/ , relayerIndex-4 , address-5be442abbe2f67f946c3f51c12fe464cd05aaac8 +nodepath-m/44'/60'/0'/20/ , relayerIndex-5 , address-b7b4e44ed4e30d17586269d157b7dbe246681cb4 +nodepath-m/44'/60'/0'/20/ , relayerIndex-6 , address-9a9545d68902d2e1f10f7ed65c469ccfe149a48b +nodepath-m/44'/60'/0'/20/ , relayerIndex-7 , address-09b73aedda85857bad92b3e6152473cf3b4c56b2 +nodepath-m/44'/60'/0'/20/ , relayerIndex-8 , address-5cb99fddb4f2fc63c80a1ce0a54c4ca69fe0ca85 +nodepath-m/44'/60'/0'/20/ , relayerIndex-9 , address-5a120ffe340815815bbfe104ff7567dccffcbc25 +nodepath-m/44'/60'/0'/20/ , relayerIndex-10 , address-89bb44b7223eecfe6e7082e8782c1ef2697db4b1 +nodepath-m/44'/60'/0'/20/ , relayerIndex-11 , address-323253cacd79f340ebc02b88ccd70ffdad7544f0 +nodepath-m/44'/60'/0'/20/ , relayerIndex-12 , address-dbf2bb523f3f85d6efd43e0c48b547de38afdc10 +nodepath-m/44'/60'/0'/20/ , relayerIndex-13 , address-73cc3dd9a2a66c0764772b999c576399675a8327 +nodepath-m/44'/60'/0'/20/ , relayerIndex-14 , address-9434e9e463d3863ff51ac286c81f0d9391296f19 +nodepath-m/44'/60'/0'/20/ , relayerIndex-15 , address-3816d1bc5dc9b703be8647ddc12cc84d811fc699 +nodepath-m/44'/60'/0'/20/ , relayerIndex-16 , address-938e96f5670f965177ae4ec9c8412609ea0cf131 +nodepath-m/44'/60'/0'/20/ , relayerIndex-17 , address-fed6f29b7faa2eb9f670653d24a123088591725a +nodepath-m/44'/60'/0'/20/ , relayerIndex-18 , address-e42c4b887b12f64857811282d9b9b53aa285ba5c +nodepath-m/44'/60'/0'/20/ , relayerIndex-19 , address-8228d0671abfb353f4ea7e111ccaf2aabd5e4312 +nodepath-m/44'/60'/0'/20/ , relayerIndex-20 , address-d15239fde25b082f483b0d589266030aadf3d8ce +nodepath-m/44'/60'/0'/20/ , relayerIndex-21 , address-f75d1b04849ac08e4731ab54463fa3ac7d14d94e +nodepath-m/44'/60'/0'/20/ , relayerIndex-22 , address-475c6e5683f3adaedb01ca2a303aad9b649c3900 +nodepath-m/44'/60'/0'/20/ , relayerIndex-23 , address-3f0d630e3b22ffabec631c1c12d8c399b62d49c4 +nodepath-m/44'/60'/0'/20/ , relayerIndex-24 , address-7066600cdaf394e315b90b92954181441959bf80 +nodepath-m/44'/60'/0'/20/ , relayerIndex-25 , address-93d834535d7733f45a3cea4c5f9751dae65165f9 +nodepath-m/44'/60'/0'/20/ , relayerIndex-26 , address-1098c0bf100fc9bf8f759f2bbe401220432584a3 +nodepath-m/44'/60'/0'/20/ , relayerIndex-27 , address-363e717f8a27fbadde7d0c56a6fa10fc1973f694 +nodepath-m/44'/60'/0'/20/ , relayerIndex-28 , address-e391def9ca3c69f5f48c0da5088d5f45096b4641 +nodepath-m/44'/60'/0'/20/ , relayerIndex-29 , address-0e878682fcee2cbf7b368882964d3eccddd14084 +nodepath-m/44'/60'/0'/20/ , 
relayerIndex-30 , address-f162c291de887440516dd3f0e2a3c03215c915ab +nodepath-m/44'/60'/0'/20/ , relayerIndex-31 , address-6705ec9b93c405e10da0eb4b24a8a91657e5c1f7 +nodepath-m/44'/60'/0'/20/ , relayerIndex-32 , address-5e73ead094535e1219b34acc794834876f394387 +nodepath-m/44'/60'/0'/20/ , relayerIndex-33 , address-c0f45757512a8e9745ca790b7ff0bce546669cfc +nodepath-m/44'/60'/0'/20/ , relayerIndex-34 , address-39d75370566e5b06cbf2a61af1f7037fb04b6c2a +nodepath-m/44'/60'/0'/20/ , relayerIndex-35 , address-f69f493018aa8bf5a924baf4794702c6d9640dc7 +nodepath-m/44'/60'/0'/20/ , relayerIndex-36 , address-990c473b7a3997d27a83697a5cbd1f9c101359ab +nodepath-m/44'/60'/0'/20/ , relayerIndex-37 , address-a3f21f6db03a3f2792a6105f4affea2d0caccc39 +nodepath-m/44'/60'/0'/20/ , relayerIndex-38 , address-79a9be679969be98cb10c27f9936356e7a8086ba +nodepath-m/44'/60'/0'/20/ , relayerIndex-39 , address-0dd1001c20b1cf63c0bcabcabfa5d8ea4ae38b10 +nodepath-m/44'/60'/0'/20/ , relayerIndex-40 , address-e34e112bad10ebe7c891e39bd724f5deda13bd9f +nodepath-m/44'/60'/0'/20/ , relayerIndex-41 , address-d5c3de429e6717aef86404b1d77f314de8b18d1e +nodepath-m/44'/60'/0'/20/ , relayerIndex-42 , address-6ad1d9bb21794d08181fa8d5086ae19d1a3a7aac +nodepath-m/44'/60'/0'/20/ , relayerIndex-43 , address-2c1af1eda3b319abb00549b08c96b1f427637580 +nodepath-m/44'/60'/0'/20/ , relayerIndex-44 , address-756179c7dd7323ff4af906e337f07443eddd6ba7 +nodepath-m/44'/60'/0'/20/ , relayerIndex-45 , address-6302d2b40d55183cbd5cb25b6df5aeba1b937a50 +nodepath-m/44'/60'/0'/20/ , relayerIndex-46 , address-3d4df90e47b0c22610865fc2863aa7698ec322c1 +nodepath-m/44'/60'/0'/20/ , relayerIndex-47 , address-f4a2dd9e938ad7348f239615202b6a9d9161576b +nodepath-m/44'/60'/0'/20/ , relayerIndex-48 , address-86b63617f7b012a14e2b06d46d548c9305dfd2fb +nodepath-m/44'/60'/0'/20/ , relayerIndex-49 , address-6f10258b48e41cbcff86759cb7d5b22a83870fde +nodepath-m/44'/60'/0'/21/ , relayerIndex-0 , address-a1fe0ba43675eb33de9525f6a6ffdd176636a8d4 +nodepath-m/44'/60'/0'/21/ , relayerIndex-1 , address-443d4ae6d47378558f28e06ac87d4bd47b7ced22 +nodepath-m/44'/60'/0'/21/ , relayerIndex-2 , address-d6f5bb7f7b61ddb49ca707cee38c78eebbbe9b43 +nodepath-m/44'/60'/0'/21/ , relayerIndex-3 , address-bd1ac51897c217ad0e2d56d6e9d9f08cda19a695 +nodepath-m/44'/60'/0'/21/ , relayerIndex-4 , address-afb25e7fcbec46e541c80825b3378738cf006e2e +nodepath-m/44'/60'/0'/21/ , relayerIndex-5 , address-f736df6aa0173d20fc3a136f132211c1723a5993 +nodepath-m/44'/60'/0'/21/ , relayerIndex-6 , address-514844f0ea54b376db9ab34de8c089710d144a62 +nodepath-m/44'/60'/0'/21/ , relayerIndex-7 , address-d26d4c75c251ac9cf3ee35cb372040ef2fcd1d3c +nodepath-m/44'/60'/0'/21/ , relayerIndex-8 , address-1330b915332cbb332cd512d6f229d1e8aeae8794 +nodepath-m/44'/60'/0'/21/ , relayerIndex-9 , address-11c8e398a75bfb8ea1b892d4b595982539e964af +nodepath-m/44'/60'/0'/21/ , relayerIndex-10 , address-13637a207c6fc9317d6ff4ab25e8ea0cbb979d14 +nodepath-m/44'/60'/0'/21/ , relayerIndex-11 , address-64616012cf0db75b4ac95baf515f28e404a770b3 +nodepath-m/44'/60'/0'/21/ , relayerIndex-12 , address-82f27635642d7f09ff32a0fff14702703e6f499d +nodepath-m/44'/60'/0'/21/ , relayerIndex-13 , address-151c09b6c8adf57d1ea810d7147793d9f20792d7 +nodepath-m/44'/60'/0'/21/ , relayerIndex-14 , address-2aeb8666710f0afa996a1dfd7039b97a8d3ac3fe +nodepath-m/44'/60'/0'/21/ , relayerIndex-15 , address-27893a94ba4f3070a2de9d8f2397ac2b8304c48e +nodepath-m/44'/60'/0'/21/ , relayerIndex-16 , address-096a2c75cd13e617ed9131765767dddc036fb718 +nodepath-m/44'/60'/0'/21/ , 
relayerIndex-17 , address-6f4dddc755f3fe091d7f603f8a38766de0cfe1d4 +nodepath-m/44'/60'/0'/21/ , relayerIndex-18 , address-79ad89653f7efe119eb69e87eeff5d368899ac61 +nodepath-m/44'/60'/0'/21/ , relayerIndex-19 , address-537cfbd6e91e1fe4a475bf0a11d226e0b54a783f +nodepath-m/44'/60'/0'/21/ , relayerIndex-20 , address-a1c99c6b90e8883aecc305ea18a7dceea1fe611c +nodepath-m/44'/60'/0'/21/ , relayerIndex-21 , address-af85171d5d0e2e239d1dabfd416d9369f548600e +nodepath-m/44'/60'/0'/21/ , relayerIndex-22 , address-c6b7822ee470eca75c4acf55ca1676c2ce5bfaf2 +nodepath-m/44'/60'/0'/21/ , relayerIndex-23 , address-8971fabfb515393bab8bea2fb3e6bd02c94d6b87 +nodepath-m/44'/60'/0'/21/ , relayerIndex-24 , address-1b95823c53b36b660eeda94f2c0572a6e69fc974 +nodepath-m/44'/60'/0'/21/ , relayerIndex-25 , address-91e28f68af10b13b75444c831880c3e1b8153240 +nodepath-m/44'/60'/0'/21/ , relayerIndex-26 , address-6bade028e20de861a7f1fda61fd1959e0c98896c +nodepath-m/44'/60'/0'/21/ , relayerIndex-27 , address-2e06981a9ca6c2107a47fd182479ed70b218ef15 +nodepath-m/44'/60'/0'/21/ , relayerIndex-28 , address-852ce0b56a1135011c16a83621c7d77c621c022b +nodepath-m/44'/60'/0'/21/ , relayerIndex-29 , address-d76cd9b3150391e6756ed7d348ff19fe546b3367 +nodepath-m/44'/60'/0'/21/ , relayerIndex-30 , address-5ade6304a6a72940402831747c030dd7bdbcb50b +nodepath-m/44'/60'/0'/21/ , relayerIndex-31 , address-aa8f963d1fb3f4d8251d55520a8e97ccf993e6cc +nodepath-m/44'/60'/0'/21/ , relayerIndex-32 , address-3e0513a510feecfa5e1c12e524fcba675cbbae02 +nodepath-m/44'/60'/0'/21/ , relayerIndex-33 , address-cf46b23fd704c9d432dc6ae76bc0aa11b669a9b5 +nodepath-m/44'/60'/0'/21/ , relayerIndex-34 , address-1d1edd14357ff32f3e01914a047b5fdb35cbeaf3 +nodepath-m/44'/60'/0'/21/ , relayerIndex-35 , address-5684494959c99bdb1f56f769103b554a3f8e3c0f +nodepath-m/44'/60'/0'/21/ , relayerIndex-36 , address-3489e8c8065342be796c48d9d4cc1fa5dc821a03 +nodepath-m/44'/60'/0'/21/ , relayerIndex-37 , address-b29c9a47c06b6609b976a15d791bafa01d61aa9e +nodepath-m/44'/60'/0'/21/ , relayerIndex-38 , address-c89b2eb9c71826f10b6effb2ff67fc878207756e +nodepath-m/44'/60'/0'/21/ , relayerIndex-39 , address-1eda607c54e882088aef796d9b3c5373396e45f9 +nodepath-m/44'/60'/0'/21/ , relayerIndex-40 , address-a235167b23a978af805b3c8464a937400c4f1f7e +nodepath-m/44'/60'/0'/21/ , relayerIndex-41 , address-4dad7d9a50c77cb20a6a8b633bdd7c5050884a18 +nodepath-m/44'/60'/0'/21/ , relayerIndex-42 , address-57282a59b39e77744f4efc5c3360bb2e231a6245 +nodepath-m/44'/60'/0'/21/ , relayerIndex-43 , address-724bffb84adc16daa29cc4433ac237613990a211 +nodepath-m/44'/60'/0'/21/ , relayerIndex-44 , address-3b5f50eb3f49096e640ef1e249f1faf9506a2f05 +nodepath-m/44'/60'/0'/21/ , relayerIndex-45 , address-cf0826063acf977ad98fc214d76046ec21103849 +nodepath-m/44'/60'/0'/21/ , relayerIndex-46 , address-8a83862dcd7533c17a997e34e7ef640f39178804 +nodepath-m/44'/60'/0'/21/ , relayerIndex-47 , address-2fc31d7062e4f8153ec68d9c44b7e70da180017f +nodepath-m/44'/60'/0'/21/ , relayerIndex-48 , address-e4eedc1700f80801394587bc1d76369a4b3c0c78 +nodepath-m/44'/60'/0'/21/ , relayerIndex-49 , address-87c49bf9846d9e1364e8d55ae4c64e646a1d247c +nodepath-m/44'/60'/0'/22/ , relayerIndex-0 , address-aad8b59b25f8d46135d48b6fc7fdac6b166ef733 +nodepath-m/44'/60'/0'/22/ , relayerIndex-1 , address-cbbb785f7ff58ff777b7ed51613d13894f57d51f +nodepath-m/44'/60'/0'/22/ , relayerIndex-2 , address-e58b87364e6a0e68fa3bacfa8d50a9df67e2f3ad +nodepath-m/44'/60'/0'/22/ , relayerIndex-3 , address-58052245f648cc9e826e9761cf946b7d05ca5e93 +nodepath-m/44'/60'/0'/22/ , 
relayerIndex-4 , address-c8dc549411d72f512ac3a8716936368ff4f4a3c7 +nodepath-m/44'/60'/0'/22/ , relayerIndex-5 , address-ff637ad4f059f225bf5c1d9d0936c6b2c1d4abb7 +nodepath-m/44'/60'/0'/22/ , relayerIndex-6 , address-e88dc699f0194235edae3dc8008c7cb2a0f8f0d3 +nodepath-m/44'/60'/0'/22/ , relayerIndex-7 , address-d48f42df01e17eecd5b26151ccd690925000440d +nodepath-m/44'/60'/0'/22/ , relayerIndex-8 , address-ada6173bca274ac2c1a89fd12388f601ec562e9a +nodepath-m/44'/60'/0'/22/ , relayerIndex-9 , address-1843210367d0153950adbbe95143bf0dd704cea1 +nodepath-m/44'/60'/0'/22/ , relayerIndex-10 , address-2722b7ff6ccec8c310f123ea81cf5d8aee24cd77 +nodepath-m/44'/60'/0'/22/ , relayerIndex-11 , address-3088ad8683ca737e274dddde3356d9c93f0fd1ef +nodepath-m/44'/60'/0'/22/ , relayerIndex-12 , address-ffc408a9682f97cc752bd0e73174f480aa107bb0 +nodepath-m/44'/60'/0'/22/ , relayerIndex-13 , address-522acb4cc43821f25a8a31a817fe93273715ce36 +nodepath-m/44'/60'/0'/22/ , relayerIndex-14 , address-3caf5151fcf98df7287ec5f385adc7bfa5abdd08 +nodepath-m/44'/60'/0'/22/ , relayerIndex-15 , address-1b54c89fa2a8238f170898ab98db8057d4351e8d +nodepath-m/44'/60'/0'/22/ , relayerIndex-16 , address-ba5a8825fde4f58cb509cb1a4b9381b4f9c3cf1d +nodepath-m/44'/60'/0'/22/ , relayerIndex-17 , address-4ea2b5fd51b98ee40c051cd4120df652c6ad2aec +nodepath-m/44'/60'/0'/22/ , relayerIndex-18 , address-43143a313a46389e9868f658cdb498bb5c5bcb9c +nodepath-m/44'/60'/0'/22/ , relayerIndex-19 , address-f4f09adce7954e8178fcb3bf4bbe036345c9b2be +nodepath-m/44'/60'/0'/22/ , relayerIndex-20 , address-a19089908aba432741df90e40d27d6547d641432 +nodepath-m/44'/60'/0'/22/ , relayerIndex-21 , address-7cdd87508777c4b19c4127245b25a0f749d4bd82 +nodepath-m/44'/60'/0'/22/ , relayerIndex-22 , address-be142c1c6bf522598484b81617e357606504574b +nodepath-m/44'/60'/0'/22/ , relayerIndex-23 , address-fd5684e0bfece6bab4c4a8b53ce9e99bb79866b1 +nodepath-m/44'/60'/0'/22/ , relayerIndex-24 , address-7bd36c13476d8d20fd844f18aac97b49f374c87e +nodepath-m/44'/60'/0'/22/ , relayerIndex-25 , address-9a0b4eab1724e360db90918db1bed340589dfd42 +nodepath-m/44'/60'/0'/22/ , relayerIndex-26 , address-e59bd28d627bd4381ae7a3626f7f016299f0b9b6 +nodepath-m/44'/60'/0'/22/ , relayerIndex-27 , address-b64975bcbcd126f98adc5e120af0bf2556dd2364 +nodepath-m/44'/60'/0'/22/ , relayerIndex-28 , address-c430e2e349344deda781bbdc2bf7bbfdb3b09bcd +nodepath-m/44'/60'/0'/22/ , relayerIndex-29 , address-3fce9f664509c0bffa194783187d0d95a4b1add6 +nodepath-m/44'/60'/0'/22/ , relayerIndex-30 , address-7deda585e725e979871ee2a5c0b1ea95d964a2c8 +nodepath-m/44'/60'/0'/22/ , relayerIndex-31 , address-53e53f0009e0adb88d25070556291184570ba3d3 +nodepath-m/44'/60'/0'/22/ , relayerIndex-32 , address-3d0aa3008912efb35df11fae1c5da59c428adf7c +nodepath-m/44'/60'/0'/22/ , relayerIndex-33 , address-d6f1b553f0f62936bd1d7c3931744883c5171c24 +nodepath-m/44'/60'/0'/22/ , relayerIndex-34 , address-e961b71ed322fb83d99fe7a8e58b062f82b2dca8 +nodepath-m/44'/60'/0'/22/ , relayerIndex-35 , address-288e7dde5d3509b0a1342122b8b06c6ff332c496 +nodepath-m/44'/60'/0'/22/ , relayerIndex-36 , address-d3d15e41f8b5ed62a568a7be003ee21ba551186c +nodepath-m/44'/60'/0'/22/ , relayerIndex-37 , address-a947093aed4d202fba51d22268ab94f8d94d1ad7 +nodepath-m/44'/60'/0'/22/ , relayerIndex-38 , address-675e796ba85ba95aac3cbe2fe9fdf9e44bced9bd +nodepath-m/44'/60'/0'/22/ , relayerIndex-39 , address-b8c5b81aebf5988ad800f903fbf361e7ac5c30c9 +nodepath-m/44'/60'/0'/22/ , relayerIndex-40 , address-daeb8cc2252f51b2e7695412ac06089b57aca8b2 +nodepath-m/44'/60'/0'/22/ , 
relayerIndex-41 , address-b298e909a7e90a8ffc6d60418c266701f60b81e1 +nodepath-m/44'/60'/0'/22/ , relayerIndex-42 , address-4a250a309675edd8bcb6d0a81fa68f44238a68e0 +nodepath-m/44'/60'/0'/22/ , relayerIndex-43 , address-414417fae490c1cbcafc48ece8d07c9e7e9210ff +nodepath-m/44'/60'/0'/22/ , relayerIndex-44 , address-82666b40df8e7674208f9f7389f151047199723f +nodepath-m/44'/60'/0'/22/ , relayerIndex-45 , address-0bc62f8ecbf8566649d47826516be3ada8cc8826 +nodepath-m/44'/60'/0'/22/ , relayerIndex-46 , address-7de91aff9aecddffc400aa794e23cac1f9573af4 +nodepath-m/44'/60'/0'/22/ , relayerIndex-47 , address-3a3a10dd604a9e11718b686932baf4710e683d86 +nodepath-m/44'/60'/0'/22/ , relayerIndex-48 , address-48de0178e9505164433504c8a014af9c6a6ab2e3 +nodepath-m/44'/60'/0'/22/ , relayerIndex-49 , address-8358754805af461f23fa97e362c15a98b9222ca7 +nodepath-m/44'/60'/0'/23/ , relayerIndex-0 , address-ebd59389b521de7f69391054e59c668eb1c89fdd +nodepath-m/44'/60'/0'/23/ , relayerIndex-1 , address-fa05b2fc15536ad311640a321d028b2879a4cc83 +nodepath-m/44'/60'/0'/23/ , relayerIndex-2 , address-0fb42bfc5a5882186a307d1ad7c2952ed4b25f6f +nodepath-m/44'/60'/0'/23/ , relayerIndex-3 , address-2eca585feca5ada56bae8b0bcae995c77c1def9e +nodepath-m/44'/60'/0'/23/ , relayerIndex-4 , address-26a991223302696ccee152ae91f360afa5c6e5aa +nodepath-m/44'/60'/0'/23/ , relayerIndex-5 , address-65076cf1e5383d0e75ab7d01fbb232f0b815629a +nodepath-m/44'/60'/0'/23/ , relayerIndex-6 , address-afdb10d1f8dc4c09225590ae31c90aeabf055d6d +nodepath-m/44'/60'/0'/23/ , relayerIndex-7 , address-0f52841e2fc797b4222d365fbbfed2c07e0d9f75 +nodepath-m/44'/60'/0'/23/ , relayerIndex-8 , address-d97b3e0cf7c4c86201f7d9932a8595229b13c931 +nodepath-m/44'/60'/0'/23/ , relayerIndex-9 , address-99ac76f33ee29763cc7f19faac9feb415d81c6bb +nodepath-m/44'/60'/0'/23/ , relayerIndex-10 , address-9846f2c19733215a93d8291a86c7f0a9bcf78e1e +nodepath-m/44'/60'/0'/23/ , relayerIndex-11 , address-90459d24737944b889badcb62ba7dcb6cf6d6644 +nodepath-m/44'/60'/0'/23/ , relayerIndex-12 , address-73ff91e1efa00e99962c0bf3b7a9473b07fe1929 +nodepath-m/44'/60'/0'/23/ , relayerIndex-13 , address-b1ab067430e723e292f44466795890d406836443 +nodepath-m/44'/60'/0'/23/ , relayerIndex-14 , address-a79db8953acabea60ba9479d02b977492d48122e +nodepath-m/44'/60'/0'/23/ , relayerIndex-15 , address-4f679e7ab685d6b03767ed6c9f47f5cd804eebd7 +nodepath-m/44'/60'/0'/23/ , relayerIndex-16 , address-de2d5dcd7d1e094605d6089f7385ba44cfff4800 +nodepath-m/44'/60'/0'/23/ , relayerIndex-17 , address-c758753791ff48537b1a6dbe1a3319e34fef324e +nodepath-m/44'/60'/0'/23/ , relayerIndex-18 , address-f6b55e0d5e43654e4b1d91356f5d7e19b21510c7 +nodepath-m/44'/60'/0'/23/ , relayerIndex-19 , address-df71dab3d5feac3b97b64c586e551811d0283367 +nodepath-m/44'/60'/0'/23/ , relayerIndex-20 , address-d0cb6d6b57294aa894c58f6ea4544470cf6cc9d0 +nodepath-m/44'/60'/0'/23/ , relayerIndex-21 , address-c67249548c27ca942cb3c31090c0dd0b8eae98d5 +nodepath-m/44'/60'/0'/23/ , relayerIndex-22 , address-9ef9be77570536094a1de6073265f53d6c239ab3 +nodepath-m/44'/60'/0'/23/ , relayerIndex-23 , address-737a82f39bf33534a27a2fe288d61ffe722a78f4 +nodepath-m/44'/60'/0'/23/ , relayerIndex-24 , address-c4f88035196851aa8f06990b4f1425bb499b4767 +nodepath-m/44'/60'/0'/23/ , relayerIndex-25 , address-ebb557483a7a8d0740b845297c73020d0946bf85 +nodepath-m/44'/60'/0'/23/ , relayerIndex-26 , address-6b736b35ca72b943a0e4e43001ef7ad7702a18ce +nodepath-m/44'/60'/0'/23/ , relayerIndex-27 , address-c252abfe6b722bbb40540d3a95f80cccbfb71b5f +nodepath-m/44'/60'/0'/23/ , 
relayerIndex-28 , address-fe6ac49fd838d6af025d2d599a0f1a512d0d819c +nodepath-m/44'/60'/0'/23/ , relayerIndex-29 , address-8137cd3e344a160d55db08623f88598be04e2e2f +nodepath-m/44'/60'/0'/23/ , relayerIndex-30 , address-47caf7da47a21353f81f3f3a6ab85de6d61436d2 +nodepath-m/44'/60'/0'/23/ , relayerIndex-31 , address-18659b661d88dbbb5759e10b19bc7e16119207a2 +nodepath-m/44'/60'/0'/23/ , relayerIndex-32 , address-b7ba3bc172bd03bf00bedb3a28139f3c41d3e4c7 +nodepath-m/44'/60'/0'/23/ , relayerIndex-33 , address-736ccf61ed801234306fb6c1d240d4a8de739683 +nodepath-m/44'/60'/0'/23/ , relayerIndex-34 , address-c6349f3c309b8e8549403e917aaf504c8f8de719 +nodepath-m/44'/60'/0'/23/ , relayerIndex-35 , address-5d0c341bc15e7d78275a53030e75ea8160e8fadb +nodepath-m/44'/60'/0'/23/ , relayerIndex-36 , address-efc9eae57eb671b6809f7f4414c9e370ef10bf30 +nodepath-m/44'/60'/0'/23/ , relayerIndex-37 , address-d8a1b7baf5222566465d538c2030b5999ef5b929 +nodepath-m/44'/60'/0'/23/ , relayerIndex-38 , address-a1c0eaadd3da1ae65682b52475a3d790fd1d2aec +nodepath-m/44'/60'/0'/23/ , relayerIndex-39 , address-265d6eb705dfdcb4e66a96e966936757b99c7c18 +nodepath-m/44'/60'/0'/23/ , relayerIndex-40 , address-493aa5abeb25c1ecbf7810a4c9794773c5471ea2 +nodepath-m/44'/60'/0'/23/ , relayerIndex-41 , address-001315d3d70ef49fd73aa4c14d1dd3401efe3107 +nodepath-m/44'/60'/0'/23/ , relayerIndex-42 , address-5af04ec7c98bd1e415c36ef08cfbdebf28a56f0a +nodepath-m/44'/60'/0'/23/ , relayerIndex-43 , address-b6c39d9cfb28aaadce668228314ffef027e5d969 +nodepath-m/44'/60'/0'/23/ , relayerIndex-44 , address-8640cbfb487c89e6fba9c9ddcbed40de6aad3252 +nodepath-m/44'/60'/0'/23/ , relayerIndex-45 , address-50c09e3d9d60422894186b9c90fd1683cb789a04 +nodepath-m/44'/60'/0'/23/ , relayerIndex-46 , address-36261cf397c0a7716860221bbf4c41fa84f09a3b +nodepath-m/44'/60'/0'/23/ , relayerIndex-47 , address-088c5522db3f9ce8a55da38a2c86093abb68ee5a +nodepath-m/44'/60'/0'/23/ , relayerIndex-48 , address-dda58591902d9e83ff4e592184fc1508a1b9b843 +nodepath-m/44'/60'/0'/23/ , relayerIndex-49 , address-beb276f87b46f56933a6c0bc7c515520c3a2c89b +nodepath-m/44'/60'/0'/24/ , relayerIndex-0 , address-91a73a38847a509becbf48095d1d585f3733d054 +nodepath-m/44'/60'/0'/24/ , relayerIndex-1 , address-f0a453e3562e3bffe1bb2f37ac3680b7a016a415 +nodepath-m/44'/60'/0'/24/ , relayerIndex-2 , address-1ddd404214bd13d507fe37172307abdaa94e89bd +nodepath-m/44'/60'/0'/24/ , relayerIndex-3 , address-a5a90d0912643a4c4222598c177cf279b05e2999 +nodepath-m/44'/60'/0'/24/ , relayerIndex-4 , address-c9760a3575ee60752cbca22e2b4651a9d2e40a72 +nodepath-m/44'/60'/0'/24/ , relayerIndex-5 , address-3d846651c9187c6cf0a6f226a5213a1e9da1efe7 +nodepath-m/44'/60'/0'/24/ , relayerIndex-6 , address-465d19a4d9e7a33cae419626cce44b652e96aeb0 +nodepath-m/44'/60'/0'/24/ , relayerIndex-7 , address-f813a1457322411c44209ee82bfd0e4de4ee059b +nodepath-m/44'/60'/0'/24/ , relayerIndex-8 , address-d33210432382b5bf6fb422037198abeab7186158 +nodepath-m/44'/60'/0'/24/ , relayerIndex-9 , address-1a4653ea757b6b862ecc76859b835eed41ab1b4b +nodepath-m/44'/60'/0'/24/ , relayerIndex-10 , address-bca854bdee177fa0bafa19db4f0d8d9f96876788 +nodepath-m/44'/60'/0'/24/ , relayerIndex-11 , address-1257141c8f8346d9a8344668c131deaaf37e0abe +nodepath-m/44'/60'/0'/24/ , relayerIndex-12 , address-a058ef8a56e830f39703d25cf653de178292cc80 +nodepath-m/44'/60'/0'/24/ , relayerIndex-13 , address-5c5c9cf3e3df0c781a5a4ef3d7912070929a194e +nodepath-m/44'/60'/0'/24/ , relayerIndex-14 , address-44cb6f338e02c1e317d04874e4c000f41f26928c +nodepath-m/44'/60'/0'/24/ , 
relayerIndex-15 , address-ccd93804019de3585b3b259ee921f53141bf9f73 +nodepath-m/44'/60'/0'/24/ , relayerIndex-16 , address-60f07759887bc09178e11ef017ee8f1835117948 +nodepath-m/44'/60'/0'/24/ , relayerIndex-17 , address-a1879d6ebff668ef68fac87ce5038acde8f18e09 +nodepath-m/44'/60'/0'/24/ , relayerIndex-18 , address-49b495737a6c2da70c817ef7de3050c5df8a715c +nodepath-m/44'/60'/0'/24/ , relayerIndex-19 , address-58468bf7730fcf42eaed7c3bd6cedc5e317f26c3 +nodepath-m/44'/60'/0'/24/ , relayerIndex-20 , address-4ca0566e01c3fd8ea732aa2e08465e0e0301a152 +nodepath-m/44'/60'/0'/24/ , relayerIndex-21 , address-ea1684be6b3c9a351e652776d589723d9bf673ad +nodepath-m/44'/60'/0'/24/ , relayerIndex-22 , address-1a9f2d559b9b6869a09189cfecbdf97875696284 +nodepath-m/44'/60'/0'/24/ , relayerIndex-23 , address-27d04bb697f5b5fb8b9ea93e3f599fbfcd93f6a9 +nodepath-m/44'/60'/0'/24/ , relayerIndex-24 , address-2a98922c7ed300655da392ab4acb1a046c602fc4 +nodepath-m/44'/60'/0'/24/ , relayerIndex-25 , address-01784f50ae54adf9a2b6fd631791b4ff842d43b4 +nodepath-m/44'/60'/0'/24/ , relayerIndex-26 , address-7895505eb84cc5a14213465b079690c1d32ae75e +nodepath-m/44'/60'/0'/24/ , relayerIndex-27 , address-a519ef6f675202ecb07a1aa0294eec6f4a99cb5e +nodepath-m/44'/60'/0'/24/ , relayerIndex-28 , address-13686c3cc11d540d7da8880ebf9526967408c76e +nodepath-m/44'/60'/0'/24/ , relayerIndex-29 , address-7856c3ca2f999d9075e47ad807857fe9836da0a7 +nodepath-m/44'/60'/0'/24/ , relayerIndex-30 , address-74e9320b271fff1f78f4ec4ec9d23b2800733fec +nodepath-m/44'/60'/0'/24/ , relayerIndex-31 , address-b2a94b601d346fbb2b35fb87579de031643b317d +nodepath-m/44'/60'/0'/24/ , relayerIndex-32 , address-24ff96ad635d2a431ab7a016f13f60ec211c4655 +nodepath-m/44'/60'/0'/24/ , relayerIndex-33 , address-8021d2c40a0a6a480d7eb89920776a20347be116 +nodepath-m/44'/60'/0'/24/ , relayerIndex-34 , address-a7162e829dde7eecba0f631b8ca5b20e22fdff1f +nodepath-m/44'/60'/0'/24/ , relayerIndex-35 , address-ed2dfbe950aad8e4318413a6b4b148275a1d281c +nodepath-m/44'/60'/0'/24/ , relayerIndex-36 , address-ae182cdab7be644e8ebf35c8146f55c03144ba3e +nodepath-m/44'/60'/0'/24/ , relayerIndex-37 , address-e2770318fbca84e17cce555515fc2fd8044e25d3 +nodepath-m/44'/60'/0'/24/ , relayerIndex-38 , address-737ca5b3941ed1fee5779fb2ebf8ae7f600babd0 +nodepath-m/44'/60'/0'/24/ , relayerIndex-39 , address-8634c06c61c95bbcccf3997c37b236ea150a8815 +nodepath-m/44'/60'/0'/24/ , relayerIndex-40 , address-1232845a2d546bedf997fb57cc56e47809e6e53e +nodepath-m/44'/60'/0'/24/ , relayerIndex-41 , address-8aa993dd76e7c5f73f2137da0e4dd776be405f64 +nodepath-m/44'/60'/0'/24/ , relayerIndex-42 , address-36b72fd8cc6ced0eb4664579c4f026ca4c755623 +nodepath-m/44'/60'/0'/24/ , relayerIndex-43 , address-581c87b811e884a10fdf655a3449d7fcd0eb10f2 +nodepath-m/44'/60'/0'/24/ , relayerIndex-44 , address-a86a915f22b2546da99519bf44178532d71baf45 +nodepath-m/44'/60'/0'/24/ , relayerIndex-45 , address-c88ab73ec3f3f431a47a09dfe7c28bcb37a9fc68 +nodepath-m/44'/60'/0'/24/ , relayerIndex-46 , address-0ce2aeeef477e4e28cce2a4aa1488a2b7acd2025 +nodepath-m/44'/60'/0'/24/ , relayerIndex-47 , address-e66209395917e5bfbe6cfd0009e0fe87d49c555c +nodepath-m/44'/60'/0'/24/ , relayerIndex-48 , address-53289bf1f143dcf6fb33fc43ceb7e7aa79437143 +nodepath-m/44'/60'/0'/24/ , relayerIndex-49 , address-ed13f39eea3df3bb8d696761c81c417d8a8b3e1c +nodepath-m/44'/60'/0'/25/ , relayerIndex-0 , address-66ba14f114b77ae97b21bb613e5927b07e90b7a3 +nodepath-m/44'/60'/0'/25/ , relayerIndex-1 , address-20a29c72c45209af402db8a0d2c4160257b9dae8 +nodepath-m/44'/60'/0'/25/ , 
relayerIndex-2 , address-1996f2a59e050fa6a1d39096008125161c4a01df +nodepath-m/44'/60'/0'/25/ , relayerIndex-3 , address-8eb8c592284e46b85264b01d26382fcc1c7db5ea +nodepath-m/44'/60'/0'/25/ , relayerIndex-4 , address-14f552c7d123e73721aec81f14890f6ef81ef518 +nodepath-m/44'/60'/0'/25/ , relayerIndex-5 , address-2ae08eea046c905723e11fba7c8a5209b9d8097c +nodepath-m/44'/60'/0'/25/ , relayerIndex-6 , address-44be80f343445f1922d2519eeb6dde3b7557f335 +nodepath-m/44'/60'/0'/25/ , relayerIndex-7 , address-78871d16c9dcfe313815800ac3b94939e63dd552 +nodepath-m/44'/60'/0'/25/ , relayerIndex-8 , address-45a657b790f07b3be51b7fcc1868c64cb33a8438 +nodepath-m/44'/60'/0'/25/ , relayerIndex-9 , address-f45874ea2b8c8c51b60fed324542aa7be48cd711 +nodepath-m/44'/60'/0'/25/ , relayerIndex-10 , address-97c40035ce684ac449988afb4349f47eebba1263 +nodepath-m/44'/60'/0'/25/ , relayerIndex-11 , address-879c623508da2cd9c04a23b9af4e74129d5e39f0 +nodepath-m/44'/60'/0'/25/ , relayerIndex-12 , address-3a6b0e646744d4e8d03666cf21bf25df29bd9b70 +nodepath-m/44'/60'/0'/25/ , relayerIndex-13 , address-4cde313e3616d4f7bc440b242508de840628b8bf +nodepath-m/44'/60'/0'/25/ , relayerIndex-14 , address-843b8c7df65c0ca4e5a5ce48c1582d257b45baef +nodepath-m/44'/60'/0'/25/ , relayerIndex-15 , address-8130cf8c9febd3fdad3b53fcb7abb49d14863ef1 +nodepath-m/44'/60'/0'/25/ , relayerIndex-16 , address-03f1bf97de58729eb2a6ca2406426ad182fefc21 +nodepath-m/44'/60'/0'/25/ , relayerIndex-17 , address-cf7d7f4e07351b2a59b229d3481700a954e3606a +nodepath-m/44'/60'/0'/25/ , relayerIndex-18 , address-af98a90796ad4b3aae1a5d6814f615055095db8d +nodepath-m/44'/60'/0'/25/ , relayerIndex-19 , address-7a460d470865a9f1c34c8faf6cd9c46eb302c59e +nodepath-m/44'/60'/0'/25/ , relayerIndex-20 , address-3989207a58b64c4a105bca961525eb1201663a80 +nodepath-m/44'/60'/0'/25/ , relayerIndex-21 , address-89b3fcfcae4d95f7e7ec851effc4e89ab3b69381 +nodepath-m/44'/60'/0'/25/ , relayerIndex-22 , address-15f44576199ffe8b013fb63413b78d7259421fec +nodepath-m/44'/60'/0'/25/ , relayerIndex-23 , address-31bebddfc0b6ad646fb122e4764c7e25a87f6d93 +nodepath-m/44'/60'/0'/25/ , relayerIndex-24 , address-6fb6db4a0a6c7186b54baf92f2f52345fd35b441 +nodepath-m/44'/60'/0'/25/ , relayerIndex-25 , address-14bf4c7b229e3c0c61847434febdff98064947b0 +nodepath-m/44'/60'/0'/25/ , relayerIndex-26 , address-537236d07935767618813d2f83a2e01dff088283 +nodepath-m/44'/60'/0'/25/ , relayerIndex-27 , address-90f4a94448a2dfa347fbfa7a2d759d2516d32490 +nodepath-m/44'/60'/0'/25/ , relayerIndex-28 , address-2e43869b53c7e3c46dd89abd78ba0a0cb1a03d36 +nodepath-m/44'/60'/0'/25/ , relayerIndex-29 , address-547a22098dcd0d55417f0751180b24e276868226 +nodepath-m/44'/60'/0'/25/ , relayerIndex-30 , address-e9bd6a2dc8db48cbb1e818c611f0f2b2ba0c9275 +nodepath-m/44'/60'/0'/25/ , relayerIndex-31 , address-37e1c71b470ce67c4c370e111e3c6f20310f9d76 +nodepath-m/44'/60'/0'/25/ , relayerIndex-32 , address-fe0703d5ba50a83d8125742fd5e239f48d691767 +nodepath-m/44'/60'/0'/25/ , relayerIndex-33 , address-12c6496d89035d299715d714903c9c95bb91c2a0 +nodepath-m/44'/60'/0'/25/ , relayerIndex-34 , address-d07b7db683ad53916ec362727ce93111821147e5 +nodepath-m/44'/60'/0'/25/ , relayerIndex-35 , address-f2efe7afad91ab07021d882f49982a3a5c566aec +nodepath-m/44'/60'/0'/25/ , relayerIndex-36 , address-a837000e0d9d742e2554323b6cc7e4199f5d4170 +nodepath-m/44'/60'/0'/25/ , relayerIndex-37 , address-89ab2848e0940938d6c657693a2a8f5bf3122f48 +nodepath-m/44'/60'/0'/25/ , relayerIndex-38 , address-646e1ac29e065a68804e77cb733ba48f723aee21 +nodepath-m/44'/60'/0'/25/ , 
relayerIndex-39 , address-717e1a183552be4f16f48f6d38765baa278d52b5 +nodepath-m/44'/60'/0'/25/ , relayerIndex-40 , address-ffca161fab8660eec11cc96011cb252f297c8921 +nodepath-m/44'/60'/0'/25/ , relayerIndex-41 , address-e6895c05e25cf9763862743cc10f0e08c5845709 +nodepath-m/44'/60'/0'/25/ , relayerIndex-42 , address-f2ddb30cf7d55a7b6b0e145e3c7939b8739e4344 +nodepath-m/44'/60'/0'/25/ , relayerIndex-43 , address-f4f65be469417b001629da1b9a2e61acc2741c43 +nodepath-m/44'/60'/0'/25/ , relayerIndex-44 , address-e60a518e736094615c51c51c15ab57c0a24da37d +nodepath-m/44'/60'/0'/25/ , relayerIndex-45 , address-b166b56689f79c36620448a9ac6e008ffd5e93e6 +nodepath-m/44'/60'/0'/25/ , relayerIndex-46 , address-8e9a49548c8e2c7fd08e4b8588c4e3ce9d7cff60 +nodepath-m/44'/60'/0'/25/ , relayerIndex-47 , address-68b99a8f7edd54f74ba89b86e22c5445731eec89 +nodepath-m/44'/60'/0'/25/ , relayerIndex-48 , address-52c4f1065d02051c10328dc73bd54dd4333933f4 +nodepath-m/44'/60'/0'/25/ , relayerIndex-49 , address-4e4498d160464f1cd444b3b9f70cb4e5f69cfb59 +nodepath-m/44'/60'/0'/26/ , relayerIndex-0 , address-bfcf23132fb006697ebafa39113b176653cc44ba +nodepath-m/44'/60'/0'/26/ , relayerIndex-1 , address-3593e34138b06d2de4955884ce2a268ff761e076 +nodepath-m/44'/60'/0'/26/ , relayerIndex-2 , address-95c89224556e975572f99656c03492b589382f7a +nodepath-m/44'/60'/0'/26/ , relayerIndex-3 , address-d10c1b79d8d5a7f59180b3b590a2d65c18dfc613 +nodepath-m/44'/60'/0'/26/ , relayerIndex-4 , address-aa81369943001b33f323c5c635fa8e4d6ba6519a +nodepath-m/44'/60'/0'/26/ , relayerIndex-5 , address-14791615dfc51e07941b6dc783950d15ec58cf66 +nodepath-m/44'/60'/0'/26/ , relayerIndex-6 , address-18b91e3871aa3b2ecc016c3e621712ec1bbd912c +nodepath-m/44'/60'/0'/26/ , relayerIndex-7 , address-384106b40393eb83fc223266a9f280776acdbe8b +nodepath-m/44'/60'/0'/26/ , relayerIndex-8 , address-77f59a0354e08bd5d5cb2afb1a4b67ca9ccab239 +nodepath-m/44'/60'/0'/26/ , relayerIndex-9 , address-d5ef4271d763cc62f47556ca8cc1425aae053636 +nodepath-m/44'/60'/0'/26/ , relayerIndex-10 , address-084c2cb838e341dc0c2646e590a6a0514d6c3db2 +nodepath-m/44'/60'/0'/26/ , relayerIndex-11 , address-4cf1a6ad43e1deebc281b671d08b66dd5e2aee60 +nodepath-m/44'/60'/0'/26/ , relayerIndex-12 , address-cc0b3cbf5ba7cc267b2e6b992978466003d60666 +nodepath-m/44'/60'/0'/26/ , relayerIndex-13 , address-d4dd8eb98af5cb7ff52eab92d1c0cb0fd9eed8a0 +nodepath-m/44'/60'/0'/26/ , relayerIndex-14 , address-9564edf31157925cfcea72028ed383c94fe4b313 +nodepath-m/44'/60'/0'/26/ , relayerIndex-15 , address-892536315610b2461582bbcd2732f1392b57f6de +nodepath-m/44'/60'/0'/26/ , relayerIndex-16 , address-1f9c6022a7f6077485e01e747fbaca74c0d26ae6 +nodepath-m/44'/60'/0'/26/ , relayerIndex-17 , address-001631cdb62f6abffb709de429ba1a4c1efa19e2 +nodepath-m/44'/60'/0'/26/ , relayerIndex-18 , address-11881a45c9488ee901f7550ba84a4566ff09c752 +nodepath-m/44'/60'/0'/26/ , relayerIndex-19 , address-bab504d77ebad26bd4792f12b4fdd94c8c399154 +nodepath-m/44'/60'/0'/26/ , relayerIndex-20 , address-63b3a750780a3310b9364b9b31dbab7d7a02c7c4 +nodepath-m/44'/60'/0'/26/ , relayerIndex-21 , address-dd8373b398d3816630f13b6fd47f0cdaa7c9dc5e +nodepath-m/44'/60'/0'/26/ , relayerIndex-22 , address-d2b2073cf11cf5806606c899b87edd32c61bbfdc +nodepath-m/44'/60'/0'/26/ , relayerIndex-23 , address-785c2cac87947eaf70678e32f36dfac6b0a8628e +nodepath-m/44'/60'/0'/26/ , relayerIndex-24 , address-84b36e4cf798d9334ee79dacc829680ae39c2aa2 +nodepath-m/44'/60'/0'/26/ , relayerIndex-25 , address-52f863072be5e8e863893ab5696aa40fca4854df +nodepath-m/44'/60'/0'/26/ , 
relayerIndex-26 , address-a092cba4de189ff612ee5d4c6cefc8ce0a92ece4 +nodepath-m/44'/60'/0'/26/ , relayerIndex-27 , address-683f9c7a402ce3a3392fd6adaf1d3f9bb8cdf104 +nodepath-m/44'/60'/0'/26/ , relayerIndex-28 , address-e478ec4f9982a2299c28916b453413d85c74097b +nodepath-m/44'/60'/0'/26/ , relayerIndex-29 , address-acc539107c7535c63af00a5670ed88ba0961a53a +nodepath-m/44'/60'/0'/26/ , relayerIndex-30 , address-3260d3a9137659a4ee82edd44d3aa8a6cc255293 +nodepath-m/44'/60'/0'/26/ , relayerIndex-31 , address-df2d9de68434de13ead4a6e6ac4e4780b816be80 +nodepath-m/44'/60'/0'/26/ , relayerIndex-32 , address-05d9d2b904f2e502a430aef6648c21a8188d1322 +nodepath-m/44'/60'/0'/26/ , relayerIndex-33 , address-4c667949c5ba5e114d80b948999ac1bf21e68251 +nodepath-m/44'/60'/0'/26/ , relayerIndex-34 , address-c6882427762fc3d1850dd502b69f3b3e0257a4e1 +nodepath-m/44'/60'/0'/26/ , relayerIndex-35 , address-a281e11a381131109a1ba64e69fd3671fd62f23e +nodepath-m/44'/60'/0'/26/ , relayerIndex-36 , address-339b0e092548669bca439688f716dac031fdc87e +nodepath-m/44'/60'/0'/26/ , relayerIndex-37 , address-af791d0a1de3a3d634a641b843ec7489b3588140 +nodepath-m/44'/60'/0'/26/ , relayerIndex-38 , address-1079503da6f45f0927c6544f4be129d2270739b8 +nodepath-m/44'/60'/0'/26/ , relayerIndex-39 , address-fec5c09c755c02209cebf177b3392032120d422d +nodepath-m/44'/60'/0'/26/ , relayerIndex-40 , address-b3a571117d6758e834dfc51d9f61871b648828a0 +nodepath-m/44'/60'/0'/26/ , relayerIndex-41 , address-9a6d248a3daa1f34f9e6a5e6ef70039e3f90b983 +nodepath-m/44'/60'/0'/26/ , relayerIndex-42 , address-2612871b81ff1460524637254ca95e85154e90b2 +nodepath-m/44'/60'/0'/26/ , relayerIndex-43 , address-521855b6ec4d026ea92d21c3063d0d4f8e629704 +nodepath-m/44'/60'/0'/26/ , relayerIndex-44 , address-c73bdb3b30b2ec409c93687d5cf5bbba3ae8db7f +nodepath-m/44'/60'/0'/26/ , relayerIndex-45 , address-2f7eeb53884932363b19ed0e0124b84d75e36284 +nodepath-m/44'/60'/0'/26/ , relayerIndex-46 , address-d79efbadc4dff9c6e63b52d7654b76fd519bf56b +nodepath-m/44'/60'/0'/26/ , relayerIndex-47 , address-d2972e3211a8473ea908b18f5c3e5f4cabd4dd27 +nodepath-m/44'/60'/0'/26/ , relayerIndex-48 , address-6faf717fd4ff5c611b65aa596e5b954cd47cc61e +nodepath-m/44'/60'/0'/26/ , relayerIndex-49 , address-98ac1ec9496905835c5f196b7accbb9d195f92cb +nodepath-m/44'/60'/0'/27/ , relayerIndex-0 , address-187084284889e943d862159ef31967c86cf26075 +nodepath-m/44'/60'/0'/27/ , relayerIndex-1 , address-17926ff11f77e8d19552856c7296e5434cd5034e +nodepath-m/44'/60'/0'/27/ , relayerIndex-2 , address-4c757e44567a449806d3d47998c2f2a928ae621a +nodepath-m/44'/60'/0'/27/ , relayerIndex-3 , address-ec3cbd120c7e153e74062d21589b6312d4fe6215 +nodepath-m/44'/60'/0'/27/ , relayerIndex-4 , address-3124dcf42595e5263a5ae7ac1f7c6b740f7f785c +nodepath-m/44'/60'/0'/27/ , relayerIndex-5 , address-a245e59cb5f3cee70bea6f3c8846b90d0b13d37e +nodepath-m/44'/60'/0'/27/ , relayerIndex-6 , address-4d41dcade980b1d7ddec2a5770c4e18cf7cd5ae4 +nodepath-m/44'/60'/0'/27/ , relayerIndex-7 , address-bb1cf9027bb7b9130b589a8df54ec9fb228eaf00 +nodepath-m/44'/60'/0'/27/ , relayerIndex-8 , address-9c6abeef4adc2b27ce74ba316049c2fcea864df2 +nodepath-m/44'/60'/0'/27/ , relayerIndex-9 , address-324cf01b5736128fb8947f5eca463d0b6b914ec4 +nodepath-m/44'/60'/0'/27/ , relayerIndex-10 , address-5b99f17f4cba3f8282ebfc78377332341b3e13bb +nodepath-m/44'/60'/0'/27/ , relayerIndex-11 , address-675deae8c54cd6dbe22ee6b85f9c84af015aa4b6 +nodepath-m/44'/60'/0'/27/ , relayerIndex-12 , address-03ad1a89364c37c88f240e4b9156f75c57155a3f +nodepath-m/44'/60'/0'/27/ , 
relayerIndex-13 , address-c5f1473b068aa89cb25b083832cb11925c9c146a +nodepath-m/44'/60'/0'/27/ , relayerIndex-14 , address-cda2ab281a8c7c3b46381cccd842e43f7ffd49bf +nodepath-m/44'/60'/0'/27/ , relayerIndex-15 , address-7e57130be874dcef332c1169e4d781dc1fea662f +nodepath-m/44'/60'/0'/27/ , relayerIndex-16 , address-47a7dad0372ad93255dded98132caad0678d6533 +nodepath-m/44'/60'/0'/27/ , relayerIndex-17 , address-aa3df990a713a2abdae5dd023c2406f4ce88cbaa +nodepath-m/44'/60'/0'/27/ , relayerIndex-18 , address-852226a4b2a21fbca97a628fc3f4815def09d0be +nodepath-m/44'/60'/0'/27/ , relayerIndex-19 , address-ea0115fb34baadc48bcd006955ab995f28748e22 +nodepath-m/44'/60'/0'/27/ , relayerIndex-20 , address-6af66f492b4264a22d198408d25936cc8a5ec20d +nodepath-m/44'/60'/0'/27/ , relayerIndex-21 , address-d8bbeb86ae29a09abdc618493c12153570759fc5 +nodepath-m/44'/60'/0'/27/ , relayerIndex-22 , address-30a8bfd7578ef09f60bbd6f224d9fe9fe5f82bcf +nodepath-m/44'/60'/0'/27/ , relayerIndex-23 , address-77dedc376685e3d3b321e160e97602d6294e58e6 +nodepath-m/44'/60'/0'/27/ , relayerIndex-24 , address-201e0a496850dffe30eca2d282ba4bd609a2d83a +nodepath-m/44'/60'/0'/27/ , relayerIndex-25 , address-de7a1b50ff6911c5d78ec07aa01f8ae1c9ac932c +nodepath-m/44'/60'/0'/27/ , relayerIndex-26 , address-0556f5c9128c59a919dd15dfc759604495dfea8a +nodepath-m/44'/60'/0'/27/ , relayerIndex-27 , address-4e0484fc0a887c9d2062dd37e666f11a59e5c651 +nodepath-m/44'/60'/0'/27/ , relayerIndex-28 , address-0b488079a8f82265f06e04fb83fad53c8fb3b186 +nodepath-m/44'/60'/0'/27/ , relayerIndex-29 , address-ef3f00876b2705b00222b9b2138fee642c551801 +nodepath-m/44'/60'/0'/27/ , relayerIndex-30 , address-4a0d34ef57fa10a0b4e7218406cfa1de7c73738b +nodepath-m/44'/60'/0'/27/ , relayerIndex-31 , address-55fdd325d944afedb410e2114b1adde43731eaa7 +nodepath-m/44'/60'/0'/27/ , relayerIndex-32 , address-0d63d32874ccafc9083057c1754ee5ce5bfe2fab +nodepath-m/44'/60'/0'/27/ , relayerIndex-33 , address-5a155cc493aa1121f83c6315c87ce97b193e2dd2 +nodepath-m/44'/60'/0'/27/ , relayerIndex-34 , address-6b436a11805426c977b9fad1557aac2e404ec24f +nodepath-m/44'/60'/0'/27/ , relayerIndex-35 , address-dde17f5b0b160f11297f607b5ebb914f091ced38 +nodepath-m/44'/60'/0'/27/ , relayerIndex-36 , address-979b564bcd91b3c1337cf660d2ba86cc8b446535 +nodepath-m/44'/60'/0'/27/ , relayerIndex-37 , address-71a7150c7a37ce94cd8e8f3cecbf8a4db136e0ba +nodepath-m/44'/60'/0'/27/ , relayerIndex-38 , address-5352ba234f1be21c007ecd465ea3f966cd1959bf +nodepath-m/44'/60'/0'/27/ , relayerIndex-39 , address-76b5728201500bbf3591324802fed737a602f5f4 +nodepath-m/44'/60'/0'/27/ , relayerIndex-40 , address-53a957de73147828cae52899820a8de351b7dc74 +nodepath-m/44'/60'/0'/27/ , relayerIndex-41 , address-58bfac9de2379494fa3b5cfff11ba436127343a2 +nodepath-m/44'/60'/0'/27/ , relayerIndex-42 , address-9c23f09e41e924d47f5afdc1d507679ac17c9d34 +nodepath-m/44'/60'/0'/27/ , relayerIndex-43 , address-48fc75dc94e605408943a44934c842fe44d7766b +nodepath-m/44'/60'/0'/27/ , relayerIndex-44 , address-bc5ae6c59ca95d57dfc4cc41e7a55df6f843045c +nodepath-m/44'/60'/0'/27/ , relayerIndex-45 , address-ab93cf83fdc48a797da5b04624909a5626500a36 +nodepath-m/44'/60'/0'/27/ , relayerIndex-46 , address-40229294776c4156f7fef3f5e1cfef484087c902 +nodepath-m/44'/60'/0'/27/ , relayerIndex-47 , address-d63c6aa9a6994f30e297db5e537a3e1c2016be20 +nodepath-m/44'/60'/0'/27/ , relayerIndex-48 , address-65848140727458bc23227aebcd4425f1a000c722 +nodepath-m/44'/60'/0'/27/ , relayerIndex-49 , address-568f26aff8f9ccca1318034e5dc5666d2723fc26 +nodepath-m/44'/60'/0'/28/ , 
relayerIndex-0 , address-38d87b27429ed06b6f7d71076d4821ecd7954d91 +nodepath-m/44'/60'/0'/28/ , relayerIndex-1 , address-b2d3048c914ebf03cb7b278c25abea89291d3238 +nodepath-m/44'/60'/0'/28/ , relayerIndex-2 , address-e6a5320be3819266ca04e551793ef72cc88c2331 +nodepath-m/44'/60'/0'/28/ , relayerIndex-3 , address-23f111d2a95b3a5033a979ec3e5b6e3239eba9f8 +nodepath-m/44'/60'/0'/28/ , relayerIndex-4 , address-b700f06af195e40050e15d6b4e171a68a654ff39 +nodepath-m/44'/60'/0'/28/ , relayerIndex-5 , address-43857fd0548730ca15c0e1d23bbfa27d557ea59c +nodepath-m/44'/60'/0'/28/ , relayerIndex-6 , address-dbd126be7eb1006329db5b9cb98ea7b99df6e99a +nodepath-m/44'/60'/0'/28/ , relayerIndex-7 , address-c11ca4ff56c7723090de30d7238b885f24436e70 +nodepath-m/44'/60'/0'/28/ , relayerIndex-8 , address-5e4f06807820aa0647c1c4ac1b125c91b47f8139 +nodepath-m/44'/60'/0'/28/ , relayerIndex-9 , address-d6fa77290b43f6e0d74b1f7f597c5b620143e6c1 +nodepath-m/44'/60'/0'/28/ , relayerIndex-10 , address-e5276cb094e707c9edb49c08a37a908f0973c6f6 +nodepath-m/44'/60'/0'/28/ , relayerIndex-11 , address-0f5919d19928496c6458b2002043613619413169 +nodepath-m/44'/60'/0'/28/ , relayerIndex-12 , address-e5410400e152a86a84f9748be2758b2a2b78354c +nodepath-m/44'/60'/0'/28/ , relayerIndex-13 , address-3cb4c53e93da7f2a6870a7f417ac1bf7b7ecb1a5 +nodepath-m/44'/60'/0'/28/ , relayerIndex-14 , address-6ce12f4a390d2b65613a4acdabbfe91ee0a7515e +nodepath-m/44'/60'/0'/28/ , relayerIndex-15 , address-b86a0e0a9a2d9a75b0ac3e44746092faf9c9252e +nodepath-m/44'/60'/0'/28/ , relayerIndex-16 , address-94d21f4e953aa821bb3e7658a5f070ed0e8eff47 +nodepath-m/44'/60'/0'/28/ , relayerIndex-17 , address-2de4e59c82734fb226701b1ae93cf28809c5e623 +nodepath-m/44'/60'/0'/28/ , relayerIndex-18 , address-3e88f7b4968c0fb3366f9847cd9bdb03f900fc5b +nodepath-m/44'/60'/0'/28/ , relayerIndex-19 , address-1b047bfcb0c6f691ba8bdf9283ca41b3543639c8 +nodepath-m/44'/60'/0'/28/ , relayerIndex-20 , address-73a080b74f5b2ce395aacfbc043ed3aead815f9a +nodepath-m/44'/60'/0'/28/ , relayerIndex-21 , address-28e437df585f0728cb18a24b39334257d640802c +nodepath-m/44'/60'/0'/28/ , relayerIndex-22 , address-783eebed01d0448c6215b39b9dfadd9ab971f973 +nodepath-m/44'/60'/0'/28/ , relayerIndex-23 , address-c2d7b390562ae577c782f4ae0fa44e7e7b908b43 +nodepath-m/44'/60'/0'/28/ , relayerIndex-24 , address-c3fa03f21bb248c180f19d5907832b8c7a10e85a +nodepath-m/44'/60'/0'/28/ , relayerIndex-25 , address-c0c42fd56a3584adc0e15257261462ad9218a454 +nodepath-m/44'/60'/0'/28/ , relayerIndex-26 , address-0a31bd3df58eceeb5e54086beff3ab4a84c96e3a +nodepath-m/44'/60'/0'/28/ , relayerIndex-27 , address-0ec31b56aafeed60daedb3e39e1bfb6b016a7654 +nodepath-m/44'/60'/0'/28/ , relayerIndex-28 , address-29919d57f157ca1758498d1371bbe560634b1bc5 +nodepath-m/44'/60'/0'/28/ , relayerIndex-29 , address-2f3261e8858de74051674e72f0fda53151dd7239 +nodepath-m/44'/60'/0'/28/ , relayerIndex-30 , address-de8dc09e2bfcf126523c5bfeb82932dcb5e169e0 +nodepath-m/44'/60'/0'/28/ , relayerIndex-31 , address-844d1fd97abd1abc19ba1fe9ad6cae9af1544884 +nodepath-m/44'/60'/0'/28/ , relayerIndex-32 , address-8c4810e6a3a07ff003e16edd8670fb6014b99faa +nodepath-m/44'/60'/0'/28/ , relayerIndex-33 , address-c9ebbaac834aa9a320baf994e4915c6c82e210b6 +nodepath-m/44'/60'/0'/28/ , relayerIndex-34 , address-eb61f959b1b8025c1899250b7a0a393d6ba23911 +nodepath-m/44'/60'/0'/28/ , relayerIndex-35 , address-7cd0db4a5d716e1aed60f018a3b4a6613d27c469 +nodepath-m/44'/60'/0'/28/ , relayerIndex-36 , address-8fcdf854921998c8c761cc9f10e68591891a2cb6 +nodepath-m/44'/60'/0'/28/ , 
relayerIndex-37 , address-30bf0ffe2bc7000a87ec9bc95ae5b0ee480366e5 +nodepath-m/44'/60'/0'/28/ , relayerIndex-38 , address-8d2576a1c485809b8aed64496ca9f7fe94502973 +nodepath-m/44'/60'/0'/28/ , relayerIndex-39 , address-92dbe8948c3f234101f74418633a8f1e29fa0c9f +nodepath-m/44'/60'/0'/28/ , relayerIndex-40 , address-92faacc116281c1869f94445aff8e169a4032e48 +nodepath-m/44'/60'/0'/28/ , relayerIndex-41 , address-7aa656477b4c031b306bf7f9a6ca96df3eeefb01 +nodepath-m/44'/60'/0'/28/ , relayerIndex-42 , address-1eb685e7b9dac1bc022639b4b3fb0d1afe1a5ad3 +nodepath-m/44'/60'/0'/28/ , relayerIndex-43 , address-6c3b3e94b94798942172c57f2197013c38d4b21f +nodepath-m/44'/60'/0'/28/ , relayerIndex-44 , address-0f01f700dd90722da0ed38a690a5115d3bec6b56 +nodepath-m/44'/60'/0'/28/ , relayerIndex-45 , address-65592a8776b3f01bacaa546bfffe5a234cf272fc +nodepath-m/44'/60'/0'/28/ , relayerIndex-46 , address-e36589165e433f173b586be1deda9b2641373d77 +nodepath-m/44'/60'/0'/28/ , relayerIndex-47 , address-e195e191634c916a2179955b4fe3223ace97c4ff +nodepath-m/44'/60'/0'/28/ , relayerIndex-48 , address-a84e3caeb723e14e1ae21bf982701d36df77cfe4 +nodepath-m/44'/60'/0'/28/ , relayerIndex-49 , address-a448409a6766dc69bdebd1abbb2cf66d7f4e3d63 +nodepath-m/44'/60'/0'/29/ , relayerIndex-0 , address-30a95ea0b46f36e61a7c28d70962595e3916f219 +nodepath-m/44'/60'/0'/29/ , relayerIndex-1 , address-cea0b49bc0b66ccc0c84c08740c9669c091caecf +nodepath-m/44'/60'/0'/29/ , relayerIndex-2 , address-7d1787cf4e5c46da2aa991b2ab7d4b89e4560693 +nodepath-m/44'/60'/0'/29/ , relayerIndex-3 , address-9e4dc55b239712d0be676b09b2e8fe84fe4f11bc +nodepath-m/44'/60'/0'/29/ , relayerIndex-4 , address-a2c1a286057df042fd212ca2368f8c78ab7a5064 +nodepath-m/44'/60'/0'/29/ , relayerIndex-5 , address-3839b4f7ba44707e16849f89e1c20fa9848c3972 +nodepath-m/44'/60'/0'/29/ , relayerIndex-6 , address-1e5aff067bafe5ee47963ca77ba71b02bfc0bf8f +nodepath-m/44'/60'/0'/29/ , relayerIndex-7 , address-c7b47474230a49af086c2e69a141ca7841c425c1 +nodepath-m/44'/60'/0'/29/ , relayerIndex-8 , address-0198f8399f1714c6c8e06c8c5d29aa5c3fa00c2e +nodepath-m/44'/60'/0'/29/ , relayerIndex-9 , address-a727b901748ec03708d99a39365723703b80654b +nodepath-m/44'/60'/0'/29/ , relayerIndex-10 , address-b3ab3ebbc1494a09cfcd90b78f84e9c523db476b +nodepath-m/44'/60'/0'/29/ , relayerIndex-11 , address-02c96af7ee10b108b84ba16d077cf974dccf1e5c +nodepath-m/44'/60'/0'/29/ , relayerIndex-12 , address-4216fd1158421501e1db83bfd082e4baad3ac7dd +nodepath-m/44'/60'/0'/29/ , relayerIndex-13 , address-069bdc16c975f413e5e3e2ebd57728d70f1d8e2f +nodepath-m/44'/60'/0'/29/ , relayerIndex-14 , address-19fb38729d8dd90ebe36879e4781a8f613f87811 +nodepath-m/44'/60'/0'/29/ , relayerIndex-15 , address-41eeb81a3e9c01afe352172e4803b09028d13d84 +nodepath-m/44'/60'/0'/29/ , relayerIndex-16 , address-a26851c20eb36c97dd7e7e096fd99bbda768253c +nodepath-m/44'/60'/0'/29/ , relayerIndex-17 , address-52935f07f8d7bdbe22835d689e8f87f325eaf9b3 +nodepath-m/44'/60'/0'/29/ , relayerIndex-18 , address-78e52cb494c945f79abd451bfd3dee6d56a61b7b +nodepath-m/44'/60'/0'/29/ , relayerIndex-19 , address-45cf0b7c9fe1e473b8b35363f1fdaf46c1dd2045 +nodepath-m/44'/60'/0'/29/ , relayerIndex-20 , address-7d19362ce80bebc331fc5fce3abbaf5734c1f477 +nodepath-m/44'/60'/0'/29/ , relayerIndex-21 , address-60bd72bf0819758f336cc68d5dcad90c4d65d46a +nodepath-m/44'/60'/0'/29/ , relayerIndex-22 , address-8403cfbafa971451aaa25a625b4fe91cc3bf60bd +nodepath-m/44'/60'/0'/29/ , relayerIndex-23 , address-e86afe0988c7da4922c39b8b4337f778b598efdc +nodepath-m/44'/60'/0'/29/ , 
relayerIndex-24 , address-7f4737c17061dcae6b24423bbff01c2a6fc8737e +nodepath-m/44'/60'/0'/29/ , relayerIndex-25 , address-ba3cd9278600cb215329dc127088c7d17f75df30 +nodepath-m/44'/60'/0'/29/ , relayerIndex-26 , address-4b2294bc852392558ed887cfafdaa4e91358c600 +nodepath-m/44'/60'/0'/29/ , relayerIndex-27 , address-00dac2c70d271c4512994bb89f82c7b41b8c994b +nodepath-m/44'/60'/0'/29/ , relayerIndex-28 , address-492ab5eaaacec0b12b8ea0a4f5b45a16a2a678fc +nodepath-m/44'/60'/0'/29/ , relayerIndex-29 , address-f183da9f78ffa66bf7fe685971b0fbe2bb3ff983 +nodepath-m/44'/60'/0'/29/ , relayerIndex-30 , address-b3a9de0e81feffeadcb387166d7d7a3972ef8a1e +nodepath-m/44'/60'/0'/29/ , relayerIndex-31 , address-51d439a5c34ea04cec115e6fb6f0227e7c0a7d22 +nodepath-m/44'/60'/0'/29/ , relayerIndex-32 , address-2729f64429bbe67e34a0cfabb20cd3ad1d604aed +nodepath-m/44'/60'/0'/29/ , relayerIndex-33 , address-0a1b675f0d6f2d023ad9e7d1a05254dbdcbbf091 +nodepath-m/44'/60'/0'/29/ , relayerIndex-34 , address-48b4934ffa0e0e420babd39e2614c0c0af85855b +nodepath-m/44'/60'/0'/29/ , relayerIndex-35 , address-db3fa4be5a87a20e2f36a9b65c975771aeaa95d9 +nodepath-m/44'/60'/0'/29/ , relayerIndex-36 , address-ddb5029dcbe2211752a2dc9202d02785bc6e33aa +nodepath-m/44'/60'/0'/29/ , relayerIndex-37 , address-e0c7a12270749ff0cec8985eb8626dec8f0a1312 +nodepath-m/44'/60'/0'/29/ , relayerIndex-38 , address-1d0c9f0468c51e23ff397232473957aba8dfd315 +nodepath-m/44'/60'/0'/29/ , relayerIndex-39 , address-827b0ef79803a51590a984396741a2e2bc6568c7 +nodepath-m/44'/60'/0'/29/ , relayerIndex-40 , address-ea08a1edcdcedb5ccfaf2e273187a769046721be +nodepath-m/44'/60'/0'/29/ , relayerIndex-41 , address-8c1a0e110cb2ac8913c26f802fd4513a210e7d26 +nodepath-m/44'/60'/0'/29/ , relayerIndex-42 , address-dbce1cd0ccf522c5ec0ac26111ca1c8dc730f26f +nodepath-m/44'/60'/0'/29/ , relayerIndex-43 , address-4ad75ed20659aa78f730328399419755d0daaa9d +nodepath-m/44'/60'/0'/29/ , relayerIndex-44 , address-0b604871f6ed43ebc5e6c33f5b711d0fcad4d9ea +nodepath-m/44'/60'/0'/29/ , relayerIndex-45 , address-71b84eb3fd922f7c521d8d4b555efb69b434bbe1 +nodepath-m/44'/60'/0'/29/ , relayerIndex-46 , address-ce7fe22102ae5b655d10a89f8a5b288692751272 +nodepath-m/44'/60'/0'/29/ , relayerIndex-47 , address-1a2ab560a12647f4f9bb62acc011619420cd14a5 +nodepath-m/44'/60'/0'/29/ , relayerIndex-48 , address-023f801817511f113a55708ed82a520ec5d96e64 +nodepath-m/44'/60'/0'/29/ , relayerIndex-49 , address-81094e35d7fb44b2a27dbbd83d96d1a4279f30ec +nodepath-m/44'/60'/0'/30/ , relayerIndex-0 , address-9cbe032337098a4efcb47f619c274c75438909ee +nodepath-m/44'/60'/0'/30/ , relayerIndex-1 , address-5c722adc2c4290cdfca08984019db09eeb17936a +nodepath-m/44'/60'/0'/30/ , relayerIndex-2 , address-114d374550e364ac51ca101cc07afc46cb6d5b99 +nodepath-m/44'/60'/0'/30/ , relayerIndex-3 , address-61834c3b19b2649b585027460a9268ed847c5c81 +nodepath-m/44'/60'/0'/30/ , relayerIndex-4 , address-9ec35d1703181e098914c7a029886195038ffa9a +nodepath-m/44'/60'/0'/30/ , relayerIndex-5 , address-0707b812053b99fe22799c83bf7d2f4bac15aa70 +nodepath-m/44'/60'/0'/30/ , relayerIndex-6 , address-dcea0456c80f4536e5652f5c65f7f865f78d6e89 +nodepath-m/44'/60'/0'/30/ , relayerIndex-7 , address-be66cf6982e538f551caae48eb8e744e4ec7d25b +nodepath-m/44'/60'/0'/30/ , relayerIndex-8 , address-c76416c5ef39674add9cd3b7dd1bf9d312c4eeb4 +nodepath-m/44'/60'/0'/30/ , relayerIndex-9 , address-fffbf7f4fdb0001221c17c0f96a1239ff20a964f +nodepath-m/44'/60'/0'/30/ , relayerIndex-10 , address-f9b3abba55fab008d6dd800931fdc06c7540d535 +nodepath-m/44'/60'/0'/30/ , 
relayerIndex-11 , address-2859173f589ceeb604d89acae8e09f98cf85565d +nodepath-m/44'/60'/0'/30/ , relayerIndex-12 , address-5f7a66f3e1a484e1383e53d84ff2b09e26e160fc +nodepath-m/44'/60'/0'/30/ , relayerIndex-13 , address-dd1e82fd1c638187a6ded90a594848de03b6b2b3 +nodepath-m/44'/60'/0'/30/ , relayerIndex-14 , address-95d3c57495c75a2a2333a0f5f318a2251aff1985 +nodepath-m/44'/60'/0'/30/ , relayerIndex-15 , address-d3854cf97d507633e4a1833e140e800281e90c47 +nodepath-m/44'/60'/0'/30/ , relayerIndex-16 , address-b6a522d5294d819fb494f71c754b3705fd45805d +nodepath-m/44'/60'/0'/30/ , relayerIndex-17 , address-df60a1f18ed7f39784fdeacf3b7ebe4ab9de4818 +nodepath-m/44'/60'/0'/30/ , relayerIndex-18 , address-b586a88551a6fd987285643fedb62d2546f27e06 +nodepath-m/44'/60'/0'/30/ , relayerIndex-19 , address-51c036d564418f463c49e666546b48b16ce7969d +nodepath-m/44'/60'/0'/30/ , relayerIndex-20 , address-77f58f2c48a2694eaa7eb61e63fcf1224a5506c3 +nodepath-m/44'/60'/0'/30/ , relayerIndex-21 , address-9160d980471227c84408d341951bbfb7ad6f45d0 +nodepath-m/44'/60'/0'/30/ , relayerIndex-22 , address-0b40c839278e588981fef7a3148c78df5f6cd1d2 +nodepath-m/44'/60'/0'/30/ , relayerIndex-23 , address-d5786bb5a04954e385cf548993dac69a65614cf8 +nodepath-m/44'/60'/0'/30/ , relayerIndex-24 , address-c2cee6409da5824bccede76d65220401c7b098f6 +nodepath-m/44'/60'/0'/30/ , relayerIndex-25 , address-1c3c578e33721a3262149c76d026da39c106f1b9 +nodepath-m/44'/60'/0'/30/ , relayerIndex-26 , address-c79e7ffbcba0d32f4add3b51a6f89f2dc8cbb3f9 +nodepath-m/44'/60'/0'/30/ , relayerIndex-27 , address-84f0a5e7be1dcf869c95ab23bdd5daf3b915bf49 +nodepath-m/44'/60'/0'/30/ , relayerIndex-28 , address-821ae0da8ef56caae315b15df4a38088024ba700 +nodepath-m/44'/60'/0'/30/ , relayerIndex-29 , address-2a91cd43996956014584e86b8b4d3cdb2f3121ef +nodepath-m/44'/60'/0'/30/ , relayerIndex-30 , address-f067cf56c985716b4086bc15dff2058770e1b56f +nodepath-m/44'/60'/0'/30/ , relayerIndex-31 , address-5a5399fa80d6cdb1e19e91d846a6763d155d594f +nodepath-m/44'/60'/0'/30/ , relayerIndex-32 , address-4c6c19795fb3b674ace5e810ca6c5753dcf2dfe6 +nodepath-m/44'/60'/0'/30/ , relayerIndex-33 , address-5e290e8cbb109c938ae84a180ce1cf8372a0726c +nodepath-m/44'/60'/0'/30/ , relayerIndex-34 , address-03af96a7d83362c810ee6f06af745df5f63b8b18 +nodepath-m/44'/60'/0'/30/ , relayerIndex-35 , address-aa2b4c12d380d648a9eb90c331df5c0f7efdbb91 +nodepath-m/44'/60'/0'/30/ , relayerIndex-36 , address-43c84d00e3033f3bd0ed55dd910149209d4085fc +nodepath-m/44'/60'/0'/30/ , relayerIndex-37 , address-e675787da8aca63b44c477a9b98552ddb13eb1f6 +nodepath-m/44'/60'/0'/30/ , relayerIndex-38 , address-d583ca7bd3405fc326468d092d3a224b79da102f +nodepath-m/44'/60'/0'/30/ , relayerIndex-39 , address-d09d130f8c81d5405828da658b77233b79ee4aa7 +nodepath-m/44'/60'/0'/30/ , relayerIndex-40 , address-d227b053c7ef7a2b628b9b78bcce967d15c99ff5 +nodepath-m/44'/60'/0'/30/ , relayerIndex-41 , address-f87ab45a620906e89626442a7dc2ee26075b35a2 +nodepath-m/44'/60'/0'/30/ , relayerIndex-42 , address-e30cdea9052e9380386cb60ce48770117007c275 +nodepath-m/44'/60'/0'/30/ , relayerIndex-43 , address-d917469ce62552eb99fb0c88f8e661e5fc9f16c9 +nodepath-m/44'/60'/0'/30/ , relayerIndex-44 , address-3d6f95ec3c275d9690429d330f207d9b3f3d72ae +nodepath-m/44'/60'/0'/30/ , relayerIndex-45 , address-55752b5fa907746b02caf0b47ed6a8684327f66e +nodepath-m/44'/60'/0'/30/ , relayerIndex-46 , address-0ad61324747b81ec8a04f91343825e3d7145d040 +nodepath-m/44'/60'/0'/30/ , relayerIndex-47 , address-11f9270d21ef6a454d69b825e539f99fc5f4af80 +nodepath-m/44'/60'/0'/30/ , 
relayerIndex-48 , address-05b2dc3a1f3edf9838a40c02bc248bd5ef291cb4 +nodepath-m/44'/60'/0'/30/ , relayerIndex-49 , address-9aa14b4ec1c28f12f9b0f5b5e9c1b7e5ab89312b +nodepath-m/44'/60'/0'/31/ , relayerIndex-0 , address-e9b51abc7558445ac3cebdfdd058580df412e62f +nodepath-m/44'/60'/0'/31/ , relayerIndex-1 , address-c1786e3b113904032b20b24772dfb3762f314226 +nodepath-m/44'/60'/0'/31/ , relayerIndex-2 , address-01d821a386d80ba5b2ce945dad147b208b7031d7 +nodepath-m/44'/60'/0'/31/ , relayerIndex-3 , address-a153bd4a372477fb8545a6c5cde920cca59661c6 +nodepath-m/44'/60'/0'/31/ , relayerIndex-4 , address-69615dc64b9678597745b21af7ad2748a497eaa8 +nodepath-m/44'/60'/0'/31/ , relayerIndex-5 , address-408bbff1dac7bfe0c6beed6a81994592bb063ee3 +nodepath-m/44'/60'/0'/31/ , relayerIndex-6 , address-b31b5cf43f8b21bf7028d08dfd3fead716ac0945 +nodepath-m/44'/60'/0'/31/ , relayerIndex-7 , address-6cc1dd8bc75dd59e1fe8af177e9f2b5f213f3023 +nodepath-m/44'/60'/0'/31/ , relayerIndex-8 , address-56b1f5a88a8da818d965ade7c94627ce2d2f42a8 +nodepath-m/44'/60'/0'/31/ , relayerIndex-9 , address-ddbcf0cdb7994c155977f49ea517472a409515ee +nodepath-m/44'/60'/0'/31/ , relayerIndex-10 , address-b584a683d29c370c01771583f2f56af38535e3a4 +nodepath-m/44'/60'/0'/31/ , relayerIndex-11 , address-a76c061d48f89a0cf764719d828d0bc24374fda4 +nodepath-m/44'/60'/0'/31/ , relayerIndex-12 , address-b7765668412c04c6c8268fddbc972082398d3726 +nodepath-m/44'/60'/0'/31/ , relayerIndex-13 , address-072f9a634f3c132445c361848cf8efd5742dbddd +nodepath-m/44'/60'/0'/31/ , relayerIndex-14 , address-5437adb11bb25b11cc33ddc09ca9a17e4816394d +nodepath-m/44'/60'/0'/31/ , relayerIndex-15 , address-aec4a41a8126fe9ff26a5904d87ce0ef9bf7412d +nodepath-m/44'/60'/0'/31/ , relayerIndex-16 , address-b5cfdd0f80c08d0a4a992be0fb0533b8b743e905 +nodepath-m/44'/60'/0'/31/ , relayerIndex-17 , address-dd9413e7cf168772d7f2852515c6b20c55d6c0ac +nodepath-m/44'/60'/0'/31/ , relayerIndex-18 , address-b2c51afefee167005060d1b754725c3fe31845df +nodepath-m/44'/60'/0'/31/ , relayerIndex-19 , address-19151b617a412a5bfe65d823e64ac8be4922aaa9 +nodepath-m/44'/60'/0'/31/ , relayerIndex-20 , address-f126e7a2c177c49841c358a03a302c59bfd5764d +nodepath-m/44'/60'/0'/31/ , relayerIndex-21 , address-3fba74960f8aeb66e29ec39142e4c4a72a5b069b +nodepath-m/44'/60'/0'/31/ , relayerIndex-22 , address-9d0a6d3b1ea2d759618555a35babeaf01a540668 +nodepath-m/44'/60'/0'/31/ , relayerIndex-23 , address-dc86f126a3c7a15cacd990904c216105bd0b83a3 +nodepath-m/44'/60'/0'/31/ , relayerIndex-24 , address-4728c79c60828e2fb8827ab6c693610dfcc87dba +nodepath-m/44'/60'/0'/31/ , relayerIndex-25 , address-a7ad1476d0148037ffbe69461b3e4d8a5107d331 +nodepath-m/44'/60'/0'/31/ , relayerIndex-26 , address-74eabec44ef3cbc4ea1e6ec2242ac4a677bf5c1c +nodepath-m/44'/60'/0'/31/ , relayerIndex-27 , address-1d9b44116660e6a0759b1e7565cba896108590c2 +nodepath-m/44'/60'/0'/31/ , relayerIndex-28 , address-18c64be03685a021efbf5d98aa651591903fbabf +nodepath-m/44'/60'/0'/31/ , relayerIndex-29 , address-efebe56680def317f0d9f8a0f759b266ff20c573 +nodepath-m/44'/60'/0'/31/ , relayerIndex-30 , address-f451879edf9c772eab8428140c9179534c6184b6 +nodepath-m/44'/60'/0'/31/ , relayerIndex-31 , address-005cf6d8ff1a68edd024f44087b36c65f49b0dd6 +nodepath-m/44'/60'/0'/31/ , relayerIndex-32 , address-5100878fea86fe849c6c59dcf7b230de227c1ead +nodepath-m/44'/60'/0'/31/ , relayerIndex-33 , address-6f953c85889e0de631cdd1fa5f88447fc07a0ac5 +nodepath-m/44'/60'/0'/31/ , relayerIndex-34 , address-7b266f818a56afe17707cfc70595decf365fdeae +nodepath-m/44'/60'/0'/31/ , 
relayerIndex-35 , address-12757060c5bf658f1fb88deced527f23cfc8bf1f +nodepath-m/44'/60'/0'/31/ , relayerIndex-36 , address-a7aa3f78b7ff97bb9f84a89d5a0082234da451c6 +nodepath-m/44'/60'/0'/31/ , relayerIndex-37 , address-c17d8a77659a7697b2ba30f4f5eaa85ff15fc904 +nodepath-m/44'/60'/0'/31/ , relayerIndex-38 , address-6a37c528b194efb891c351713353651a5d251292 +nodepath-m/44'/60'/0'/31/ , relayerIndex-39 , address-c96db0affc56d6a4093918d9ff1343a18a0b5228 +nodepath-m/44'/60'/0'/31/ , relayerIndex-40 , address-80a3388aa1fdc7208c5f73cee688d2bb70464b2e +nodepath-m/44'/60'/0'/31/ , relayerIndex-41 , address-841b32ff54b91d54f32362be0fd79928e8e85424 +nodepath-m/44'/60'/0'/31/ , relayerIndex-42 , address-d01f13565d87eff7efe5f1e8d19363c40db55a37 +nodepath-m/44'/60'/0'/31/ , relayerIndex-43 , address-5f302df158c53fb1d32ae99a5944813cdf865111 +nodepath-m/44'/60'/0'/31/ , relayerIndex-44 , address-0d64709362e721bb5ca8a4aca7df8bfe8743cfd0 +nodepath-m/44'/60'/0'/31/ , relayerIndex-45 , address-8a1e39d7408529598bc3a5a5e632a90679e27f80 +nodepath-m/44'/60'/0'/31/ , relayerIndex-46 , address-7103ea6ab1dbe1720d7ab73259af29ba73911970 +nodepath-m/44'/60'/0'/31/ , relayerIndex-47 , address-826723ee9978b3659050519df88995853f5642e1 +nodepath-m/44'/60'/0'/31/ , relayerIndex-48 , address-47240cd3646ce2402161a20a2ff3f52fe06350de +nodepath-m/44'/60'/0'/31/ , relayerIndex-49 , address-24149904a470daa0fe079bafb2dab9542fc8b3ee +nodepath-m/44'/60'/0'/32/ , relayerIndex-0 , address-a2affaaf23983d6a1fb72078ae2e9ba781c086d4 +nodepath-m/44'/60'/0'/32/ , relayerIndex-1 , address-9f4f6fb1f0fdf9113b9b8dcc8dc69bd771ef2bc4 +nodepath-m/44'/60'/0'/32/ , relayerIndex-2 , address-8e9191f96d377f7cfe6fd711bc9e1d924c04d2e9 +nodepath-m/44'/60'/0'/32/ , relayerIndex-3 , address-80f89abaece6498e22a3d3bbd8e9b367be242864 +nodepath-m/44'/60'/0'/32/ , relayerIndex-4 , address-ea6d3430c40986fb1b788154a961f40a883f8ba9 +nodepath-m/44'/60'/0'/32/ , relayerIndex-5 , address-cceeee45b12b81f9882e490fe64101555a7ae7da +nodepath-m/44'/60'/0'/32/ , relayerIndex-6 , address-d496019012336941a8d26e02634100609b77b454 +nodepath-m/44'/60'/0'/32/ , relayerIndex-7 , address-94415674e625b0203ad85194569860042cee29e6 +nodepath-m/44'/60'/0'/32/ , relayerIndex-8 , address-cb372ae89d04cd2cb0ebb97868fbbba10c2eb454 +nodepath-m/44'/60'/0'/32/ , relayerIndex-9 , address-90d21bbcd85c5d7a3f97a86d99acf6afa40a3e0c +nodepath-m/44'/60'/0'/32/ , relayerIndex-10 , address-cf0606a1518176f109eadd608faaf0c4c437d366 +nodepath-m/44'/60'/0'/32/ , relayerIndex-11 , address-e6b84292c31f6d7025e35e6295c4e1abb3a527a8 +nodepath-m/44'/60'/0'/32/ , relayerIndex-12 , address-100621c08abd523a8323022fb4e20d67dacd7a3a +nodepath-m/44'/60'/0'/32/ , relayerIndex-13 , address-2ac5ba2fd6b7a7b08d23ff37311ddb08bdc11e58 +nodepath-m/44'/60'/0'/32/ , relayerIndex-14 , address-679cdb4c8276424d06c04143202681e26ffee943 +nodepath-m/44'/60'/0'/32/ , relayerIndex-15 , address-1cbd9d01ddf5d3c6728bf2e0d4ee042b41cf0b04 +nodepath-m/44'/60'/0'/32/ , relayerIndex-16 , address-66fd3327dbfe9958050ce9ec9df4a87d6286432e +nodepath-m/44'/60'/0'/32/ , relayerIndex-17 , address-0a405fdf87c71387f12f1ffeabba407b2e49b051 +nodepath-m/44'/60'/0'/32/ , relayerIndex-18 , address-5a06683811db94089fc6d4564d45080acc9bb633 +nodepath-m/44'/60'/0'/32/ , relayerIndex-19 , address-a26952f1253a97044188029806d227731cd89047 +nodepath-m/44'/60'/0'/32/ , relayerIndex-20 , address-6af8016fde5f743660f98d4485921e46e6662fd7 +nodepath-m/44'/60'/0'/32/ , relayerIndex-21 , address-06e66f22d776f0378db4cb7f47c721e1d689c260 +nodepath-m/44'/60'/0'/32/ , 
relayerIndex-22 , address-eaec3af4aa9e1613049633c6ba872a37c740289f +nodepath-m/44'/60'/0'/32/ , relayerIndex-23 , address-4ad601032a66ed49c25fef3169546c3851a4977e +nodepath-m/44'/60'/0'/32/ , relayerIndex-24 , address-edf082287427484eb114e716dadbc1583fdfcc73 +nodepath-m/44'/60'/0'/32/ , relayerIndex-25 , address-c57395528f9152dc853348c86d5da5d44f293e51 +nodepath-m/44'/60'/0'/32/ , relayerIndex-26 , address-df7ee827f5df02612803f0f8b95446cb7b6a18c1 +nodepath-m/44'/60'/0'/32/ , relayerIndex-27 , address-94df7e62c92fe1d667a45ae1a18b40d287459f4a +nodepath-m/44'/60'/0'/32/ , relayerIndex-28 , address-b9a62f7e62a12310236fb30b9e95e2a1e4dac77b +nodepath-m/44'/60'/0'/32/ , relayerIndex-29 , address-3eda51e112e9cb63b6c32a3cc3b444219d3f4b15 +nodepath-m/44'/60'/0'/32/ , relayerIndex-30 , address-703614b7b9727a94f5a7a15b1afe2e19ed4d7bf4 +nodepath-m/44'/60'/0'/32/ , relayerIndex-31 , address-b077423f4e0b57b3792615827652adeb46a8183f +nodepath-m/44'/60'/0'/32/ , relayerIndex-32 , address-0765ba2b99690599652d4938c24bec199207d5e1 +nodepath-m/44'/60'/0'/32/ , relayerIndex-33 , address-c776d6bae3400edac4353e93efaa7c094d9566a7 +nodepath-m/44'/60'/0'/32/ , relayerIndex-34 , address-992ebd342a687ace253bbca9754b5487cf878c2f +nodepath-m/44'/60'/0'/32/ , relayerIndex-35 , address-94f77777d0caddcf2bf768ad56c9864606a9a243 +nodepath-m/44'/60'/0'/32/ , relayerIndex-36 , address-f8da8002ca4d101de79bec134f45ad758702227a +nodepath-m/44'/60'/0'/32/ , relayerIndex-37 , address-28c7a153c28fc7bf5435aadf4fc00446d880a428 +nodepath-m/44'/60'/0'/32/ , relayerIndex-38 , address-8657642c4572bdc8e57e6c7853e85ef1210e9be7 +nodepath-m/44'/60'/0'/32/ , relayerIndex-39 , address-08e0ce3589b3396f0ab137b3345d62d564576212 +nodepath-m/44'/60'/0'/32/ , relayerIndex-40 , address-897d1555a9743fa41ffa51bae8e65606455bb6f1 +nodepath-m/44'/60'/0'/32/ , relayerIndex-41 , address-5d792c97f74dda40499151d6d9e6cdd60ab7bce2 +nodepath-m/44'/60'/0'/32/ , relayerIndex-42 , address-48ca2b1752ebf6af0a183474bc655396fb46e0d8 +nodepath-m/44'/60'/0'/32/ , relayerIndex-43 , address-b3c6d297964fd4b93b5a247044aa9b55eaf1db56 +nodepath-m/44'/60'/0'/32/ , relayerIndex-44 , address-663d6e1f84ce7878df8645980c85aa17756c0e1d +nodepath-m/44'/60'/0'/32/ , relayerIndex-45 , address-8ecf93bc415fb0b407ccc50e1f5afe0952044ae5 +nodepath-m/44'/60'/0'/32/ , relayerIndex-46 , address-04dad1fd41404f4ee60569b79b8929160b9c37c5 +nodepath-m/44'/60'/0'/32/ , relayerIndex-47 , address-c8daa91f9d1ae4e1befe1707b3a4c47c55b5b613 +nodepath-m/44'/60'/0'/32/ , relayerIndex-48 , address-ebfdd03d83bfa944b27683de67ba99b80b26e5b4 +nodepath-m/44'/60'/0'/32/ , relayerIndex-49 , address-00b8f56ce6e5a66a0081858c25db26815edcda1f +nodepath-m/44'/60'/0'/33/ , relayerIndex-0 , address-9a452385b63228f4fa3a939d2e0430bbdb2cedc1 +nodepath-m/44'/60'/0'/33/ , relayerIndex-1 , address-e742c6c2b04c03855fd9e68686ff54c84738b0f6 +nodepath-m/44'/60'/0'/33/ , relayerIndex-2 , address-7cc88269f94abe321ae3ab8acadc0e5a062026b2 +nodepath-m/44'/60'/0'/33/ , relayerIndex-3 , address-ccbb9575f17941ba60a6e9fbf63a0e0cc7b37b29 +nodepath-m/44'/60'/0'/33/ , relayerIndex-4 , address-8bc5926077b68a4280f22537198aa56f785e1079 +nodepath-m/44'/60'/0'/33/ , relayerIndex-5 , address-7e3645d7b3b978be619ae580390e6d38b62d36d3 +nodepath-m/44'/60'/0'/33/ , relayerIndex-6 , address-7ed79d2582822e4868d0fed514e20163de593e34 +nodepath-m/44'/60'/0'/33/ , relayerIndex-7 , address-e2f51b419fd60eae0f22db93cd58a26e9ade441a +nodepath-m/44'/60'/0'/33/ , relayerIndex-8 , address-0dc63c645fd0f6523d4e049ff5f99b0f2bd3c46d +nodepath-m/44'/60'/0'/33/ , 
relayerIndex-9 , address-cac550b1441b2d77ec518d30c3e2bf249641380f +nodepath-m/44'/60'/0'/33/ , relayerIndex-10 , address-a7e2dd45740a27613ed9a804f4fe7d3fa43c035a +nodepath-m/44'/60'/0'/33/ , relayerIndex-11 , address-33e7ac2d39c350f668ee2f0742118a659926b183 +nodepath-m/44'/60'/0'/33/ , relayerIndex-12 , address-fc23dac02b33227e2baa4ffea3f29eaab9907be7 +nodepath-m/44'/60'/0'/33/ , relayerIndex-13 , address-9f274d6b0fe3eaf2e2d5c6a2c01518b592ac1ac2 +nodepath-m/44'/60'/0'/33/ , relayerIndex-14 , address-69fc572b4242d2f9d6dd95f4149d3f1b13aec282 +nodepath-m/44'/60'/0'/33/ , relayerIndex-15 , address-5900c14fffcaad5a62d3f141d1e47defb3b3d601 +nodepath-m/44'/60'/0'/33/ , relayerIndex-16 , address-a3003371ebb1a39b49b9ee207d4aea713aa28f23 +nodepath-m/44'/60'/0'/33/ , relayerIndex-17 , address-3454a1dd0ed7896ed72dab5e3ffce47d3cd5ff7c +nodepath-m/44'/60'/0'/33/ , relayerIndex-18 , address-a5732c2699c81cffc19256f2d2e055f59bce456f +nodepath-m/44'/60'/0'/33/ , relayerIndex-19 , address-905ccbeebc0944284a998a5bc776652ac929833f +nodepath-m/44'/60'/0'/33/ , relayerIndex-20 , address-14825e78a2ae0f6ee92741a725009e8b3848eac3 +nodepath-m/44'/60'/0'/33/ , relayerIndex-21 , address-3a957774900e4abf6cc47e6a8f817d434cd94d6a +nodepath-m/44'/60'/0'/33/ , relayerIndex-22 , address-d1d1e2e6eb0d5d2885f7b62a641e26975126af69 +nodepath-m/44'/60'/0'/33/ , relayerIndex-23 , address-c4bffbd0c0feb030e27c662547cd86c1ae6a9ae1 +nodepath-m/44'/60'/0'/33/ , relayerIndex-24 , address-efab36e498925f3d380cc74ba0f45e292aa3a696 +nodepath-m/44'/60'/0'/33/ , relayerIndex-25 , address-0d392451a15fb69f44c23db9b66ef41e4a6c3aa8 +nodepath-m/44'/60'/0'/33/ , relayerIndex-26 , address-f43ccdf6913087dfbae8ab4d869b2d8e643a61aa +nodepath-m/44'/60'/0'/33/ , relayerIndex-27 , address-d8971cff418bc3fb70bacef8455ddbcc4e77009e +nodepath-m/44'/60'/0'/33/ , relayerIndex-28 , address-bfc4af4cfbb155541fcfe096164f7a60a0b1c9fc +nodepath-m/44'/60'/0'/33/ , relayerIndex-29 , address-ea7dd5d61bde8d500028b8eafb6f02259600cc72 +nodepath-m/44'/60'/0'/33/ , relayerIndex-30 , address-0eec163557db1a9f661101c7704b3bbab19ab18e +nodepath-m/44'/60'/0'/33/ , relayerIndex-31 , address-a87c08842ab8134bd331b5180800db523f9b8b6a +nodepath-m/44'/60'/0'/33/ , relayerIndex-32 , address-d9387af5d5d8edaee0eaea9a0d5e50c69ee1396e +nodepath-m/44'/60'/0'/33/ , relayerIndex-33 , address-ca9b9c886aed8a68b2bd9aa329de1c32efc81222 +nodepath-m/44'/60'/0'/33/ , relayerIndex-34 , address-93e85546fa8d8eeade7187d712be27c485bb3451 +nodepath-m/44'/60'/0'/33/ , relayerIndex-35 , address-b818cd7eff4eba54d99ca46cebf5771a84ad1189 +nodepath-m/44'/60'/0'/33/ , relayerIndex-36 , address-9919d18e9e85b221bceef65ac06e857c30c6828c +nodepath-m/44'/60'/0'/33/ , relayerIndex-37 , address-abd7568d76771d498739e307a133dea597e020bd +nodepath-m/44'/60'/0'/33/ , relayerIndex-38 , address-bb5643945158e7c04339e0d3869f16696057774c +nodepath-m/44'/60'/0'/33/ , relayerIndex-39 , address-43bd7261442fc5bff2f827083cc462cc06adfa85 +nodepath-m/44'/60'/0'/33/ , relayerIndex-40 , address-912926a33508644c2557ce9a58849ec6f3687011 +nodepath-m/44'/60'/0'/33/ , relayerIndex-41 , address-f9bb4cb6564662fb93fb252b7f32f0c91eabfad0 +nodepath-m/44'/60'/0'/33/ , relayerIndex-42 , address-dfd52e2bd09c21367b5367bbb466f40f517d4f0d +nodepath-m/44'/60'/0'/33/ , relayerIndex-43 , address-5df8ecbca30f29dd9cd2a651dfba093a8432e991 +nodepath-m/44'/60'/0'/33/ , relayerIndex-44 , address-34346cb44260e1c80fbf8fc8101de604e80ab5c4 +nodepath-m/44'/60'/0'/33/ , relayerIndex-45 , address-f3d32b43b4db642cdd93272aa6f7f0299bab5b5e +nodepath-m/44'/60'/0'/33/ , 
relayerIndex-46 , address-25e3334f389f5fc204596f9c186cd442a809f52e +nodepath-m/44'/60'/0'/33/ , relayerIndex-47 , address-786790584fa91a5abdbd72a5eeac650194f3278e +nodepath-m/44'/60'/0'/33/ , relayerIndex-48 , address-9f1a098378ab72bc0787ad8cfca75764f00f48d6 +nodepath-m/44'/60'/0'/33/ , relayerIndex-49 , address-ac292594044f7135f1fe898c7ed351388a364d47 +nodepath-m/44'/60'/0'/34/ , relayerIndex-0 , address-74d9a934b72c9320601338009fef796b3589bf32 +nodepath-m/44'/60'/0'/34/ , relayerIndex-1 , address-74e8dd232f736902ff0cc3a33cee47c092967df1 +nodepath-m/44'/60'/0'/34/ , relayerIndex-2 , address-c870ab154510e534c60011b1aa1390a4aa50a2a9 +nodepath-m/44'/60'/0'/34/ , relayerIndex-3 , address-8352522221a37af9ab741e716d57294042d3fe28 +nodepath-m/44'/60'/0'/34/ , relayerIndex-4 , address-e49b64ee9ee0f057d24cbe2515711f390dc96335 +nodepath-m/44'/60'/0'/34/ , relayerIndex-5 , address-1c1559642a19436225f04ede3ef02e522de6965b +nodepath-m/44'/60'/0'/34/ , relayerIndex-6 , address-55c77710298c5c77ab14f45fb7e50fdb2eb9e604 +nodepath-m/44'/60'/0'/34/ , relayerIndex-7 , address-7d06a74b3d8f63ee765926db72600f97e5a99323 +nodepath-m/44'/60'/0'/34/ , relayerIndex-8 , address-34b8f580a620c3f43d9468163a6baf0c69f937e1 +nodepath-m/44'/60'/0'/34/ , relayerIndex-9 , address-eb305ca344f148b5a59b67c6857480f608645369 +nodepath-m/44'/60'/0'/34/ , relayerIndex-10 , address-322a296f0c0434445ef094b898545290f5301926 +nodepath-m/44'/60'/0'/34/ , relayerIndex-11 , address-3de703b735c003b8c9a1c48df4d8c999fc3795ee +nodepath-m/44'/60'/0'/34/ , relayerIndex-12 , address-1fc3cd433cf2bcd23091cf5b8228cc76f0637b59 +nodepath-m/44'/60'/0'/34/ , relayerIndex-13 , address-47add4044c603e7cc8dd1d97a421b03292baa910 +nodepath-m/44'/60'/0'/34/ , relayerIndex-14 , address-71551b1242a7fc207148457303bca77f2643fe8e +nodepath-m/44'/60'/0'/34/ , relayerIndex-15 , address-8fea92dc4432f66543ff9bccb5a04841da681056 +nodepath-m/44'/60'/0'/34/ , relayerIndex-16 , address-299c49ce4a2638910dc46301e827162aa7adf624 +nodepath-m/44'/60'/0'/34/ , relayerIndex-17 , address-bc027d72504b37a1b76f80e0db511ef07e1ad52f +nodepath-m/44'/60'/0'/34/ , relayerIndex-18 , address-e07024fa1ae1fbc974b6c55ed227dd7bb6340773 +nodepath-m/44'/60'/0'/34/ , relayerIndex-19 , address-f19d9777eb8497bc2153114cf37ac1f30da0727d +nodepath-m/44'/60'/0'/34/ , relayerIndex-20 , address-c78c94ddc47c0f4652b1cd07a474ea60b9bfc76b +nodepath-m/44'/60'/0'/34/ , relayerIndex-21 , address-2978e5bf4131f495395c933f6d655bdfdd08b5d0 +nodepath-m/44'/60'/0'/34/ , relayerIndex-22 , address-771d57b286773a8cf33aedce14883bd1cd019f64 +nodepath-m/44'/60'/0'/34/ , relayerIndex-23 , address-cd19b0701cd0133e8ec623f5ade1ea35549e856c +nodepath-m/44'/60'/0'/34/ , relayerIndex-24 , address-af92b455126d788909e15a7cadf4b3e6c38c0e20 +nodepath-m/44'/60'/0'/34/ , relayerIndex-25 , address-cbdccf3ea2f4aa4e96729acda213bf5bb0e46a42 +nodepath-m/44'/60'/0'/34/ , relayerIndex-26 , address-770f12d486608bdcf0817dc8fdd345eee3389167 +nodepath-m/44'/60'/0'/34/ , relayerIndex-27 , address-9b630a645f1b6180f9f836b4133cf12f86b8d421 +nodepath-m/44'/60'/0'/34/ , relayerIndex-28 , address-ed2790676d4c45268f40bd204dbc4cb07b58f7bb +nodepath-m/44'/60'/0'/34/ , relayerIndex-29 , address-186d88725a698d92115f26308269ff79cb0c8c89 +nodepath-m/44'/60'/0'/34/ , relayerIndex-30 , address-0d1dd1a727abec9659f304ce4ff0fdf3546f6589 +nodepath-m/44'/60'/0'/34/ , relayerIndex-31 , address-f853d6d89ac0a865758f1e42b4bca501c5996a89 +nodepath-m/44'/60'/0'/34/ , relayerIndex-32 , address-238dd11989b05d6c5e4e9c523fdc165c62ef1582 +nodepath-m/44'/60'/0'/34/ , 
relayerIndex-33 , address-29085a5abbfb580076d810b4b36c42d23703da47 +nodepath-m/44'/60'/0'/34/ , relayerIndex-34 , address-798ad41d717c76a8e680a14646e82ac7f6ea7505 +nodepath-m/44'/60'/0'/34/ , relayerIndex-35 , address-686b063ad3f9af3651e340f1aaf51ac6d241829d +nodepath-m/44'/60'/0'/34/ , relayerIndex-36 , address-9faf6c3dc504d31ea7f875feea208ac7ad138991 +nodepath-m/44'/60'/0'/34/ , relayerIndex-37 , address-a2c0b54e7aa48f8e33feb6b53d3732429831c630 +nodepath-m/44'/60'/0'/34/ , relayerIndex-38 , address-cc55b290425505763d5318ef287a31a705952cf3 +nodepath-m/44'/60'/0'/34/ , relayerIndex-39 , address-599b69d7df984371cb96bd0ee0a850909cda5e45 +nodepath-m/44'/60'/0'/34/ , relayerIndex-40 , address-bc74e5566312bb09e011d721c0a10609d4777d94 +nodepath-m/44'/60'/0'/34/ , relayerIndex-41 , address-21b81ab2715d100216b1145030f64007ee6d6dfb +nodepath-m/44'/60'/0'/34/ , relayerIndex-42 , address-eb1f3d7152776d13ef549214e5d8e31a1f46f867 +nodepath-m/44'/60'/0'/34/ , relayerIndex-43 , address-c800d42d1ffa54e16d224cf81b1a3db9e97def3e +nodepath-m/44'/60'/0'/34/ , relayerIndex-44 , address-f21add73850f175109c5e0c8745beee0d25707f6 +nodepath-m/44'/60'/0'/34/ , relayerIndex-45 , address-f0f1ddf3ca1a5be1e2d78b2e96ba38ef3b94509b +nodepath-m/44'/60'/0'/34/ , relayerIndex-46 , address-cc5043461b74fe04ac554ccc9798c851b35fec30 +nodepath-m/44'/60'/0'/34/ , relayerIndex-47 , address-7d6b5b77926b895b947b5de988ce762c471f1afb +nodepath-m/44'/60'/0'/34/ , relayerIndex-48 , address-b0cd90c16a8b0702efdd6440c99e910a389f8216 +nodepath-m/44'/60'/0'/34/ , relayerIndex-49 , address-ab8d959285f2f0370807fe2c63b60b3ca042e6ba +nodepath-m/44'/60'/0'/35/ , relayerIndex-0 , address-704cabe2deb13f1e0f98389f997bf30461c4a0ca +nodepath-m/44'/60'/0'/35/ , relayerIndex-1 , address-31466673394600507ddb44c4074eb91800bf8161 +nodepath-m/44'/60'/0'/35/ , relayerIndex-2 , address-be2738edea8a8205ae67363fedaa5fdeb8fc8a77 +nodepath-m/44'/60'/0'/35/ , relayerIndex-3 , address-b6457ddcfffa51dc5fd207b00153ae38554c2d99 +nodepath-m/44'/60'/0'/35/ , relayerIndex-4 , address-4c93581b828cfcf23e237de82e00235fdbcc0708 +nodepath-m/44'/60'/0'/35/ , relayerIndex-5 , address-18d847d733f0988617f8b51213038f5b18eb2445 +nodepath-m/44'/60'/0'/35/ , relayerIndex-6 , address-c1a9b96869c0721c9e679d1dd2ee7f94601cda70 +nodepath-m/44'/60'/0'/35/ , relayerIndex-7 , address-46a341a9bab1b706ac84d1c9630bc7e6965df962 +nodepath-m/44'/60'/0'/35/ , relayerIndex-8 , address-cc8168a83f80e4db0d7561000d9373eeb3526687 +nodepath-m/44'/60'/0'/35/ , relayerIndex-9 , address-720f517fa2a2d31eab2225e31fbd7c9b5933f87d +nodepath-m/44'/60'/0'/35/ , relayerIndex-10 , address-7604170fa4e31fc9d7b89e364b112ccd2e64ece2 +nodepath-m/44'/60'/0'/35/ , relayerIndex-11 , address-2d36328d8f06e7881f97dbaac9b16cc18fee4644 +nodepath-m/44'/60'/0'/35/ , relayerIndex-12 , address-4d42e7e83e95c0c814da0e65ee559c633234835a +nodepath-m/44'/60'/0'/35/ , relayerIndex-13 , address-0fbb7e168471215cd2d16366e23ef1c65bc4cc96 +nodepath-m/44'/60'/0'/35/ , relayerIndex-14 , address-3b8972b4a5fdeaaaa40818fcb20a4df6baee21b1 +nodepath-m/44'/60'/0'/35/ , relayerIndex-15 , address-f97388edb72b0bed0369f6ae8c119f84cfb67fa6 +nodepath-m/44'/60'/0'/35/ , relayerIndex-16 , address-5317d593ec3a41fbda19a9ec883bd4339be6b6b1 +nodepath-m/44'/60'/0'/35/ , relayerIndex-17 , address-1ee04514b07e4a109b8fb1966389dd4aefd79e6b +nodepath-m/44'/60'/0'/35/ , relayerIndex-18 , address-a89d0acc682df41404b9e8a4aa1af5214d94a484 +nodepath-m/44'/60'/0'/35/ , relayerIndex-19 , address-705f2f54de410e5a7dd3941e3378a855845566d1 +nodepath-m/44'/60'/0'/35/ , 
relayerIndex-20 , address-e4f7949637c5e1443722e704c5f9a15314c8dfd3 +nodepath-m/44'/60'/0'/35/ , relayerIndex-21 , address-e82b2f73386a40050fec2e3234bc02d426f36568 +nodepath-m/44'/60'/0'/35/ , relayerIndex-22 , address-082d1fb687bd86adfa9c5992f1d1e78676023907 +nodepath-m/44'/60'/0'/35/ , relayerIndex-23 , address-3299f1b22a2030c4c46dc984d9bf0cb7e9a0b5cd +nodepath-m/44'/60'/0'/35/ , relayerIndex-24 , address-0195c68a78a72b6b3ce7a02feb6ae3ecb25e0f0e +nodepath-m/44'/60'/0'/35/ , relayerIndex-25 , address-4f2938cdac66f3dca0218c6ec3c6219db5a13861 +nodepath-m/44'/60'/0'/35/ , relayerIndex-26 , address-81696689f661ce6e5e3515c4a4c6fafd585b6b5f +nodepath-m/44'/60'/0'/35/ , relayerIndex-27 , address-370dea168b16472e4191a6e0807c5edc24b4ab6f +nodepath-m/44'/60'/0'/35/ , relayerIndex-28 , address-7e9ca3391a358e14b02f041cf0d2dd842eb596e9 +nodepath-m/44'/60'/0'/35/ , relayerIndex-29 , address-96ee5d55f2876005faffc74e5822c0900d3fa8b8 +nodepath-m/44'/60'/0'/35/ , relayerIndex-30 , address-b3000649775283b3d97db6a64055f668fcb3e3a8 +nodepath-m/44'/60'/0'/35/ , relayerIndex-31 , address-6735ce9fcb6855f9198711dd8d940768bd3ad25c +nodepath-m/44'/60'/0'/35/ , relayerIndex-32 , address-fc414af2f61ee21fc68464a9c1fc396fe73b73cf +nodepath-m/44'/60'/0'/35/ , relayerIndex-33 , address-cad6d84b910c1dc0562a315ea6270ecd68958e40 +nodepath-m/44'/60'/0'/35/ , relayerIndex-34 , address-4f6de3124046230e0b1d065a7f3cb7d9dd72ec43 +nodepath-m/44'/60'/0'/35/ , relayerIndex-35 , address-1e97da54420e423cdab4e181250a46c0b6651981 +nodepath-m/44'/60'/0'/35/ , relayerIndex-36 , address-7e1425532a0e0d2be625a2f8f4a1cd8e90fe63a3 +nodepath-m/44'/60'/0'/35/ , relayerIndex-37 , address-bec5675e9da708bc7568c8718037d789d05b4a04 +nodepath-m/44'/60'/0'/35/ , relayerIndex-38 , address-4b1c0543c6c487611b7689edb8cbef93d7b00a54 +nodepath-m/44'/60'/0'/35/ , relayerIndex-39 , address-bbadb55b07ccb21f127a3e5c6fd48d8a11320b3b +nodepath-m/44'/60'/0'/35/ , relayerIndex-40 , address-9ad0d77f1c98d9273f07422a9a50c3b6be4868f4 +nodepath-m/44'/60'/0'/35/ , relayerIndex-41 , address-175f03d6a27a9c785cef594565f8b48a0ff180a3 +nodepath-m/44'/60'/0'/35/ , relayerIndex-42 , address-682e77df968843876170cf3483ee7eb8eb25acde +nodepath-m/44'/60'/0'/35/ , relayerIndex-43 , address-f0201a6254efccef4cfd979f8029a6bd2d8fb215 +nodepath-m/44'/60'/0'/35/ , relayerIndex-44 , address-cb2903c6b0199ec0ce13cd0eeefa49d83723f622 +nodepath-m/44'/60'/0'/35/ , relayerIndex-45 , address-008b0cc71777f28fe7bef4df1693373f8c598cdc +nodepath-m/44'/60'/0'/35/ , relayerIndex-46 , address-10ce803a168878c8b6ae900dd43b5154456d5907 +nodepath-m/44'/60'/0'/35/ , relayerIndex-47 , address-8d21696d48cdc48fd2a2ca3ca3bb3d1a28551216 +nodepath-m/44'/60'/0'/35/ , relayerIndex-48 , address-a4b3d923e221d035bcb1fc7af67875a71f00fe7b +nodepath-m/44'/60'/0'/35/ , relayerIndex-49 , address-aacc7e40ce587ace9c5df01e93cce7370b5d1ad0 +nodepath-m/44'/60'/0'/36/ , relayerIndex-0 , address-75627891760812f0abdc79c9f3fc9c4f5772a7ef +nodepath-m/44'/60'/0'/36/ , relayerIndex-1 , address-e8d5b5bee2b93f217923e6b286894294fcb9036a +nodepath-m/44'/60'/0'/36/ , relayerIndex-2 , address-2f4cc3c62e811215a6b7c87569ccc3307e60cfc5 +nodepath-m/44'/60'/0'/36/ , relayerIndex-3 , address-ca8a8e18d818c8326e7cde8357ba3c599961ae93 +nodepath-m/44'/60'/0'/36/ , relayerIndex-4 , address-140668fc0d462f60779820430eebfd98539b0c58 +nodepath-m/44'/60'/0'/36/ , relayerIndex-5 , address-09d0cbc89d5997bf4464f52e1d1dcc949159c57c +nodepath-m/44'/60'/0'/36/ , relayerIndex-6 , address-f56d83a31ce335bb7a70279fd180d2f5e4ec55d6 +nodepath-m/44'/60'/0'/36/ , 
relayerIndex-7 , address-c021e8fc772e4236d19e6940f0bdf49ec447b69b +nodepath-m/44'/60'/0'/36/ , relayerIndex-8 , address-c1db19c2433ce92e2916ea5e03f861a95b2abd21 +nodepath-m/44'/60'/0'/36/ , relayerIndex-9 , address-2e79afd531fd8c7a24f5f47490e6d99dc344a32a +nodepath-m/44'/60'/0'/36/ , relayerIndex-10 , address-4e6dcf19cd7bab6fb69a9487c479fced0a021dee +nodepath-m/44'/60'/0'/36/ , relayerIndex-11 , address-b898f710a640dc6fad3d7034a1a7c02264c815b4 +nodepath-m/44'/60'/0'/36/ , relayerIndex-12 , address-b31281d66afd92a51ae35b2a09fb86f3f4bfcdf0 +nodepath-m/44'/60'/0'/36/ , relayerIndex-13 , address-8b0662e8fb78b6dbad899ddaeab1823a4501204b +nodepath-m/44'/60'/0'/36/ , relayerIndex-14 , address-6d964fd5985351b6d0cd77ba6b9f246b859240db +nodepath-m/44'/60'/0'/36/ , relayerIndex-15 , address-372150429ffb8b32e5e5d41d1208a1070c05d513 +nodepath-m/44'/60'/0'/36/ , relayerIndex-16 , address-e8527d715680d02684389516e3746404c3548d1f +nodepath-m/44'/60'/0'/36/ , relayerIndex-17 , address-fa0c8b9371c7b1829dd6298b5173fc4e3534f3a7 +nodepath-m/44'/60'/0'/36/ , relayerIndex-18 , address-a9e94aae856c9728f01f488dc9ac3b5f1c7ed92b +nodepath-m/44'/60'/0'/36/ , relayerIndex-19 , address-f8921eebc1e157708ee83bd25fb0e6be2ed7f7ed +nodepath-m/44'/60'/0'/36/ , relayerIndex-20 , address-7b2765335683d5a86edcf1b210029f7c5cd3b545 +nodepath-m/44'/60'/0'/36/ , relayerIndex-21 , address-51f7f32ec3cda2a6e44de88503476d18e47707de +nodepath-m/44'/60'/0'/36/ , relayerIndex-22 , address-62da00210a8364a47c0b2089a2e50c20b8051391 +nodepath-m/44'/60'/0'/36/ , relayerIndex-23 , address-545ca47a8b363a0ae34156af10f3555740b5936a +nodepath-m/44'/60'/0'/36/ , relayerIndex-24 , address-fd7ee40b834dc933e45726809d4fe9ad672b39dc +nodepath-m/44'/60'/0'/36/ , relayerIndex-25 , address-246e2675ee6507ba8be02b35e8d4957fb0cfa730 +nodepath-m/44'/60'/0'/36/ , relayerIndex-26 , address-c3f35944f619193397bfd1492e57228712181e6f +nodepath-m/44'/60'/0'/36/ , relayerIndex-27 , address-44be4b5141f80a62c404ef788193c808081c866c +nodepath-m/44'/60'/0'/36/ , relayerIndex-28 , address-9212e69ce5da281dd46c381b522e0681f206ee94 +nodepath-m/44'/60'/0'/36/ , relayerIndex-29 , address-a4e3f36df7014991c2b7464748b5d96ee60f8ce9 +nodepath-m/44'/60'/0'/36/ , relayerIndex-30 , address-d68f008c1fce9e8d510c0b9db8776d99fef1d277 +nodepath-m/44'/60'/0'/36/ , relayerIndex-31 , address-2ea768d9b13880fbc43590d022861b15b1c0e4d3 +nodepath-m/44'/60'/0'/36/ , relayerIndex-32 , address-3172e80fb79a54800090756877be64c962c465fc +nodepath-m/44'/60'/0'/36/ , relayerIndex-33 , address-ab5b59ab42c7005e2fce14d80acedbbb6cb29a13 +nodepath-m/44'/60'/0'/36/ , relayerIndex-34 , address-191c58c5aaa19b5238b4ce2494e2afc625fbca98 +nodepath-m/44'/60'/0'/36/ , relayerIndex-35 , address-0b2a17ed44583a3cde85bb3d309ef3277752b72c +nodepath-m/44'/60'/0'/36/ , relayerIndex-36 , address-044430f40781f8c3c8a4255fe6f542132e927759 +nodepath-m/44'/60'/0'/36/ , relayerIndex-37 , address-ab7aa2a117a67181ffc0fda5e93c75b5d0467fbc +nodepath-m/44'/60'/0'/36/ , relayerIndex-38 , address-9915ff016387cb8f9fe5b98f9966d93f85c8a71c +nodepath-m/44'/60'/0'/36/ , relayerIndex-39 , address-9df76c6d980da11c2d9086e648a12f307bc6958c +nodepath-m/44'/60'/0'/36/ , relayerIndex-40 , address-bfbf46b9e88cdbba264e2222ef9038be0ad4c404 +nodepath-m/44'/60'/0'/36/ , relayerIndex-41 , address-b279c23e34220dc7b2ed7e8db95afe2da461f0f4 +nodepath-m/44'/60'/0'/36/ , relayerIndex-42 , address-81994342b4631e9d9b852f458286dbb970e17bc2 +nodepath-m/44'/60'/0'/36/ , relayerIndex-43 , address-67eb66a1da586f74ffe5cdd448e71e498fd531d8 +nodepath-m/44'/60'/0'/36/ , 
relayerIndex-44 , address-197c67c11eef30d39f5e4e4d36f04b822c741f97 +nodepath-m/44'/60'/0'/36/ , relayerIndex-45 , address-94cb76cda77c5bbdc54bdb352e0270e283b2f294 +nodepath-m/44'/60'/0'/36/ , relayerIndex-46 , address-b5c669fae9aca90920d7f2bae6ac8fcfd6c9db23 +nodepath-m/44'/60'/0'/36/ , relayerIndex-47 , address-65842533ed6a2920597786b452cd35540d20e766 +nodepath-m/44'/60'/0'/36/ , relayerIndex-48 , address-3ac05a5af11edf5922cb1c3c9aef0913ccb89960 +nodepath-m/44'/60'/0'/36/ , relayerIndex-49 , address-3d215ed5fc0c4b604a29127c50e2bca748128027 +nodepath-m/44'/60'/0'/37/ , relayerIndex-0 , address-13f69b7a83635ca45c101c9fb497d07783a14143 +nodepath-m/44'/60'/0'/37/ , relayerIndex-1 , address-944172cc2952a15419b83daa727a3f173bbd7685 +nodepath-m/44'/60'/0'/37/ , relayerIndex-2 , address-eade51284179a4d82727ca3329bb2fe8a1756c4c +nodepath-m/44'/60'/0'/37/ , relayerIndex-3 , address-505149ee78cd2005ee94212f64a11300f626d383 +nodepath-m/44'/60'/0'/37/ , relayerIndex-4 , address-fc7b08a5833c63c26fe7ccf775bd6e9f5c92c25c +nodepath-m/44'/60'/0'/37/ , relayerIndex-5 , address-22e5a0d9f69c4c2d77cd76d31747c44b05e5c9dc +nodepath-m/44'/60'/0'/37/ , relayerIndex-6 , address-4b6dd175b3fdfb1415e1e74f01ed1617c19b6896 +nodepath-m/44'/60'/0'/37/ , relayerIndex-7 , address-c2f611b6ba41b8e7a87656f304a1cf64b8344fcb +nodepath-m/44'/60'/0'/37/ , relayerIndex-8 , address-4d2c18945420b91bc6cee133375e6af34acec06c +nodepath-m/44'/60'/0'/37/ , relayerIndex-9 , address-b2a1fd739168567871c6a7d927d7de1cb19bf0a1 +nodepath-m/44'/60'/0'/37/ , relayerIndex-10 , address-152581b03dabd32f6c89152572b57d7d006b3995 +nodepath-m/44'/60'/0'/37/ , relayerIndex-11 , address-74bf62d0794506e7f903dbbe2ae5331fe749c3f0 +nodepath-m/44'/60'/0'/37/ , relayerIndex-12 , address-cf7736f08f092c76bf65621d027b522333f8fdd7 +nodepath-m/44'/60'/0'/37/ , relayerIndex-13 , address-120789ad9d232bc001a00a11e358edd3d181dac1 +nodepath-m/44'/60'/0'/37/ , relayerIndex-14 , address-2941a1260e7c2802cd179af510492c36cc091892 +nodepath-m/44'/60'/0'/37/ , relayerIndex-15 , address-0a5b3fe342a848fff7af57c81b150454ef50f225 +nodepath-m/44'/60'/0'/37/ , relayerIndex-16 , address-448657886c5844bf62eefb3a95c1b8543f2b1a2f +nodepath-m/44'/60'/0'/37/ , relayerIndex-17 , address-52a45d627b3ff8a8110327fa6bc2086941a9fc26 +nodepath-m/44'/60'/0'/37/ , relayerIndex-18 , address-a56c648a8553c728357899b5a4fe956581822fe4 +nodepath-m/44'/60'/0'/37/ , relayerIndex-19 , address-bd656b16724b5161de4fc8f09a5df9775fec961f +nodepath-m/44'/60'/0'/37/ , relayerIndex-20 , address-ed429f1efa06873c4914f8018afce7af9426be4e +nodepath-m/44'/60'/0'/37/ , relayerIndex-21 , address-4f2c285bc95184dfd8da238bad0220fdd1fe155a +nodepath-m/44'/60'/0'/37/ , relayerIndex-22 , address-6263c406301e501a09f5c92b756ed304ad25b915 +nodepath-m/44'/60'/0'/37/ , relayerIndex-23 , address-2fee85d7f4631cd42f35853aba2c20bf50b8bc9b +nodepath-m/44'/60'/0'/37/ , relayerIndex-24 , address-019dfe64d5586ca1c2ee8e2bb914aad33ef546ea +nodepath-m/44'/60'/0'/37/ , relayerIndex-25 , address-1f702660030f96c8c978788a671477a2c72142ce +nodepath-m/44'/60'/0'/37/ , relayerIndex-26 , address-8ba7e1478e0ef565fdcd866751c22aafb47492d9 +nodepath-m/44'/60'/0'/37/ , relayerIndex-27 , address-65ede153f689544d7e88a410eb422ea49ca4b909 +nodepath-m/44'/60'/0'/37/ , relayerIndex-28 , address-55d3d332b15a36ca42d522037645c172561157cc +nodepath-m/44'/60'/0'/37/ , relayerIndex-29 , address-75e0a2262233dc86771deefa95460cb8e72b13a5 +nodepath-m/44'/60'/0'/37/ , relayerIndex-30 , address-e1e33e6495d5f42abd419cd30a4e4c3acf1643b9 +nodepath-m/44'/60'/0'/37/ , 
relayerIndex-31 , address-934eebe9968d10e511d715e0e253d503b7879844 +nodepath-m/44'/60'/0'/37/ , relayerIndex-32 , address-0ec1b7e78f17759867613506a8fc92f5e0ff8167 +nodepath-m/44'/60'/0'/37/ , relayerIndex-33 , address-438362f6b072d664339da19e11c17ee03c652596 +nodepath-m/44'/60'/0'/37/ , relayerIndex-34 , address-4d5d89c95862b377125b50c6f11c4514d82a98ab +nodepath-m/44'/60'/0'/37/ , relayerIndex-35 , address-e93cbf1ad41a38cc705e7fab0121e71230f24fb5 +nodepath-m/44'/60'/0'/37/ , relayerIndex-36 , address-f09569b99eae0d5f55a15c008847a8859568485d +nodepath-m/44'/60'/0'/37/ , relayerIndex-37 , address-869e77f9af5f2252b506d68e68940a6e666defeb +nodepath-m/44'/60'/0'/37/ , relayerIndex-38 , address-6198f2fc47171b1ed6183907fe5c6bf81c12c739 +nodepath-m/44'/60'/0'/37/ , relayerIndex-39 , address-34eaf48479dbf932c048bf622b6e7a565cf7a6a7 +nodepath-m/44'/60'/0'/37/ , relayerIndex-40 , address-66d44b359e8d6ab8b8928e910d64dc7e7ddfbda8 +nodepath-m/44'/60'/0'/37/ , relayerIndex-41 , address-155d676f32ba56b6efda3b41adb7defedbedf302 +nodepath-m/44'/60'/0'/37/ , relayerIndex-42 , address-98f7b87d535c6ca5b8b4f3be7a189329b0b84dd4 +nodepath-m/44'/60'/0'/37/ , relayerIndex-43 , address-22fa9477d151957fb9bd2bd17925e4ba944f58af +nodepath-m/44'/60'/0'/37/ , relayerIndex-44 , address-ef97ef95ff064601c5f21ff19602bf8af79c5a39 +nodepath-m/44'/60'/0'/37/ , relayerIndex-45 , address-692a71e84e3cc2b1dd1758ebcd6e64864c392189 +nodepath-m/44'/60'/0'/37/ , relayerIndex-46 , address-a00dd5309e90c979906f62f9479a9b6a0a5ad0fc +nodepath-m/44'/60'/0'/37/ , relayerIndex-47 , address-53c7fcddeeceee94afae027f5886af9b4acbfb15 +nodepath-m/44'/60'/0'/37/ , relayerIndex-48 , address-68a7231e066ec3b3be4dd8949b4b47c36e60a7ff +nodepath-m/44'/60'/0'/37/ , relayerIndex-49 , address-8f705ccec1cbd020ec3858d09c78ebea4caefd82 +nodepath-m/44'/60'/0'/38/ , relayerIndex-0 , address-ab8d4a78eae81e30d71dcd68881fd44221d44ce3 +nodepath-m/44'/60'/0'/38/ , relayerIndex-1 , address-26bf1d805617651d2f97086297c321a6cd044d14 +nodepath-m/44'/60'/0'/38/ , relayerIndex-2 , address-b46b525124896373ca1964e0dca34d3636639f85 +nodepath-m/44'/60'/0'/38/ , relayerIndex-3 , address-3aa21b0f527f87a60cb84349afad186453dd246d +nodepath-m/44'/60'/0'/38/ , relayerIndex-4 , address-f6e42a099ab1c0f10e31f7cbda6f4ab380797898 +nodepath-m/44'/60'/0'/38/ , relayerIndex-5 , address-ffbaa106989abe25c11030c6dc5595923c5d7b47 +nodepath-m/44'/60'/0'/38/ , relayerIndex-6 , address-cf9cfeb88f58f1032cb250481c7dbe73d0b69ea1 +nodepath-m/44'/60'/0'/38/ , relayerIndex-7 , address-11fec4b05a4420f7172fc6b161e49c0b231d9ca5 +nodepath-m/44'/60'/0'/38/ , relayerIndex-8 , address-bbf595a92adf1708d621170638a928e46c224e31 +nodepath-m/44'/60'/0'/38/ , relayerIndex-9 , address-c4465a3eb53d7bf62679bc9699814c34fec6e06e +nodepath-m/44'/60'/0'/38/ , relayerIndex-10 , address-ef64df2d741229972d20e94394ba4fd92cfbec59 +nodepath-m/44'/60'/0'/38/ , relayerIndex-11 , address-9b25b70db289b98ebd6dbccbb2a68b641bba55af +nodepath-m/44'/60'/0'/38/ , relayerIndex-12 , address-8f2b58ca5246596c08c68a0fdc21de12dd6b827e +nodepath-m/44'/60'/0'/38/ , relayerIndex-13 , address-ba763d9c5d5a365f646401d43547503a0f53c31f +nodepath-m/44'/60'/0'/38/ , relayerIndex-14 , address-2b7e2f8119c2f7e10af9b2c837d17b05da887d16 +nodepath-m/44'/60'/0'/38/ , relayerIndex-15 , address-e466534d95fea2fc7cfd4b3eba8b361b40bd3f7a +nodepath-m/44'/60'/0'/38/ , relayerIndex-16 , address-daff4f4e118214a92c6f26cea28e10d07e0c3ca6 +nodepath-m/44'/60'/0'/38/ , relayerIndex-17 , address-9b8de97562af91557b818d5d34dcf08609cdce4c +nodepath-m/44'/60'/0'/38/ , 
relayerIndex-18 , address-f1f18470003bb7bd9500f5faaaf32e76af0245f5 +nodepath-m/44'/60'/0'/38/ , relayerIndex-19 , address-96d32beb7b0bbf1b07c61ffe86e6f17db0a802c8 +nodepath-m/44'/60'/0'/38/ , relayerIndex-20 , address-4c38a0dd207f6a62b926f96e711ac121b1a89a36 +nodepath-m/44'/60'/0'/38/ , relayerIndex-21 , address-8e117a03bae08977e1537c2a0aa4c0e69bbfdcf7 +nodepath-m/44'/60'/0'/38/ , relayerIndex-22 , address-a0c23db29e4124ed2e599cede43df53808cda20e +nodepath-m/44'/60'/0'/38/ , relayerIndex-23 , address-938a0e868f75a8c55a418c204a3e2c7a3b7f1323 +nodepath-m/44'/60'/0'/38/ , relayerIndex-24 , address-b925096826c63584995728c4e6cb36acfaa49d45 +nodepath-m/44'/60'/0'/38/ , relayerIndex-25 , address-5636650701ea6c70e83da06e6d70f83c56cf9c78 +nodepath-m/44'/60'/0'/38/ , relayerIndex-26 , address-2d0bbaa5e643983c974ac0fdcdec6459d9a8757f +nodepath-m/44'/60'/0'/38/ , relayerIndex-27 , address-35b59a74ada41491097448198975d9c9ceba6f57 +nodepath-m/44'/60'/0'/38/ , relayerIndex-28 , address-1bf81d30deb85489b729c7efe122e6dee223ef43 +nodepath-m/44'/60'/0'/38/ , relayerIndex-29 , address-4d7a3e56da4d688f4061380bb7911304d8f5c8a8 +nodepath-m/44'/60'/0'/38/ , relayerIndex-30 , address-7f4178aa826ea0b67a8a0a8295b9869e421c4443 +nodepath-m/44'/60'/0'/38/ , relayerIndex-31 , address-816eee89fdba149b7cb69519ea39dfbf5a565239 +nodepath-m/44'/60'/0'/38/ , relayerIndex-32 , address-ff99409f9b27583564aa71d3f8fb73771372fe44 +nodepath-m/44'/60'/0'/38/ , relayerIndex-33 , address-f7d42eaf5bfe039f6b0d514f9b7ebf53d37c4188 +nodepath-m/44'/60'/0'/38/ , relayerIndex-34 , address-b137e8214648922d906f8b826cf3bae5252e4493 +nodepath-m/44'/60'/0'/38/ , relayerIndex-35 , address-171b6c9210556766c52c8b92a470f375d70828b3 +nodepath-m/44'/60'/0'/38/ , relayerIndex-36 , address-e78ad14c806ffd1418976d3dea2cc6d7d8430eaa +nodepath-m/44'/60'/0'/38/ , relayerIndex-37 , address-83be6c6bc2eefdfb68f60b9877dd4919b6624275 +nodepath-m/44'/60'/0'/38/ , relayerIndex-38 , address-ec203d785cc5aac17b4c6c8b94b1321be273eb16 +nodepath-m/44'/60'/0'/38/ , relayerIndex-39 , address-3795d86479453af5fd5cf8f27d932a337e0c330e +nodepath-m/44'/60'/0'/38/ , relayerIndex-40 , address-fd955ca8812bb28e8d5822137bff1513f1b1e3b2 +nodepath-m/44'/60'/0'/38/ , relayerIndex-41 , address-802c43cdd022d58c910d29c39ae4917035f1b964 +nodepath-m/44'/60'/0'/38/ , relayerIndex-42 , address-3a4b00885e457677319dda2759b708924eba82aa +nodepath-m/44'/60'/0'/38/ , relayerIndex-43 , address-e79974ac28558a75adfefb3efafcbfa7479734b0 +nodepath-m/44'/60'/0'/38/ , relayerIndex-44 , address-804bc207c4b937733b395cd2622ea6e31714b559 +nodepath-m/44'/60'/0'/38/ , relayerIndex-45 , address-2427ea27100763e77b69e5f1857def2dbae757d5 +nodepath-m/44'/60'/0'/38/ , relayerIndex-46 , address-96eea86209d1edae176256beca56d0e7011c682b +nodepath-m/44'/60'/0'/38/ , relayerIndex-47 , address-f58a5ad9f97f502aad15e1dd2aeac866cef75803 +nodepath-m/44'/60'/0'/38/ , relayerIndex-48 , address-1a7eb38bafada4a9558846c0c802c22c081d1c26 +nodepath-m/44'/60'/0'/38/ , relayerIndex-49 , address-b725c6f834ff418268571869c34e1f98a5782f0f +nodepath-m/44'/60'/0'/39/ , relayerIndex-0 , address-4329a7e633f4b4a5f8d3eb1b07e2a74e42818202 +nodepath-m/44'/60'/0'/39/ , relayerIndex-1 , address-ea786e7376d8d535a9b8c4c0f2d05d0c27d1b95d +nodepath-m/44'/60'/0'/39/ , relayerIndex-2 , address-e7eb38a367962b4a2fac02f2296b6eb03149d9f0 +nodepath-m/44'/60'/0'/39/ , relayerIndex-3 , address-cc3f0b126d307908f835fbc63bc0b4514a10c13b +nodepath-m/44'/60'/0'/39/ , relayerIndex-4 , address-2908880367db5881197b74d97b1bd92f53b8a361 +nodepath-m/44'/60'/0'/39/ , 
relayerIndex-5 , address-20572ab11c667014bb3d08846e95e87ea249826b +nodepath-m/44'/60'/0'/39/ , relayerIndex-6 , address-269ef7be5154a3fc2fa0314483f3126dc6a72391 +nodepath-m/44'/60'/0'/39/ , relayerIndex-7 , address-f3af2f0ee902a305f90665bbf087e2237d5bdffe +nodepath-m/44'/60'/0'/39/ , relayerIndex-8 , address-a3a84d187acd3e47a94844f7c2daadff7a906d27 +nodepath-m/44'/60'/0'/39/ , relayerIndex-9 , address-b8357e5ecd361857abe2e51640e03c832365efa2 +nodepath-m/44'/60'/0'/39/ , relayerIndex-10 , address-5e37d27b6374a78adeb77692bc4fc1115efaea5d +nodepath-m/44'/60'/0'/39/ , relayerIndex-11 , address-92f317c0a1415859b4c83ae05bd312e89c0c16dd +nodepath-m/44'/60'/0'/39/ , relayerIndex-12 , address-54ca6c071b1a9db94ef91ca5c0100c075ecf08ff +nodepath-m/44'/60'/0'/39/ , relayerIndex-13 , address-a0d892666e3ff150937e667afe6380d8ae278dac +nodepath-m/44'/60'/0'/39/ , relayerIndex-14 , address-84911c3fc93be8a218727ba2eede32db6e66186f +nodepath-m/44'/60'/0'/39/ , relayerIndex-15 , address-c2c08fe8a1fa5b2d15d100187a857db6de1c5e8c +nodepath-m/44'/60'/0'/39/ , relayerIndex-16 , address-9a8093b291f5acb63beb28933e60152cb01ab962 +nodepath-m/44'/60'/0'/39/ , relayerIndex-17 , address-c3a0acca2ef437716966fc939c85d45ac9f181e0 +nodepath-m/44'/60'/0'/39/ , relayerIndex-18 , address-56d72bdae978cd7ef1510da06df25cf2ce1347a6 +nodepath-m/44'/60'/0'/39/ , relayerIndex-19 , address-39c2a518533bc680f95cfc481e1039f33dfdc108 +nodepath-m/44'/60'/0'/39/ , relayerIndex-20 , address-8940eae4e75b49eb95b1b95d31330148c29f3f2d +nodepath-m/44'/60'/0'/39/ , relayerIndex-21 , address-d2da4a514182d7ea726035a1f4ff45d4c532c497 +nodepath-m/44'/60'/0'/39/ , relayerIndex-22 , address-18f2350186a834b54f37fed5905e9bb912ac2d86 +nodepath-m/44'/60'/0'/39/ , relayerIndex-23 , address-e9288b9180d5df25c0265fe091b753c712c659e5 +nodepath-m/44'/60'/0'/39/ , relayerIndex-24 , address-273d9b294f7125a47849c2d764f188c772897095 +nodepath-m/44'/60'/0'/39/ , relayerIndex-25 , address-016a0206d7bcec280943ffbfe34546366dfc302f +nodepath-m/44'/60'/0'/39/ , relayerIndex-26 , address-24eeaaf426adbe9c036320f09ee3a329eee3b7d3 +nodepath-m/44'/60'/0'/39/ , relayerIndex-27 , address-b4e3cd121deeae0a6347251a150d14d42bfd0af5 +nodepath-m/44'/60'/0'/39/ , relayerIndex-28 , address-c177ee75c1b3de6a1a01cc49531808503e7c2dab +nodepath-m/44'/60'/0'/39/ , relayerIndex-29 , address-93491a532ffa81953ed44b99614398386f9a92a9 +nodepath-m/44'/60'/0'/39/ , relayerIndex-30 , address-8881363de220999f1d10b138ac14b9b3c6f15dd5 +nodepath-m/44'/60'/0'/39/ , relayerIndex-31 , address-22b1567549429713077b8e32ea8f5427e81b9c07 +nodepath-m/44'/60'/0'/39/ , relayerIndex-32 , address-df8109d087e7cecc9a1bd562ca623741f899571f +nodepath-m/44'/60'/0'/39/ , relayerIndex-33 , address-5c430832483b60c8a4a75e1ff26a6d48c9df4759 +nodepath-m/44'/60'/0'/39/ , relayerIndex-34 , address-afa3fd06fbaed15a0e1fae16ec78b1cb83ee6d48 +nodepath-m/44'/60'/0'/39/ , relayerIndex-35 , address-93b18615e848227b9c6ddeb185dd1c6e6fc6f81f +nodepath-m/44'/60'/0'/39/ , relayerIndex-36 , address-11c428139d1dbce050b27dd16bf7676145815214 +nodepath-m/44'/60'/0'/39/ , relayerIndex-37 , address-a12a8d5bc208e0d290137021b88be99c30066ba8 +nodepath-m/44'/60'/0'/39/ , relayerIndex-38 , address-8132019669da7e466594c39b931c588b2f484544 +nodepath-m/44'/60'/0'/39/ , relayerIndex-39 , address-19adc5ab7b751a01575f410108ae0cbab49a1fc9 +nodepath-m/44'/60'/0'/39/ , relayerIndex-40 , address-f657556861ff7160ec2c1dd43dcecf53743b2e6f +nodepath-m/44'/60'/0'/39/ , relayerIndex-41 , address-8ed940915195526ec0faf532c9a1984cec61e96e +nodepath-m/44'/60'/0'/39/ , 
relayerIndex-42 , address-d259a8d8fd864f44510dea99b616fc0351acb9eb +nodepath-m/44'/60'/0'/39/ , relayerIndex-43 , address-7876f66d5e5c82557b98df12efeb066183220c5c +nodepath-m/44'/60'/0'/39/ , relayerIndex-44 , address-cb525a44e34895b836bdaab6e3760d513be5c2ca +nodepath-m/44'/60'/0'/39/ , relayerIndex-45 , address-1cc6019e8e7fe75474615b17252714aaa421774e +nodepath-m/44'/60'/0'/39/ , relayerIndex-46 , address-677c9d994410e8d6a5c5e911c2f973af5db3965d +nodepath-m/44'/60'/0'/39/ , relayerIndex-47 , address-df7939244bbaf6ae5d6bcec42ea8536a175a2507 +nodepath-m/44'/60'/0'/39/ , relayerIndex-48 , address-ba9c3858ccabe1aa3e5ad2e4357894288f695136 +nodepath-m/44'/60'/0'/39/ , relayerIndex-49 , address-7f721753089740e734bb4c5810c33434e1808f3e +nodepath-m/44'/60'/0'/40/ , relayerIndex-0 , address-a39a2aec54385005736371c785102aba16f0eaeb +nodepath-m/44'/60'/0'/40/ , relayerIndex-1 , address-059fe7ea98232c2f5a12c3730c64a72cf69fdb2d +nodepath-m/44'/60'/0'/40/ , relayerIndex-2 , address-672291e22c8ba04a126de5b338a5bdace2fc11a2 +nodepath-m/44'/60'/0'/40/ , relayerIndex-3 , address-7a009126a4062d4612cdc5d6a8bb76b6a1150588 +nodepath-m/44'/60'/0'/40/ , relayerIndex-4 , address-8369cef56305ffed2c0d08ab1dc23a9b9a6e6854 +nodepath-m/44'/60'/0'/40/ , relayerIndex-5 , address-7e239f878d229d9c5a343aada52d6fc914724b2b +nodepath-m/44'/60'/0'/40/ , relayerIndex-6 , address-5a220cbade477edce7c9ebdae8daa8ef921dc203 +nodepath-m/44'/60'/0'/40/ , relayerIndex-7 , address-a1307faa742af75726a6b3fb2d9d7110e389c129 +nodepath-m/44'/60'/0'/40/ , relayerIndex-8 , address-74f999a5f07e541a8395fd5441e6cd6ecd705810 +nodepath-m/44'/60'/0'/40/ , relayerIndex-9 , address-965ecd7bdb6762167a96e2d7324847ffdaf4035d +nodepath-m/44'/60'/0'/40/ , relayerIndex-10 , address-cb186fffd65b1543420c8bde1f6d99134940b394 +nodepath-m/44'/60'/0'/40/ , relayerIndex-11 , address-de862cbbff33ae3175991a800649b04162f3aed1 +nodepath-m/44'/60'/0'/40/ , relayerIndex-12 , address-b408be824fb44ae311dd5ae41a412658b6cf89df +nodepath-m/44'/60'/0'/40/ , relayerIndex-13 , address-cb2beffc22a3f32713cc0e56469c39fc6415fd22 +nodepath-m/44'/60'/0'/40/ , relayerIndex-14 , address-9f4e9ab33cab2e059d1214be74dec2545bc77e85 +nodepath-m/44'/60'/0'/40/ , relayerIndex-15 , address-4d301b5b1925d4b0d84be985690714db09f02056 +nodepath-m/44'/60'/0'/40/ , relayerIndex-16 , address-4250ee395698c37a1763c9d8e51821157c8fe079 +nodepath-m/44'/60'/0'/40/ , relayerIndex-17 , address-72c244ea5283f1219e8cec6bc9fcae5e2eb98398 +nodepath-m/44'/60'/0'/40/ , relayerIndex-18 , address-ccdcd511ac99d831d8b95f895682fb1d7074329c +nodepath-m/44'/60'/0'/40/ , relayerIndex-19 , address-c573eb461cb9397777dc1c98bc5928e7432fbe56 +nodepath-m/44'/60'/0'/40/ , relayerIndex-20 , address-e795f3db7166910b89f15c8273691d6989014bff +nodepath-m/44'/60'/0'/40/ , relayerIndex-21 , address-8f7fc177987562709c90a1c7d65dbd845cc9bbdc +nodepath-m/44'/60'/0'/40/ , relayerIndex-22 , address-8722c7df11c96628e0972a997af03ef67ccb7024 +nodepath-m/44'/60'/0'/40/ , relayerIndex-23 , address-1d529adcb97ad1422a7183d743db318c0c6298cb +nodepath-m/44'/60'/0'/40/ , relayerIndex-24 , address-29fa174cd3c3601e9c8bf6279cc71a91bb910932 +nodepath-m/44'/60'/0'/40/ , relayerIndex-25 , address-5d8016ca1317888f4e6d4796c8ef5f35d41183b3 +nodepath-m/44'/60'/0'/40/ , relayerIndex-26 , address-49cbdb617d0354897054ec701fc4e28a64fa5c12 +nodepath-m/44'/60'/0'/40/ , relayerIndex-27 , address-71e49475ee4d47ea1e5b20c2f6847a25af06ec5f +nodepath-m/44'/60'/0'/40/ , relayerIndex-28 , address-ec7bafdc57bd86772072ba07568da940f9f6eb5d +nodepath-m/44'/60'/0'/40/ , 
relayerIndex-29 , address-17f4e5367141620f090c969c76dc0d09d8806c53 +nodepath-m/44'/60'/0'/40/ , relayerIndex-30 , address-02cd24018fe90cadfb45ea80619f52fe6d28c1a2 +nodepath-m/44'/60'/0'/40/ , relayerIndex-31 , address-19522ddb8b3b6b0ccf809eb2ac582db75e5d659e +nodepath-m/44'/60'/0'/40/ , relayerIndex-32 , address-93b1aa39f6ad08767eaa49ba0c157ffe62f51508 +nodepath-m/44'/60'/0'/40/ , relayerIndex-33 , address-1b38c38f73319c375800babc69007460412d96c9 +nodepath-m/44'/60'/0'/40/ , relayerIndex-34 , address-d94b50ffeaf375f2d00c9cf3714e42b89d3cd36d +nodepath-m/44'/60'/0'/40/ , relayerIndex-35 , address-41d66f6faa7da43073555f29744e163e9d10dbee +nodepath-m/44'/60'/0'/40/ , relayerIndex-36 , address-d8508bbf08cef1be96d7f247e007dd5db083f5dc +nodepath-m/44'/60'/0'/40/ , relayerIndex-37 , address-1cdd9ea05c9445382e00aaea4780aae540ad2908 +nodepath-m/44'/60'/0'/40/ , relayerIndex-38 , address-0d112f286395c729ab15669adeb911d438907a4f +nodepath-m/44'/60'/0'/40/ , relayerIndex-39 , address-15fd15d445e57d32e8e5d373ffcda47f609edc68 +nodepath-m/44'/60'/0'/40/ , relayerIndex-40 , address-013d4648fc501ad55118f3a65aa3857619cad988 +nodepath-m/44'/60'/0'/40/ , relayerIndex-41 , address-59dd2ab9aa97a0f3f827c66af2472938b90c8149 +nodepath-m/44'/60'/0'/40/ , relayerIndex-42 , address-c8abbbc4cde1eb13dd8c06f2910b05918eb6d7d3 +nodepath-m/44'/60'/0'/40/ , relayerIndex-43 , address-2d645625ddc894f825fc217fdc5922b809b86d62 +nodepath-m/44'/60'/0'/40/ , relayerIndex-44 , address-c21daadaa6e22edf18ff41052132a6a25a5a2d8b +nodepath-m/44'/60'/0'/40/ , relayerIndex-45 , address-4bd60173c3d178655f2e876cf47755dfb8a592ee +nodepath-m/44'/60'/0'/40/ , relayerIndex-46 , address-a3f02b8a637a0bb37ea69c83b9c7a1c7b7e96007 +nodepath-m/44'/60'/0'/40/ , relayerIndex-47 , address-d6c9fb10da1782a8a8150d3907b22848482daa79 +nodepath-m/44'/60'/0'/40/ , relayerIndex-48 , address-ea3df1dbbdb920f50c448a6c160ad9504efa946f +nodepath-m/44'/60'/0'/40/ , relayerIndex-49 , address-090ed497d1c461d0c04e4fb0d594011422014280 +nodepath-m/44'/60'/0'/41/ , relayerIndex-0 , address-464575bbdb57f6b2755fd219a12efe1af1aad47c +nodepath-m/44'/60'/0'/41/ , relayerIndex-1 , address-8a388f03888982ad02571794063e79ecfefc2c99 +nodepath-m/44'/60'/0'/41/ , relayerIndex-2 , address-901c7c9a23a3e305fb666d005e05b0f1c689b6fd +nodepath-m/44'/60'/0'/41/ , relayerIndex-3 , address-0dc40d2a999a84a7ebe553752971bf4d1f7ccf72 +nodepath-m/44'/60'/0'/41/ , relayerIndex-4 , address-7bcc3791f8afcbc1793c5db5f7e507e7ef40f559 +nodepath-m/44'/60'/0'/41/ , relayerIndex-5 , address-1bd5b48bb6cb23ea21e0c8a26f6c3e9d12e81460 +nodepath-m/44'/60'/0'/41/ , relayerIndex-6 , address-ba97375313794708582dfd0cde8f52d095db7c2b +nodepath-m/44'/60'/0'/41/ , relayerIndex-7 , address-5ad5ed389030fe459160507e9bd6b0311384bb1f +nodepath-m/44'/60'/0'/41/ , relayerIndex-8 , address-7c75ba228c44614efc560d8f0cebbdabcf5e50e7 +nodepath-m/44'/60'/0'/41/ , relayerIndex-9 , address-f3ad5272d7313ca1ac015e29e5a9e6d217a1ac26 +nodepath-m/44'/60'/0'/41/ , relayerIndex-10 , address-185a789596c368829483ebe6c7b70f0c101ad577 +nodepath-m/44'/60'/0'/41/ , relayerIndex-11 , address-511741d18ea92075397d224d2e94ef41b0d819fe +nodepath-m/44'/60'/0'/41/ , relayerIndex-12 , address-cb5f89964d2d4b3db16cdc5c02e286c3f6b43481 +nodepath-m/44'/60'/0'/41/ , relayerIndex-13 , address-0f38df43a6ecd7428bcc41f15aaf3c2f50842db2 +nodepath-m/44'/60'/0'/41/ , relayerIndex-14 , address-634476e742d17de14e0fbf9c81279e417c352e25 +nodepath-m/44'/60'/0'/41/ , relayerIndex-15 , address-b6b9beb2e765dc1d1918ab924672c4438337c991 +nodepath-m/44'/60'/0'/41/ , 
relayerIndex-16 , address-d582d5ccf1f0f99d2f6c1aca62e881f32f0c0f0a +nodepath-m/44'/60'/0'/41/ , relayerIndex-17 , address-984db5103724d95d7d2c8646f5a5e46705e94a04 +nodepath-m/44'/60'/0'/41/ , relayerIndex-18 , address-0e840818b3f6d423537ec5bbe2d0af4ac6a546fb +nodepath-m/44'/60'/0'/41/ , relayerIndex-19 , address-4abd1ccc420927f9dec560b9855a608cc60b0cfd +nodepath-m/44'/60'/0'/41/ , relayerIndex-20 , address-90942d682104326b9717658665e7b9fc3e38e297 +nodepath-m/44'/60'/0'/41/ , relayerIndex-21 , address-c3a86a492cb3357d6696df2ae57d51bb85d1634b +nodepath-m/44'/60'/0'/41/ , relayerIndex-22 , address-ded8fbda7e1c8667e57a093113d93aa4b21b3dac +nodepath-m/44'/60'/0'/41/ , relayerIndex-23 , address-5b6abf3b2693eef3d2a8670bcb150deb5d2b3854 +nodepath-m/44'/60'/0'/41/ , relayerIndex-24 , address-51b7a3e631987bc050857f7eb22a655ae8ae15f9 +nodepath-m/44'/60'/0'/41/ , relayerIndex-25 , address-d01f90b180a4120305aaa265025ef105b95ac3f4 +nodepath-m/44'/60'/0'/41/ , relayerIndex-26 , address-92930265d16fddc7f50d9fd12332df31b9615cde +nodepath-m/44'/60'/0'/41/ , relayerIndex-27 , address-e04a9a6e9c980f067d83beef76424ed12be498e5 +nodepath-m/44'/60'/0'/41/ , relayerIndex-28 , address-8125667bdff8c309a449c2c5efac5376b5091f92 +nodepath-m/44'/60'/0'/41/ , relayerIndex-29 , address-de94337d9ce28a437330190e875e00148f495215 +nodepath-m/44'/60'/0'/41/ , relayerIndex-30 , address-33f8da1d6b09acfeb2236fc38ebe442dd83e83fd +nodepath-m/44'/60'/0'/41/ , relayerIndex-31 , address-3e6a6a39c59302404f018f84a28b5179c7e0c9a6 +nodepath-m/44'/60'/0'/41/ , relayerIndex-32 , address-c4ceb3cbb5ab96dd315bc50e3a0353a4e78ad077 +nodepath-m/44'/60'/0'/41/ , relayerIndex-33 , address-9cbae64638e27a89ffcbc9ff35e61a43cbd0d699 +nodepath-m/44'/60'/0'/41/ , relayerIndex-34 , address-d76e7048622397994b62aa087af2241203cd4f1b +nodepath-m/44'/60'/0'/41/ , relayerIndex-35 , address-b074bb210535717a310f4b8c798ecda7585b6576 +nodepath-m/44'/60'/0'/41/ , relayerIndex-36 , address-35e498d9a127e620c96c9ef0ec15e93d48d5b9ce +nodepath-m/44'/60'/0'/41/ , relayerIndex-37 , address-4e91371331843270cde9ab82b69a2ff70248ef00 +nodepath-m/44'/60'/0'/41/ , relayerIndex-38 , address-30cbecb9bff17cc2ba99dea90c98946e5ff2b48a +nodepath-m/44'/60'/0'/41/ , relayerIndex-39 , address-6b352fdba5824d9242290ad039508e885043ffe1 +nodepath-m/44'/60'/0'/41/ , relayerIndex-40 , address-449651bf63c85a1a9b2b9fb65f922e4017e67285 +nodepath-m/44'/60'/0'/41/ , relayerIndex-41 , address-42d71e6b78e04f3642437f648061e5af31d5e62e +nodepath-m/44'/60'/0'/41/ , relayerIndex-42 , address-ebb61d0d87b817b5ae1e25e63e5a83ce81f12ad8 +nodepath-m/44'/60'/0'/41/ , relayerIndex-43 , address-33e943e5bf79b6306f327ccb61f1922c0d120e63 +nodepath-m/44'/60'/0'/41/ , relayerIndex-44 , address-9f64b0c4fc41ab1a5d92bb66ec0d90bfc1b8afef +nodepath-m/44'/60'/0'/41/ , relayerIndex-45 , address-2edb931690f658cd5457c5f81059ffdb4e943421 +nodepath-m/44'/60'/0'/41/ , relayerIndex-46 , address-bc7ad6cd1f7f1acd2a646dd8a858dd27006a8712 +nodepath-m/44'/60'/0'/41/ , relayerIndex-47 , address-35b6a38eb4cd4e22c8d26a1046152a2bb6445a9a +nodepath-m/44'/60'/0'/41/ , relayerIndex-48 , address-0f90d445d34ce742eee6a04ac75c6a40693eb173 +nodepath-m/44'/60'/0'/41/ , relayerIndex-49 , address-6c01e7a70ec29b46a1bd7ab9fa57ca66a95f8c6f +nodepath-m/44'/60'/0'/42/ , relayerIndex-0 , address-825e8ee756397cddd249aad8461e0f83fa7f327b +nodepath-m/44'/60'/0'/42/ , relayerIndex-1 , address-42b33dbc0da7da332af8661d4ee3e5fe5c88a275 +nodepath-m/44'/60'/0'/42/ , relayerIndex-2 , address-f122ba99e507b403e3ddba7510e84bdf61ef8004 +nodepath-m/44'/60'/0'/42/ , 
relayerIndex-3 , address-a82734faf59d8663fbaac56b6faae3ca44aa516c +nodepath-m/44'/60'/0'/42/ , relayerIndex-4 , address-bf69ead4d5f091d0f1a0d35434236de1e0be7dbd +nodepath-m/44'/60'/0'/42/ , relayerIndex-5 , address-be22819432d59325831031b828796e75b6b60226 +nodepath-m/44'/60'/0'/42/ , relayerIndex-6 , address-75470313287e3776ee65bef1155ae5952a809bd7 +nodepath-m/44'/60'/0'/42/ , relayerIndex-7 , address-3afccce953865dde40a865fca7ec3913af9757f8 +nodepath-m/44'/60'/0'/42/ , relayerIndex-8 , address-62d8175c8ed667e77dc4ee02456182f637deaa0c +nodepath-m/44'/60'/0'/42/ , relayerIndex-9 , address-fc733cd162470b5dd62bd2fbdc9eede6da49fa33 +nodepath-m/44'/60'/0'/42/ , relayerIndex-10 , address-49ef65fdcfb7489f29ce1b44bd88db864ec75f50 +nodepath-m/44'/60'/0'/42/ , relayerIndex-11 , address-9fd6563969755f70bf88e62f10711e63135018bf +nodepath-m/44'/60'/0'/42/ , relayerIndex-12 , address-4e0fb96e434241c1c6f64b1588c9d24e65f8cd69 +nodepath-m/44'/60'/0'/42/ , relayerIndex-13 , address-24a146b5dbe3289beda9c43d10cb0ea32e420936 +nodepath-m/44'/60'/0'/42/ , relayerIndex-14 , address-6479b423032dc742a4c5f59d344179fd65a1ddd9 +nodepath-m/44'/60'/0'/42/ , relayerIndex-15 , address-4e3ea3647e03270dfaed2dcdb9998a558ab1f3ff +nodepath-m/44'/60'/0'/42/ , relayerIndex-16 , address-2e3233033e84faa7c2625bfb40eb8a89b8e088ef +nodepath-m/44'/60'/0'/42/ , relayerIndex-17 , address-3da3a9328d6575d9f73920d3c56fe58d0184b3dd +nodepath-m/44'/60'/0'/42/ , relayerIndex-18 , address-0e8f7c95d9efa40fd088f729eea490342d0a3207 +nodepath-m/44'/60'/0'/42/ , relayerIndex-19 , address-82db9d613945f561f60a66b23c3a352818be569f +nodepath-m/44'/60'/0'/42/ , relayerIndex-20 , address-f854c0a8a33488ede96df5923ecbb36e99caf793 +nodepath-m/44'/60'/0'/42/ , relayerIndex-21 , address-6aefaae745c3ec9154c9cd30c3008397ff6dbc7c +nodepath-m/44'/60'/0'/42/ , relayerIndex-22 , address-6f0c57d00788030f97babf97e3c8f09c65420d8b +nodepath-m/44'/60'/0'/42/ , relayerIndex-23 , address-c8404ddbbaae4f1fa54e05b1fe317ffb67ca244a +nodepath-m/44'/60'/0'/42/ , relayerIndex-24 , address-4dadb887e182632f78fd055668becd8085ad451e +nodepath-m/44'/60'/0'/42/ , relayerIndex-25 , address-a36500bd413f0b6f7711ef253bc05c7ccfb4ae85 +nodepath-m/44'/60'/0'/42/ , relayerIndex-26 , address-7a7790ea7f68cdb02b4b231e8dd9294925fdb3cc +nodepath-m/44'/60'/0'/42/ , relayerIndex-27 , address-694dfb7270bba8daad2d9b4b17b8f1e816306b79 +nodepath-m/44'/60'/0'/42/ , relayerIndex-28 , address-f93b967839a1182f7227950bce107bef8843ff44 +nodepath-m/44'/60'/0'/42/ , relayerIndex-29 , address-2b01426765de59c485ceb1bb80621af48a33bcb7 +nodepath-m/44'/60'/0'/42/ , relayerIndex-30 , address-32c93ef300275d58c8613ea2021da0e0acef4c53 +nodepath-m/44'/60'/0'/42/ , relayerIndex-31 , address-21352eab856833c82c49addf6bac98997f7e35c3 +nodepath-m/44'/60'/0'/42/ , relayerIndex-32 , address-7585f27684977afd6175bd57663964915bb7daaa +nodepath-m/44'/60'/0'/42/ , relayerIndex-33 , address-22fdb08c4f4e2f22684fdf1fba21eb3c2cd45413 +nodepath-m/44'/60'/0'/42/ , relayerIndex-34 , address-8e28a1ed5064cbb70416c48d467eaec9deeabc9f +nodepath-m/44'/60'/0'/42/ , relayerIndex-35 , address-f02c76bc9db4b3931dbf48b93179e527ca17312a +nodepath-m/44'/60'/0'/42/ , relayerIndex-36 , address-3b10f30d07b1009ac1d418eeea2b3da3699ea78b +nodepath-m/44'/60'/0'/42/ , relayerIndex-37 , address-2c05e34fa9f274a98c9a1ce75ba1d717adce4cb0 +nodepath-m/44'/60'/0'/42/ , relayerIndex-38 , address-e6e4acd41562a0c5c989d0f009cc9276aac3f9b4 +nodepath-m/44'/60'/0'/42/ , relayerIndex-39 , address-ba59a00056d323e73097490bb19333ef489bb1e9 +nodepath-m/44'/60'/0'/42/ , 
relayerIndex-40 , address-f6da357180dd4af8b9b92a5f98fde9ed472980e2 +nodepath-m/44'/60'/0'/42/ , relayerIndex-41 , address-0d6b027c1211376a747fb9514b8e42148c33b853 +nodepath-m/44'/60'/0'/42/ , relayerIndex-42 , address-0de733a5e0d6b88625a0ef50f28bb770f4959c2d +nodepath-m/44'/60'/0'/42/ , relayerIndex-43 , address-9691fc008c66158810b9412dc2c5730c0372ca11 +nodepath-m/44'/60'/0'/42/ , relayerIndex-44 , address-1163114abc37db7f939ec164fb35557ae0dffff7 +nodepath-m/44'/60'/0'/42/ , relayerIndex-45 , address-c0827d380a2454a7a3a168ce167a74cbad1ecebe +nodepath-m/44'/60'/0'/42/ , relayerIndex-46 , address-a9ef59bd6d98e43e76ce3bc7a508e4bd8d16f276 +nodepath-m/44'/60'/0'/42/ , relayerIndex-47 , address-dd9d2b3a5ae384b629d82024152d131501d3ab54 +nodepath-m/44'/60'/0'/42/ , relayerIndex-48 , address-e41da9772074d147eea23f64ab4289b785c28b28 +nodepath-m/44'/60'/0'/42/ , relayerIndex-49 , address-98dc3c3a81793df59a542224b23de76262dc8423 +nodepath-m/44'/60'/0'/43/ , relayerIndex-0 , address-5b825563ec8b7cd2c825bda7978e4c1f1f5bb2ee +nodepath-m/44'/60'/0'/43/ , relayerIndex-1 , address-343fa727e7c9bb900433af66086b30378fdba937 +nodepath-m/44'/60'/0'/43/ , relayerIndex-2 , address-5b19acc55ed95148fcd4a0b81c241b66cdbf3275 +nodepath-m/44'/60'/0'/43/ , relayerIndex-3 , address-d5d6d9d61e4adc3d8bdd94817b9d584dcc9b6580 +nodepath-m/44'/60'/0'/43/ , relayerIndex-4 , address-08e062189b66e1855abaa9ddf7fc1d6af41348e3 +nodepath-m/44'/60'/0'/43/ , relayerIndex-5 , address-8f8ecfb6337671e2d3a31e10e73475f60e7a231d +nodepath-m/44'/60'/0'/43/ , relayerIndex-6 , address-0a269cb01caff073b734c78f23712c8a82746344 +nodepath-m/44'/60'/0'/43/ , relayerIndex-7 , address-e07b83b7ec3db6f6776c91f25fc089bdb7d5eeb8 +nodepath-m/44'/60'/0'/43/ , relayerIndex-8 , address-556fe14b6e0c891a2e32ad9c38febc31b09257ca +nodepath-m/44'/60'/0'/43/ , relayerIndex-9 , address-3baed98786e6b0c2c8b0c114a4f3aafbaa3cd0b9 +nodepath-m/44'/60'/0'/43/ , relayerIndex-10 , address-14ef74a839bf09b1a7073323444f5322bdb14f5a +nodepath-m/44'/60'/0'/43/ , relayerIndex-11 , address-418bf8865790911bc106df367c8cf2bf7b6c45d3 +nodepath-m/44'/60'/0'/43/ , relayerIndex-12 , address-4073d5ff75d4fcbbf416f1bc5f3efce7bae1e4ee +nodepath-m/44'/60'/0'/43/ , relayerIndex-13 , address-e640beeb42751f058ee0b8828efe454dd37a159a +nodepath-m/44'/60'/0'/43/ , relayerIndex-14 , address-67c5b61558052cc1d53fe8c8b49a7289b311b77e +nodepath-m/44'/60'/0'/43/ , relayerIndex-15 , address-ada68be868015a63d57333caa839abd1f9001f8c +nodepath-m/44'/60'/0'/43/ , relayerIndex-16 , address-a46050227b2c2d3c268cf85efda4e8716a40718b +nodepath-m/44'/60'/0'/43/ , relayerIndex-17 , address-5dc9338cfd193cc6bc0dfef2c1bd2cd896b88934 +nodepath-m/44'/60'/0'/43/ , relayerIndex-18 , address-f35ebb96c293ea86ee26137c537ca2b1bdb34c11 +nodepath-m/44'/60'/0'/43/ , relayerIndex-19 , address-02764e4f5fe6409542e1593b7916ee6dc4b38ed9 +nodepath-m/44'/60'/0'/43/ , relayerIndex-20 , address-49e54a2e8148841fb858f242ea76543cf7bb49bb +nodepath-m/44'/60'/0'/43/ , relayerIndex-21 , address-10bdb7cd5ffa786a50d3c43db2e8381ba1bd7cec +nodepath-m/44'/60'/0'/43/ , relayerIndex-22 , address-62020565dd9129d60c93745420c8008fa5720f70 +nodepath-m/44'/60'/0'/43/ , relayerIndex-23 , address-0d05b42e4a8ff4ffda19f2039f0a5adc33975f54 +nodepath-m/44'/60'/0'/43/ , relayerIndex-24 , address-821b149a98581b2f509ffb36dac97f7c8c87b917 +nodepath-m/44'/60'/0'/43/ , relayerIndex-25 , address-25293fed5c1890b539dd2543c9c28adf3a42b348 +nodepath-m/44'/60'/0'/43/ , relayerIndex-26 , address-884dbc90d40aa2ba8213a54a8850402408ffc60d +nodepath-m/44'/60'/0'/43/ , 
relayerIndex-27 , address-2f549676c0d77834607b0de74c36764a8b75b6c2 +nodepath-m/44'/60'/0'/43/ , relayerIndex-28 , address-bb9f56cf3fba383dea67c235dad4aff71d5fd077 +nodepath-m/44'/60'/0'/43/ , relayerIndex-29 , address-76f7b051eb5723c9d228af9f5442e2bfa22e8199 +nodepath-m/44'/60'/0'/43/ , relayerIndex-30 , address-6b6c405c535a1e328a0a941317e2ed658e0d565f +nodepath-m/44'/60'/0'/43/ , relayerIndex-31 , address-5fa6cfec23709a551c30bd4da5e5ef8518660ae4 +nodepath-m/44'/60'/0'/43/ , relayerIndex-32 , address-b923aba9d929ac0400a1a515a78ad4dc1d552372 +nodepath-m/44'/60'/0'/43/ , relayerIndex-33 , address-49dcb006bdb47b87cb6227975e77be16965dcd9a +nodepath-m/44'/60'/0'/43/ , relayerIndex-34 , address-91558b077056a864ad22aba3249310ff764dbf66 +nodepath-m/44'/60'/0'/43/ , relayerIndex-35 , address-4a7088468d59e8a2312176d4ccb6a37e47687327 +nodepath-m/44'/60'/0'/43/ , relayerIndex-36 , address-bcb54d772d741c029af77acdc560416da3fc6652 +nodepath-m/44'/60'/0'/43/ , relayerIndex-37 , address-8dfa488a972f2c7521bd379e0a38678800b44025 +nodepath-m/44'/60'/0'/43/ , relayerIndex-38 , address-171c3d551b2040506e8aef180e154cd4ce170f80 +nodepath-m/44'/60'/0'/43/ , relayerIndex-39 , address-9e719a9d51c23659c6b6c60a95c0014f6d2b101a +nodepath-m/44'/60'/0'/43/ , relayerIndex-40 , address-046ae7be4e2002b76eba71d7c8a80dd1aefd4d0c +nodepath-m/44'/60'/0'/43/ , relayerIndex-41 , address-abba065a771da82683a090d0495f1501130307bd +nodepath-m/44'/60'/0'/43/ , relayerIndex-42 , address-781ba8e859449924ac77c840c1b60854331b071c +nodepath-m/44'/60'/0'/43/ , relayerIndex-43 , address-f417aa39b9130c92855409b4858e875487fe79a2 +nodepath-m/44'/60'/0'/43/ , relayerIndex-44 , address-2968ead3b06aeaa75d1f145d32b14b2f8185b899 +nodepath-m/44'/60'/0'/43/ , relayerIndex-45 , address-70fca92cad1eef9feb4f9855e6a024d7f1e01275 +nodepath-m/44'/60'/0'/43/ , relayerIndex-46 , address-bb23e70ce3ebc4cadf6399d03a44287f34cd9e3d +nodepath-m/44'/60'/0'/43/ , relayerIndex-47 , address-85db518dfe1faf94e9214e324601c95e8312ec22 +nodepath-m/44'/60'/0'/43/ , relayerIndex-48 , address-de0008bb5cf7402a2e5b24ddfdcfaf5004ea5acf +nodepath-m/44'/60'/0'/43/ , relayerIndex-49 , address-c9827e05c5d2ba91d3db97484441e340017fab32 +nodepath-m/44'/60'/0'/44/ , relayerIndex-0 , address-8fd2c4741507d7acfccbfb56b516dd8d8bdf96ad +nodepath-m/44'/60'/0'/44/ , relayerIndex-1 , address-3899b3b2f352ed67ed3f42dea8002b70e4d61317 +nodepath-m/44'/60'/0'/44/ , relayerIndex-2 , address-00c91c7dd8ab2a5f475f6c043703ebbdb24ef336 +nodepath-m/44'/60'/0'/44/ , relayerIndex-3 , address-b9e8ee2399d2dcefba7261cfa1a053c26a1eff3b +nodepath-m/44'/60'/0'/44/ , relayerIndex-4 , address-4169e84dd72088ae209e853e04bfeb46c9293e91 +nodepath-m/44'/60'/0'/44/ , relayerIndex-5 , address-188631dd7ebdadcecda2d69d98243e479d81e9a1 +nodepath-m/44'/60'/0'/44/ , relayerIndex-6 , address-c3dd17f1d405b6377807b2168ef3cdfb9cf01eed +nodepath-m/44'/60'/0'/44/ , relayerIndex-7 , address-4b9c4d63d559b6895d28e6ecc1cd390859c44616 +nodepath-m/44'/60'/0'/44/ , relayerIndex-8 , address-2a7839f48042fcb4ee15842313766d10ec6651dc +nodepath-m/44'/60'/0'/44/ , relayerIndex-9 , address-0fd1fd9ec57049bb0ea345276d61859bd0f02f64 +nodepath-m/44'/60'/0'/44/ , relayerIndex-10 , address-5e487b3f8594f40eb3897adb5d06c4c1cf2359e1 +nodepath-m/44'/60'/0'/44/ , relayerIndex-11 , address-cad532967ff3e362d96ab8f1c2b67b2c50ecc4f9 +nodepath-m/44'/60'/0'/44/ , relayerIndex-12 , address-ede33c381f50601c1043aa3a98e8d59ad466b606 +nodepath-m/44'/60'/0'/44/ , relayerIndex-13 , address-ddc8d50863d71e3ab583cea25d2971f708f7a318 +nodepath-m/44'/60'/0'/44/ , 
relayerIndex-14 , address-ea1b4c38d16348bc5a336fb28a57f9dbd51ff093 +nodepath-m/44'/60'/0'/44/ , relayerIndex-15 , address-526dbfff2d4cd2d1c1782da0c38058ef6ff64eb9 +nodepath-m/44'/60'/0'/44/ , relayerIndex-16 , address-9e1553cacf7ea5d22404015eb71a8216ec1f4bf1 +nodepath-m/44'/60'/0'/44/ , relayerIndex-17 , address-6459d87f9872f1b56a75ca8d1c152da115599276 +nodepath-m/44'/60'/0'/44/ , relayerIndex-18 , address-ef2f4039cf5d360e0bd34ac38ae4e2ba21e0eaa0 +nodepath-m/44'/60'/0'/44/ , relayerIndex-19 , address-dc9cb8528cc22a0ff3a3018395132144432bed83 +nodepath-m/44'/60'/0'/44/ , relayerIndex-20 , address-341d65c8950372f5e149873747fc0a47b86b00ae +nodepath-m/44'/60'/0'/44/ , relayerIndex-21 , address-4374cbc7ffdf6e113844b6b0d3502893890ab0e0 +nodepath-m/44'/60'/0'/44/ , relayerIndex-22 , address-a71a0cfb79ef4b57bd332c0d7bf24bc94ffa68bb +nodepath-m/44'/60'/0'/44/ , relayerIndex-23 , address-dee6b1257004a61d0cb3caa81a429340ce211732 +nodepath-m/44'/60'/0'/44/ , relayerIndex-24 , address-7f9c249e036373d475d108838552732f58a5bbdc +nodepath-m/44'/60'/0'/44/ , relayerIndex-25 , address-5679a24f1333b1975ef3bd5c5041ab779f9eccbe +nodepath-m/44'/60'/0'/44/ , relayerIndex-26 , address-1522ea58fbbe46f937edb421f57890e8282f693f +nodepath-m/44'/60'/0'/44/ , relayerIndex-27 , address-708db212b5f8d74f175725e44b205318268b821c +nodepath-m/44'/60'/0'/44/ , relayerIndex-28 , address-e8aaaea5b678b38efaa8c8590759017d59a0dbec +nodepath-m/44'/60'/0'/44/ , relayerIndex-29 , address-d468e40a855787debb37f748135ac964db521cfe +nodepath-m/44'/60'/0'/44/ , relayerIndex-30 , address-05f140046bee1dcab784dc6aa459b6614da577cd +nodepath-m/44'/60'/0'/44/ , relayerIndex-31 , address-a6bf95c5e04e0d5225ee5d521b627cc962bb795b +nodepath-m/44'/60'/0'/44/ , relayerIndex-32 , address-495b1470bef2b271617ef856bca82e0949734bfb +nodepath-m/44'/60'/0'/44/ , relayerIndex-33 , address-a564badbca16c8770cc4de656279c8baa34a9c10 +nodepath-m/44'/60'/0'/44/ , relayerIndex-34 , address-f30bf601aadc136cfaf14fafee717dc38577b164 +nodepath-m/44'/60'/0'/44/ , relayerIndex-35 , address-a1c3d2d166c9640fc9e2c632dc481c9e3cfa6103 +nodepath-m/44'/60'/0'/44/ , relayerIndex-36 , address-7a2865b7d6cda918992914b0e1fdc3679dbad74e +nodepath-m/44'/60'/0'/44/ , relayerIndex-37 , address-700263c1042536e251e1c47f221745daccaf77bd +nodepath-m/44'/60'/0'/44/ , relayerIndex-38 , address-3fe77529941d17636881bb4370c6701bf24647fd +nodepath-m/44'/60'/0'/44/ , relayerIndex-39 , address-efa05c8baccc426d9daacdcddf6dfe4c956cd1de +nodepath-m/44'/60'/0'/44/ , relayerIndex-40 , address-53ef5f4b690c9d023a9a090ed35033f69f7b8b56 +nodepath-m/44'/60'/0'/44/ , relayerIndex-41 , address-9f26040b8f215bbaaa011ab7acd33b95a12d39c3 +nodepath-m/44'/60'/0'/44/ , relayerIndex-42 , address-3a708e5c985abac1e13e5d744c752e25cdc437a2 +nodepath-m/44'/60'/0'/44/ , relayerIndex-43 , address-0a38b07d95774b923bb1abb3657b91571cf55ab8 +nodepath-m/44'/60'/0'/44/ , relayerIndex-44 , address-5c60c4170eaad9ed2769eee6293adf0966aa2fa5 +nodepath-m/44'/60'/0'/44/ , relayerIndex-45 , address-2dd229042e3dc6f63c91c59362308007da416f8d +nodepath-m/44'/60'/0'/44/ , relayerIndex-46 , address-b384dfccd26c1a340bdbabd52aff67900bd244b3 +nodepath-m/44'/60'/0'/44/ , relayerIndex-47 , address-d96a36b13e20d11b392ec50b93d9240758807a84 +nodepath-m/44'/60'/0'/44/ , relayerIndex-48 , address-9386d5a0500757ed322bfd1da8e557f09f6813d0 +nodepath-m/44'/60'/0'/44/ , relayerIndex-49 , address-79a324bddb3452c7d3419ff75a902e8a2cf093ae +nodepath-m/44'/60'/0'/45/ , relayerIndex-0 , address-fe86572f9785c0227d53ed63748c482fa2042668 +nodepath-m/44'/60'/0'/45/ , 
relayerIndex-1 , address-ecfdc957eaed2dd208239ddc2ed90b8a19c15d42 +nodepath-m/44'/60'/0'/45/ , relayerIndex-2 , address-6c98e5281083a86158dedfe1dcc9098577bdf0ab +nodepath-m/44'/60'/0'/45/ , relayerIndex-3 , address-ab0414d9672950666b3be7354cd780c60ac46a45 +nodepath-m/44'/60'/0'/45/ , relayerIndex-4 , address-8912e78b18716d1b41abfb6eb762795de5ebff7e +nodepath-m/44'/60'/0'/45/ , relayerIndex-5 , address-a577fe874f06b5950a5b982572439d7881ba2081 +nodepath-m/44'/60'/0'/45/ , relayerIndex-6 , address-3cdfc1345a60b069de9479dc5cb41646f950f2ba +nodepath-m/44'/60'/0'/45/ , relayerIndex-7 , address-98af350447f48feaf49cb26e7e67c2a076993c0b +nodepath-m/44'/60'/0'/45/ , relayerIndex-8 , address-87db06df9c7dade1e7960751d124c52bc38bbcc0 +nodepath-m/44'/60'/0'/45/ , relayerIndex-9 , address-3a3368b2db40c5392eda14e2d4de9652fba54eae +nodepath-m/44'/60'/0'/45/ , relayerIndex-10 , address-ab3e82a2bb16b006f7681ba104c891b2ff0505ef +nodepath-m/44'/60'/0'/45/ , relayerIndex-11 , address-5271e26c3d5434daa1e46c88fe32a83084f3ca1f +nodepath-m/44'/60'/0'/45/ , relayerIndex-12 , address-020b124a30cbdbd3017c8890e3b9fc7e5545cdfc +nodepath-m/44'/60'/0'/45/ , relayerIndex-13 , address-9efadcf8ce19d7953d2d3f0151b8f5e200530874 +nodepath-m/44'/60'/0'/45/ , relayerIndex-14 , address-b321ee375c625af83ab411dace871b9b4858b7ee +nodepath-m/44'/60'/0'/45/ , relayerIndex-15 , address-b335e58945cc99f159d9d7d3ab3e6ff81d8d5285 +nodepath-m/44'/60'/0'/45/ , relayerIndex-16 , address-71688098623aa79199a4788ad7513f557d0d1404 +nodepath-m/44'/60'/0'/45/ , relayerIndex-17 , address-37cfb2955fdc1a0be2f0372506d153b200e107e9 +nodepath-m/44'/60'/0'/45/ , relayerIndex-18 , address-0bfcf32eec1d7f70403d5f8a428e5900a0ed271f +nodepath-m/44'/60'/0'/45/ , relayerIndex-19 , address-cafe7be692fbd32bd2ad6d0375929b580a0da0f7 +nodepath-m/44'/60'/0'/45/ , relayerIndex-20 , address-9cf97d881d25de8856cad5f0e2b9a547d8690185 +nodepath-m/44'/60'/0'/45/ , relayerIndex-21 , address-5638146f9e3cfb46449fc30d2200928fd090b341 +nodepath-m/44'/60'/0'/45/ , relayerIndex-22 , address-c69f071c8efa46cb3dd4dd7b3904ee07b791a02f +nodepath-m/44'/60'/0'/45/ , relayerIndex-23 , address-9d2d774267152edbacf70ac741300ab8fd3fae16 +nodepath-m/44'/60'/0'/45/ , relayerIndex-24 , address-3619307cf1974de3bae377960a957909666f4fcf +nodepath-m/44'/60'/0'/45/ , relayerIndex-25 , address-d8d0c797e737ebaae00f22fa6d95058db28c0e79 +nodepath-m/44'/60'/0'/45/ , relayerIndex-26 , address-489d6b2644cc6c23212de8f7e5453767501481b4 +nodepath-m/44'/60'/0'/45/ , relayerIndex-27 , address-c922bb9ee38a862b5d9188577d21b4dc594d6382 +nodepath-m/44'/60'/0'/45/ , relayerIndex-28 , address-09ecfa6735ca5e36c49da1e7f75abbf7ccf61d29 +nodepath-m/44'/60'/0'/45/ , relayerIndex-29 , address-52b875ceda981d37525c2456e62e1080c894f98e +nodepath-m/44'/60'/0'/45/ , relayerIndex-30 , address-53c1d69a6739e05a9ec3cb0f756bf01b5feefd62 +nodepath-m/44'/60'/0'/45/ , relayerIndex-31 , address-7947432db86e7863cd1764d0b63531c4c3024946 +nodepath-m/44'/60'/0'/45/ , relayerIndex-32 , address-ae378a93404925f554570287da164fd2c6e685c2 +nodepath-m/44'/60'/0'/45/ , relayerIndex-33 , address-b8d7bf510c5a4caa5f1de87c8500f06922984c93 +nodepath-m/44'/60'/0'/45/ , relayerIndex-34 , address-786029c820ed257a824ff0433e1816b78a99682b +nodepath-m/44'/60'/0'/45/ , relayerIndex-35 , address-d88efae02b2845556ac3746ffa0b3fd235d889c9 +nodepath-m/44'/60'/0'/45/ , relayerIndex-36 , address-4e931df7e0f358981f170c231d7d27b67459cbbe +nodepath-m/44'/60'/0'/45/ , relayerIndex-37 , address-6b6a2f079b0b91c84f762b47312326843dfd8549 +nodepath-m/44'/60'/0'/45/ , 
relayerIndex-38 , address-0d6afd7b3f5f5ee7d34b8cab3731239a936c3bdf +nodepath-m/44'/60'/0'/45/ , relayerIndex-39 , address-599253cd607b90c648dfa3a80f7cc00740037207 +nodepath-m/44'/60'/0'/45/ , relayerIndex-40 , address-a011416e831bca384687fd689aa65e0933f01b19 +nodepath-m/44'/60'/0'/45/ , relayerIndex-41 , address-1db4a8aa605164d435e54c21a398da44dd897a61 +nodepath-m/44'/60'/0'/45/ , relayerIndex-42 , address-2d0b276258f4b69c51480ba20d8c1f2cae0f7909 +nodepath-m/44'/60'/0'/45/ , relayerIndex-43 , address-541b83843ff02c816c76d1a25a96cd00debb7d77 +nodepath-m/44'/60'/0'/45/ , relayerIndex-44 , address-debb6d846d8bfa888d7606cfc58653ca75c7f885 +nodepath-m/44'/60'/0'/45/ , relayerIndex-45 , address-23abc65689965d8f03aa85a8c053e2a91da97b91 +nodepath-m/44'/60'/0'/45/ , relayerIndex-46 , address-7fd3b265fb97105581754278430873db3cc3c5ba +nodepath-m/44'/60'/0'/45/ , relayerIndex-47 , address-2cc8a849abaf269fa1152798d160c71a37d07d5a +nodepath-m/44'/60'/0'/45/ , relayerIndex-48 , address-753e728aeefdbdf768a1687e88dd3d3e8d4b35f1 +nodepath-m/44'/60'/0'/45/ , relayerIndex-49 , address-05563dbd48e5b40c8522b0607a339221e652069f +nodepath-m/44'/60'/0'/46/ , relayerIndex-0 , address-24578b271c6e9c14b3493b0ddecaf36ebd0d375c +nodepath-m/44'/60'/0'/46/ , relayerIndex-1 , address-37d8dfa0d0e6273e1163a2f904509b21f37d21c1 +nodepath-m/44'/60'/0'/46/ , relayerIndex-2 , address-9e70b63df3f5e972b29632e8d7addaeb89c34621 +nodepath-m/44'/60'/0'/46/ , relayerIndex-3 , address-a5d6142b2509e65d746d2a9a9fd254a53dfde627 +nodepath-m/44'/60'/0'/46/ , relayerIndex-4 , address-d2db9d637d324ef68b09a5f788e4b4126ce485b4 +nodepath-m/44'/60'/0'/46/ , relayerIndex-5 , address-387cb020209985055eac9073b03d6ccf0c15ff90 +nodepath-m/44'/60'/0'/46/ , relayerIndex-6 , address-f8727924c97da7b36016491db0204da39edabee2 +nodepath-m/44'/60'/0'/46/ , relayerIndex-7 , address-480bcf6ff660684e830a0d70067bd5b766f2e22b +nodepath-m/44'/60'/0'/46/ , relayerIndex-8 , address-c968523f52731b5145ccb962f2c2a4c563bd8bcd +nodepath-m/44'/60'/0'/46/ , relayerIndex-9 , address-fa4696607ed30320c9d1daf766e4c30e7cb9def7 +nodepath-m/44'/60'/0'/46/ , relayerIndex-10 , address-f5dacda1a320aa63a9d2c6950a7b280254ac269b +nodepath-m/44'/60'/0'/46/ , relayerIndex-11 , address-0408d985d26495a5a65bbca469a13c05f726338a +nodepath-m/44'/60'/0'/46/ , relayerIndex-12 , address-2a101e25d10566b5818a2c0b5e79eeb431c3be2f +nodepath-m/44'/60'/0'/46/ , relayerIndex-13 , address-6abdbd1c9e5ee5e47159cf78447b01344b23a418 +nodepath-m/44'/60'/0'/46/ , relayerIndex-14 , address-14a311061c0d3c6b147659c4e77b201fe1bbd9ea +nodepath-m/44'/60'/0'/46/ , relayerIndex-15 , address-e8bb5c0e6b80d1c3c28bc812bc1ad022e6b9c0d6 +nodepath-m/44'/60'/0'/46/ , relayerIndex-16 , address-3d5c4511342191a70b34747fc89d2c1c5232b3fb +nodepath-m/44'/60'/0'/46/ , relayerIndex-17 , address-4c845689d7058c80c75c3d7cc98bac76455a3d85 +nodepath-m/44'/60'/0'/46/ , relayerIndex-18 , address-cea30a7da184b18bf6abce3d4f6b9e5064ff6db6 +nodepath-m/44'/60'/0'/46/ , relayerIndex-19 , address-9cc5bbab4eadaf7bca8e9dd127e148674f28626b +nodepath-m/44'/60'/0'/46/ , relayerIndex-20 , address-e5201a7cedaad39e3167ee7c0d968d6745c10da8 +nodepath-m/44'/60'/0'/46/ , relayerIndex-21 , address-999b8d23797c584fae3b4eeb1d34ec7c47c09947 +nodepath-m/44'/60'/0'/46/ , relayerIndex-22 , address-0e693d8977bc1b64ff8ba886e2c06d1499ac96af +nodepath-m/44'/60'/0'/46/ , relayerIndex-23 , address-3ed6c0fdb708baa70c87e3b3d74b41a71e9b675a +nodepath-m/44'/60'/0'/46/ , relayerIndex-24 , address-5aa5533e35b91a7f2ea6804521530d20cb0bc04a +nodepath-m/44'/60'/0'/46/ , 
relayerIndex-25 , address-5bf2a054b2dfa11a459c9bda015b7a45c445184d +nodepath-m/44'/60'/0'/46/ , relayerIndex-26 , address-fb703188009243036104c93f32dbce9395df41de +nodepath-m/44'/60'/0'/46/ , relayerIndex-27 , address-c5b27bc8da4a419bfc1fa041e5e5161408e142e8 +nodepath-m/44'/60'/0'/46/ , relayerIndex-28 , address-d143d2d99682a8034cc0e4018c03cd7484ece42a +nodepath-m/44'/60'/0'/46/ , relayerIndex-29 , address-6b65b6f47cc5d7d35bd502e009e31ba2f097165a +nodepath-m/44'/60'/0'/46/ , relayerIndex-30 , address-727d53351dc46bd4621e336cb00d3a45b5b8a6fc +nodepath-m/44'/60'/0'/46/ , relayerIndex-31 , address-f4f5e504464833041da30aed43e5eff8788d5444 +nodepath-m/44'/60'/0'/46/ , relayerIndex-32 , address-79e913679ab7a930615ec60bc5234f1f53e8a691 +nodepath-m/44'/60'/0'/46/ , relayerIndex-33 , address-9373afeffe3198dc81cb79996e4c37f8fb8b0efe +nodepath-m/44'/60'/0'/46/ , relayerIndex-34 , address-ca56dca87a6ae3411306742dee3973c917531550 +nodepath-m/44'/60'/0'/46/ , relayerIndex-35 , address-7876e95dfc50c27587f4b02268c182be6ece66ad +nodepath-m/44'/60'/0'/46/ , relayerIndex-36 , address-692f5c9679807deb380ade18ac2840bb61deaddc +nodepath-m/44'/60'/0'/46/ , relayerIndex-37 , address-84a1334f36fae3b691d271a741256e206ebc9221 +nodepath-m/44'/60'/0'/46/ , relayerIndex-38 , address-67504ea7de741573384342a9dc31c619a0e94d31 +nodepath-m/44'/60'/0'/46/ , relayerIndex-39 , address-f8a840768f9ee41d1debf712bd078008de3b331c +nodepath-m/44'/60'/0'/46/ , relayerIndex-40 , address-778918f738543ba0b8bbf8bfc9c9dd218e286ce6 +nodepath-m/44'/60'/0'/46/ , relayerIndex-41 , address-422a6749982a075448f6ba4ca7035397fe20038d +nodepath-m/44'/60'/0'/46/ , relayerIndex-42 , address-be4b5adea0edef05489ded6b027a787964ac2803 +nodepath-m/44'/60'/0'/46/ , relayerIndex-43 , address-33ad6f7d1e496a690d0038ed7dbbc83f42043fcb +nodepath-m/44'/60'/0'/46/ , relayerIndex-44 , address-42fbcdaf4a6b7733e9cb07a4dd5f6c520489d27e +nodepath-m/44'/60'/0'/46/ , relayerIndex-45 , address-9d2a1c813b2794cce58ba6a1de08ab25436f49f7 +nodepath-m/44'/60'/0'/46/ , relayerIndex-46 , address-8b6805a27eeb018d15a3f131846bee733bb2e672 +nodepath-m/44'/60'/0'/46/ , relayerIndex-47 , address-13bcb1fe8886f14c87850a8ae69c2cf30422c235 +nodepath-m/44'/60'/0'/46/ , relayerIndex-48 , address-e298e46349ca03fa26af29ad2401a40fbf785f2a +nodepath-m/44'/60'/0'/46/ , relayerIndex-49 , address-5e75cc325db7110b2d5069b07d5be79444793024 +nodepath-m/44'/60'/0'/47/ , relayerIndex-0 , address-549420de8ad4624b7447ca01ea6595895d286d82 +nodepath-m/44'/60'/0'/47/ , relayerIndex-1 , address-ce34a94b093adfe724a99f21cc8d3fa73c5bd83e +nodepath-m/44'/60'/0'/47/ , relayerIndex-2 , address-58a29db5b4c38b3a4d91a1337eeabfc1dc0dbba4 +nodepath-m/44'/60'/0'/47/ , relayerIndex-3 , address-1a0ccc404f380ec981b640f246cdc739a282c059 +nodepath-m/44'/60'/0'/47/ , relayerIndex-4 , address-2b498377c297012d5a73378f493614f3957e7de7 +nodepath-m/44'/60'/0'/47/ , relayerIndex-5 , address-d98238714903d63898c10bef65de1da2476eb248 +nodepath-m/44'/60'/0'/47/ , relayerIndex-6 , address-b56db8751baea84da895c3e793f19c175972b282 +nodepath-m/44'/60'/0'/47/ , relayerIndex-7 , address-184cdcc1e7afd0d26a3476cfd26e12e83f1445ee +nodepath-m/44'/60'/0'/47/ , relayerIndex-8 , address-5833aaa8a893f441f913e9f31fa126a4051a4332 +nodepath-m/44'/60'/0'/47/ , relayerIndex-9 , address-e7748cb8ab87d9f5759e7606c7d52a225bd73554 +nodepath-m/44'/60'/0'/47/ , relayerIndex-10 , address-fc3326a2fb794c3a0cbfb51a14eae913d4d76329 +nodepath-m/44'/60'/0'/47/ , relayerIndex-11 , address-90c46a29b9dfbde920c3bd61e0593e6728232d72 +nodepath-m/44'/60'/0'/47/ , 
relayerIndex-12 , address-944a26837bb935bc189e00fe7b664c722cf710c9 +nodepath-m/44'/60'/0'/47/ , relayerIndex-13 , address-44d3410d9e7731e0a03e491d86f1aac26bba3403 +nodepath-m/44'/60'/0'/47/ , relayerIndex-14 , address-e3f472f83ea071a2d1258296113854056d59eb99 +nodepath-m/44'/60'/0'/47/ , relayerIndex-15 , address-1a90b696c1a134e858c148d6bd42557b990e8509 +nodepath-m/44'/60'/0'/47/ , relayerIndex-16 , address-ba8530bbe854ceb20717a883bd31a4685cbf8129 +nodepath-m/44'/60'/0'/47/ , relayerIndex-17 , address-fbbc558ee390236f48a7df2c33d03305ea33a112 +nodepath-m/44'/60'/0'/47/ , relayerIndex-18 , address-04a62278050ba8ad028191c0855309dcefb7a55e +nodepath-m/44'/60'/0'/47/ , relayerIndex-19 , address-24f806def566f72172a0037e9db965307e407ecd +nodepath-m/44'/60'/0'/47/ , relayerIndex-20 , address-da9889dd6f9fc6a1156bccd996d6e498ed4f131d +nodepath-m/44'/60'/0'/47/ , relayerIndex-21 , address-a137f8999a74fc089a2b3f5547b7534e3821528f +nodepath-m/44'/60'/0'/47/ , relayerIndex-22 , address-5d267d069ae5d8f92812f9d95f27bdf04357bfc6 +nodepath-m/44'/60'/0'/47/ , relayerIndex-23 , address-03d12cebb3d3ece7ddd71f9e9b632e2b7d12c079 +nodepath-m/44'/60'/0'/47/ , relayerIndex-24 , address-1df507f1c9ec619cea01bcdb194154db25e1cfe0 +nodepath-m/44'/60'/0'/47/ , relayerIndex-25 , address-6e5ecd07cd3312bc34714c8f71b1c35885cc62c5 +nodepath-m/44'/60'/0'/47/ , relayerIndex-26 , address-05c27920cf01e6c1401a84299a3d8438a9d3197c +nodepath-m/44'/60'/0'/47/ , relayerIndex-27 , address-d2cefc856c4c83543deb52846868c12fdf2f6998 +nodepath-m/44'/60'/0'/47/ , relayerIndex-28 , address-6cff3893a42af577b2fe75baa285db5c3c83b199 +nodepath-m/44'/60'/0'/47/ , relayerIndex-29 , address-3b91b699e85464a3c560d955a235dc9440f42ccc +nodepath-m/44'/60'/0'/47/ , relayerIndex-30 , address-9f97fb1d607ae9b30a7a000b33b82ae1f7586515 +nodepath-m/44'/60'/0'/47/ , relayerIndex-31 , address-d735e7fca940d1029431968e0165e733b53b3b9a +nodepath-m/44'/60'/0'/47/ , relayerIndex-32 , address-2cd1898b176f0ec395762a760ea1696b36fbc929 +nodepath-m/44'/60'/0'/47/ , relayerIndex-33 , address-bbdc76a1109b4d65eb55064077edff81917d59ad +nodepath-m/44'/60'/0'/47/ , relayerIndex-34 , address-a80b6103324abf3a82a7b4cfd17ddb31e43bf046 +nodepath-m/44'/60'/0'/47/ , relayerIndex-35 , address-dbcd3e12d51402c0b9200838f79e5d92ab7dc476 +nodepath-m/44'/60'/0'/47/ , relayerIndex-36 , address-1b329d3b98184eedb302bf22884a3fbc74d5360f +nodepath-m/44'/60'/0'/47/ , relayerIndex-37 , address-e59543bdb7344e4cb3547df80dc7932d7f398c85 +nodepath-m/44'/60'/0'/47/ , relayerIndex-38 , address-14867c377b781a6e68ddb117d3582aa4006e3b29 +nodepath-m/44'/60'/0'/47/ , relayerIndex-39 , address-c84d463a9e79ad07e945d9baa61ed4e9172b13d7 +nodepath-m/44'/60'/0'/47/ , relayerIndex-40 , address-f90c00260e2fc19193f39619ed57e3e667832f6c +nodepath-m/44'/60'/0'/47/ , relayerIndex-41 , address-25261cc8bf79aec5ec2f29750e568100a0e7e1e1 +nodepath-m/44'/60'/0'/47/ , relayerIndex-42 , address-646688b73e5794ed521efdcc98f803bedd190cad +nodepath-m/44'/60'/0'/47/ , relayerIndex-43 , address-167e371e4d434eabca5f0b7dfb6790eb38cb77f9 +nodepath-m/44'/60'/0'/47/ , relayerIndex-44 , address-26ed83ba83bedc261a20d026318fd6201f100c1d +nodepath-m/44'/60'/0'/47/ , relayerIndex-45 , address-a8476f9a71d863ec32a475715ce07883bdfeb585 +nodepath-m/44'/60'/0'/47/ , relayerIndex-46 , address-4811563a02267d823494ca6f78e1548bd26c0abc +nodepath-m/44'/60'/0'/47/ , relayerIndex-47 , address-17b965c2f30d9ba9cb3efc369cdd98bc70cd06d7 +nodepath-m/44'/60'/0'/47/ , relayerIndex-48 , address-fa711998ee5631811ea0ab976ab105f5acd9c164 +nodepath-m/44'/60'/0'/47/ , 
relayerIndex-49 , address-dede358b6872e23e37ddb7ae6637dd4162f244c7 +nodepath-m/44'/60'/0'/48/ , relayerIndex-0 , address-71b19c2e41971b43d1f0bf441ccd3fbff0058499 +nodepath-m/44'/60'/0'/48/ , relayerIndex-1 , address-47691c444cb2332d36d33d02bf4f9c5839beece3 +nodepath-m/44'/60'/0'/48/ , relayerIndex-2 , address-190a526246e0c52e9959eb185da1043f0f80333f +nodepath-m/44'/60'/0'/48/ , relayerIndex-3 , address-dbd92f2a3a1d2d9ed6dbf24a7a1655203950c259 +nodepath-m/44'/60'/0'/48/ , relayerIndex-4 , address-7690a5ec4e6bbdf498803de54756ca114bcfd1dd +nodepath-m/44'/60'/0'/48/ , relayerIndex-5 , address-4233d43f2f93b445a04e94bc6ba4f8481aeca962 +nodepath-m/44'/60'/0'/48/ , relayerIndex-6 , address-7d0f227aaea79b86d64c6dc39c091e270fb8aad5 +nodepath-m/44'/60'/0'/48/ , relayerIndex-7 , address-0c66f3e307cd37bc2660c42c94470c939c1233e7 +nodepath-m/44'/60'/0'/48/ , relayerIndex-8 , address-b2fd9dd46f577a7eb641d238289c1204b6e3edc0 +nodepath-m/44'/60'/0'/48/ , relayerIndex-9 , address-bd29b2d230a2cd7ab73ad1ea3f0b49bc74b90ef2 +nodepath-m/44'/60'/0'/48/ , relayerIndex-10 , address-dcc73f2363eaca58a3e0c9e07e44398a1730cc82 +nodepath-m/44'/60'/0'/48/ , relayerIndex-11 , address-6230bbe30a08ff949f66d417a75c4d19b5a1ea98 +nodepath-m/44'/60'/0'/48/ , relayerIndex-12 , address-945201731b32d1f5d4e5245bcd9a857a70a68d7c +nodepath-m/44'/60'/0'/48/ , relayerIndex-13 , address-9e6ff00376eeaab687272f9692f7f0a30423a56a +nodepath-m/44'/60'/0'/48/ , relayerIndex-14 , address-55b803d6b39e393e10510fc2879b935257bd69a0 +nodepath-m/44'/60'/0'/48/ , relayerIndex-15 , address-d9cde5a30bf6fb2a322e70d08d4b8c18c6aea2ad +nodepath-m/44'/60'/0'/48/ , relayerIndex-16 , address-8eb01cb80ae23288c74a3f542725230969a82d79 +nodepath-m/44'/60'/0'/48/ , relayerIndex-17 , address-52cc07ccd461691adc860baf33e47e0fafe82539 +nodepath-m/44'/60'/0'/48/ , relayerIndex-18 , address-c00abe8e887b9fb969f9d49f1c8ee1a06e23586b +nodepath-m/44'/60'/0'/48/ , relayerIndex-19 , address-681eb60b9d70bcd9a902972132cb134a6fc28b08 +nodepath-m/44'/60'/0'/48/ , relayerIndex-20 , address-8007a3d0d368aec34638385f79ede5f83a81ebc2 +nodepath-m/44'/60'/0'/48/ , relayerIndex-21 , address-08fbb903919c47b6b3227fd25bacef8a200f25cb +nodepath-m/44'/60'/0'/48/ , relayerIndex-22 , address-419ea1b7fe4c57d18e804461d7d201467b59969a +nodepath-m/44'/60'/0'/48/ , relayerIndex-23 , address-a96987a1894efa20ce59e981d6c42aa051d795a7 +nodepath-m/44'/60'/0'/48/ , relayerIndex-24 , address-b877f4cfe22b5261fde77969b3b1d97095d13519 +nodepath-m/44'/60'/0'/48/ , relayerIndex-25 , address-48b18d3ebc2acbf960d42cfb23f0c4b7ab4a5aa2 +nodepath-m/44'/60'/0'/48/ , relayerIndex-26 , address-385b4f513c4b0f4b56e5d5e72cf7a14b4074d032 +nodepath-m/44'/60'/0'/48/ , relayerIndex-27 , address-a4c21f7e42fcb11dfcee21151b06df7939549ca3 +nodepath-m/44'/60'/0'/48/ , relayerIndex-28 , address-f4e0fc54975d1405874605a50b1dc6991819a22f +nodepath-m/44'/60'/0'/48/ , relayerIndex-29 , address-63117736b5608646095e4a3595008bae4218532c +nodepath-m/44'/60'/0'/48/ , relayerIndex-30 , address-cae9789532d21364455b51693fce1295d36f6f56 +nodepath-m/44'/60'/0'/48/ , relayerIndex-31 , address-0d9717cd3a4d92cbeed5c47668cd989959a3c333 +nodepath-m/44'/60'/0'/48/ , relayerIndex-32 , address-6e3a5b1c5405faf2eb2f070376e3212c4bba55aa +nodepath-m/44'/60'/0'/48/ , relayerIndex-33 , address-5c03c6234d1de150557a727a95e60472f5bdd4ae +nodepath-m/44'/60'/0'/48/ , relayerIndex-34 , address-7e745779cedf7b212e0e762e80d991a262cf7df9 +nodepath-m/44'/60'/0'/48/ , relayerIndex-35 , address-b6f0efc3a6a9b99f28a148eca662586a536f5329 +nodepath-m/44'/60'/0'/48/ , 
relayerIndex-36 , address-cb57ff865e72d8cf584c995ae6c0370b21b3293b +nodepath-m/44'/60'/0'/48/ , relayerIndex-37 , address-20c3987c4c9efb513bf6ac6616b4534d5abc4bc3 +nodepath-m/44'/60'/0'/48/ , relayerIndex-38 , address-025be5650d9b4d86665cc2038f1ff997d9c76780 +nodepath-m/44'/60'/0'/48/ , relayerIndex-39 , address-efba315a3e68a4b517125157a529f8368888a63c +nodepath-m/44'/60'/0'/48/ , relayerIndex-40 , address-53a67f7f99b394183a70f96f91d09015e092d504 +nodepath-m/44'/60'/0'/48/ , relayerIndex-41 , address-cdeaa2e866bb8950691bba357ad119fe074a8f2f +nodepath-m/44'/60'/0'/48/ , relayerIndex-42 , address-ffb89ec0476136600608198818045097cf91e2d6 +nodepath-m/44'/60'/0'/48/ , relayerIndex-43 , address-f5440cdae1243ace707c57f908fc2820b61616a5 +nodepath-m/44'/60'/0'/48/ , relayerIndex-44 , address-c41aaffeb1e65b0951dab33659ddccfc55055616 +nodepath-m/44'/60'/0'/48/ , relayerIndex-45 , address-7b11b2899a362fdf3597dbcd0a15328b6a0d17ce +nodepath-m/44'/60'/0'/48/ , relayerIndex-46 , address-0de1aa78d77e545d6c6bde47d34e5a4bf975af16 +nodepath-m/44'/60'/0'/48/ , relayerIndex-47 , address-0f0ae5a75c9d1145f42a48f289b9a092e439ba02 +nodepath-m/44'/60'/0'/48/ , relayerIndex-48 , address-5b23d785ddd74d9ee3f958afc12ab310fdb71033 +nodepath-m/44'/60'/0'/48/ , relayerIndex-49 , address-cda5b3110e8221f3a69dad991c9ebb757215efae +nodepath-m/44'/60'/0'/49/ , relayerIndex-0 , address-a3ea44ab474c61b9deb23bfeca745468698d78dd +nodepath-m/44'/60'/0'/49/ , relayerIndex-1 , address-0d5a028204fc5528f75c0e8064cb9fe4d3024795 +nodepath-m/44'/60'/0'/49/ , relayerIndex-2 , address-1dcb604a999d4ab320773b9806abba5eab210026 +nodepath-m/44'/60'/0'/49/ , relayerIndex-3 , address-f7540a2031d891b586d7ed0c4b3ae75d8d156731 +nodepath-m/44'/60'/0'/49/ , relayerIndex-4 , address-3f9d745800f434a8923ea46e53494becf752bf32 +nodepath-m/44'/60'/0'/49/ , relayerIndex-5 , address-082eee9a0490f1b218d50e5bd78b93705919f767 +nodepath-m/44'/60'/0'/49/ , relayerIndex-6 , address-01a41cfb5a45e16ba76fe3b0357a7ae0b092bb5b +nodepath-m/44'/60'/0'/49/ , relayerIndex-7 , address-5554b4caf9cbc901a24c3e2ce61eb31a4bf7def0 +nodepath-m/44'/60'/0'/49/ , relayerIndex-8 , address-f9e35459b7aaf00586f88f5b2fc6a2c419b8dd5d +nodepath-m/44'/60'/0'/49/ , relayerIndex-9 , address-0061dbaec204030e005d54c6038f21659e68f9fc +nodepath-m/44'/60'/0'/49/ , relayerIndex-10 , address-d584ef03ccbe22d402dc60b336b7714e19249b1b +nodepath-m/44'/60'/0'/49/ , relayerIndex-11 , address-1cce1ed9bbcc0111739109932bf790a80d732e19 +nodepath-m/44'/60'/0'/49/ , relayerIndex-12 , address-99f1cf5c1dd555ced1d777a3f63787a48b1fa5b1 +nodepath-m/44'/60'/0'/49/ , relayerIndex-13 , address-e2c5baea69376d0382d1b30fd97f2019d2f004ed +nodepath-m/44'/60'/0'/49/ , relayerIndex-14 , address-5ed26f5817bd70a18675b0d21f7b7676cfacb1bd +nodepath-m/44'/60'/0'/49/ , relayerIndex-15 , address-96f451d62d126daa666ebc5f3a43b2be1844bc1d +nodepath-m/44'/60'/0'/49/ , relayerIndex-16 , address-9227d1985f58e4c4cdc1e1081328cc55e407623e +nodepath-m/44'/60'/0'/49/ , relayerIndex-17 , address-aade902ac855c6ea58f220a0cce31a9d7ca65fbf +nodepath-m/44'/60'/0'/49/ , relayerIndex-18 , address-54d3337262bebc581b43f1e4e075576bf4cadb95 +nodepath-m/44'/60'/0'/49/ , relayerIndex-19 , address-757fa8564266e351220403e098b62c9e3a5bc93a +nodepath-m/44'/60'/0'/49/ , relayerIndex-20 , address-2dd86fc2d68608d0a847821a85ccea5f9daae9d8 +nodepath-m/44'/60'/0'/49/ , relayerIndex-21 , address-2e8e0a3022e530275e0a8d65eef240a4b8b30337 +nodepath-m/44'/60'/0'/49/ , relayerIndex-22 , address-570b999ba7db3ab0efc5d2fd1e93d73465457ab0 +nodepath-m/44'/60'/0'/49/ , 
relayerIndex-23 , address-823e759f7f3705f88dbb12bec82eb315ee763ed9 +nodepath-m/44'/60'/0'/49/ , relayerIndex-24 , address-799c49d0e1adcf98aba48f6759e1fb1305e735cf +nodepath-m/44'/60'/0'/49/ , relayerIndex-25 , address-5edad796ccbd4adfcf6ccf2775166e2b0e3e4f1b +nodepath-m/44'/60'/0'/49/ , relayerIndex-26 , address-7012acc592dd533e921a1af354b474e5a9f0a9cf +nodepath-m/44'/60'/0'/49/ , relayerIndex-27 , address-267c107796d145ef45a7c7d5c9ec70f893f29a80 +nodepath-m/44'/60'/0'/49/ , relayerIndex-28 , address-3417a4a2016cfedc448594d418f25f6ab3cf24c2 +nodepath-m/44'/60'/0'/49/ , relayerIndex-29 , address-53264e484833a47fcd92ea7bdf58b0175103e220 +nodepath-m/44'/60'/0'/49/ , relayerIndex-30 , address-9924ef4543a67a0dcacd4191bb318050df6b7cfc +nodepath-m/44'/60'/0'/49/ , relayerIndex-31 , address-b0197f643654c9e9f3722474699df83fd4324177 +nodepath-m/44'/60'/0'/49/ , relayerIndex-32 , address-13a3c6a517c7691016e39ec697be558ef6b679e4 +nodepath-m/44'/60'/0'/49/ , relayerIndex-33 , address-0921ae572eedee7c155e083ef5720ea83b6da885 +nodepath-m/44'/60'/0'/49/ , relayerIndex-34 , address-ddf4cd5436bd2f0e9b6f6e78951adfa21e69c2c9 +nodepath-m/44'/60'/0'/49/ , relayerIndex-35 , address-77b5d9809357e5299effee85221d38f1118f96cc +nodepath-m/44'/60'/0'/49/ , relayerIndex-36 , address-c0a37a4bce2c4d81611b9ef2d34c3ad7fba98a9a +nodepath-m/44'/60'/0'/49/ , relayerIndex-37 , address-43a850ab0d6f9b23931f95ad297454be87b9dfeb +nodepath-m/44'/60'/0'/49/ , relayerIndex-38 , address-267f7660a96be58e08e690c30f2fb32b2a88383e +nodepath-m/44'/60'/0'/49/ , relayerIndex-39 , address-bf5f050ab9d010f65736668a1e296c4b353ad8ef +nodepath-m/44'/60'/0'/49/ , relayerIndex-40 , address-46ece936c745742f6860733956f0e434d2207ec9 +nodepath-m/44'/60'/0'/49/ , relayerIndex-41 , address-0887b2fff55509ffde80ad761c7a5bab065e37b2 +nodepath-m/44'/60'/0'/49/ , relayerIndex-42 , address-62b813b010508ccbb82b5b08ad182905e71ecf2c +nodepath-m/44'/60'/0'/49/ , relayerIndex-43 , address-904ed995f2f6ec11c2644b00069008bd6bf6d6a0 +nodepath-m/44'/60'/0'/49/ , relayerIndex-44 , address-bc7fd94c50077ede2c1a3028df3038ee79b0aa7d +nodepath-m/44'/60'/0'/49/ , relayerIndex-45 , address-98f052ce3b07856a7435006e64bfa0acbda08cd4 +nodepath-m/44'/60'/0'/49/ , relayerIndex-46 , address-422bcabe96051dd434d922e0ce4bd844fc3a0dd1 +nodepath-m/44'/60'/0'/49/ , relayerIndex-47 , address-f388c6655b16aa6352942d28e2400d5dd32e1c09 +nodepath-m/44'/60'/0'/49/ , relayerIndex-48 , address-fe7788b54f61da1d8058233b4dbaffc666bed42d +nodepath-m/44'/60'/0'/49/ , relayerIndex-49 , address-f2e5611c53ee0a636e3a8cdc66d1c2cc5a58ad2d +nodepath-m/44'/60'/0'/50/ , relayerIndex-0 , address-39312d68dc57ac364fb795190c1e142531bc51aa +nodepath-m/44'/60'/0'/50/ , relayerIndex-1 , address-67cd06c1a48e559278f217c43ac4e97616beeba2 +nodepath-m/44'/60'/0'/50/ , relayerIndex-2 , address-17f37c825c49493ef703ca1a02cb001137568de3 +nodepath-m/44'/60'/0'/50/ , relayerIndex-3 , address-0c50bb59a4e32f38e0170a7763c82f12e29ac34d +nodepath-m/44'/60'/0'/50/ , relayerIndex-4 , address-fc2da90559702659b107c9cafaeaa72529387828 +nodepath-m/44'/60'/0'/50/ , relayerIndex-5 , address-714ecc7009ad5572513576113011d10558fd710b +nodepath-m/44'/60'/0'/50/ , relayerIndex-6 , address-5976312e5f5b795c1507c8015f9504a52b4b7045 +nodepath-m/44'/60'/0'/50/ , relayerIndex-7 , address-265a31c003c52b22447b2f144e876c7f0836d939 +nodepath-m/44'/60'/0'/50/ , relayerIndex-8 , address-f275e03f26c82e51802c4759c68334254bd0f18c +nodepath-m/44'/60'/0'/50/ , relayerIndex-9 , address-3a9ddd69719fb3116c57f9427b9d97e3d877c022 +nodepath-m/44'/60'/0'/50/ , 
relayerIndex-10 , address-1a4c3fff8fa52182046f3e8214e9dce545be03a5 +nodepath-m/44'/60'/0'/50/ , relayerIndex-11 , address-dd0deed524cfb642b4c8f2d3ec369bfcb14feb2e +nodepath-m/44'/60'/0'/50/ , relayerIndex-12 , address-bcd3d0fdf3b2458c7cb19a125f073fdf2ed3d7b5 +nodepath-m/44'/60'/0'/50/ , relayerIndex-13 , address-dd1f55003837d40403704bc66c323d080f65e596 +nodepath-m/44'/60'/0'/50/ , relayerIndex-14 , address-b7c9190a4eeee2882f4ec468d9eda40eb4b82544 +nodepath-m/44'/60'/0'/50/ , relayerIndex-15 , address-a268163598123de72c7a78fd52423683bf38ba48 +nodepath-m/44'/60'/0'/50/ , relayerIndex-16 , address-9a68de35056accd3bc5619ba755b29a3d8fc41e6 +nodepath-m/44'/60'/0'/50/ , relayerIndex-17 , address-9bfbeb3be3815487e507150d3f5b2de39c2decd6 +nodepath-m/44'/60'/0'/50/ , relayerIndex-18 , address-66e84c1ff1a792c9beaeef0038bf59e3ba4b6a36 +nodepath-m/44'/60'/0'/50/ , relayerIndex-19 , address-53319477f39cb286142af465597ff8fd994031da +nodepath-m/44'/60'/0'/50/ , relayerIndex-20 , address-acd8fd964c8fb803b7ce3dd2f19e1addfc18e832 +nodepath-m/44'/60'/0'/50/ , relayerIndex-21 , address-822ba59a2f9d8c2be11f154a8af3cacc4425e0c4 +nodepath-m/44'/60'/0'/50/ , relayerIndex-22 , address-ae6360a41ecff00bbf5105a78d6ba4eb030bee5a +nodepath-m/44'/60'/0'/50/ , relayerIndex-23 , address-79ae4f766d5e2c99c93fb987fd16aeec20a10760 +nodepath-m/44'/60'/0'/50/ , relayerIndex-24 , address-63a52931e579ceafdef8b4571071fc5b2864bb46 +nodepath-m/44'/60'/0'/50/ , relayerIndex-25 , address-590ab00a13afea5c282b7cc04ecdae9b34c9bfb6 +nodepath-m/44'/60'/0'/50/ , relayerIndex-26 , address-7c4d6afdbd6949b65e9ab4fe0e0c07b3e2f652a2 +nodepath-m/44'/60'/0'/50/ , relayerIndex-27 , address-933e01647129f378a708178bda396cd4d987f4bb +nodepath-m/44'/60'/0'/50/ , relayerIndex-28 , address-72a03dbdbf29f86ac1ad0fd4930655297558f620 +nodepath-m/44'/60'/0'/50/ , relayerIndex-29 , address-4bab545766f724e19edc56b7b47c84726a840150 +nodepath-m/44'/60'/0'/50/ , relayerIndex-30 , address-8cac4695cdb0c83c00bc454603b3531a7c0571ad +nodepath-m/44'/60'/0'/50/ , relayerIndex-31 , address-414a14d31d1cf8d264948b658d7affeb3bd5f398 +nodepath-m/44'/60'/0'/50/ , relayerIndex-32 , address-63e5caeb1a18bdf2831c19c75d3d5c798ec6861f +nodepath-m/44'/60'/0'/50/ , relayerIndex-33 , address-900626f80cad3766e39120ceb2fefe59645f518b +nodepath-m/44'/60'/0'/50/ , relayerIndex-34 , address-2f4ae3268c819daa4581cb90ee052723d1253269 +nodepath-m/44'/60'/0'/50/ , relayerIndex-35 , address-386f40e01e12ccb1fdb78f0e9b2b3557f5c2dd21 +nodepath-m/44'/60'/0'/50/ , relayerIndex-36 , address-c2c424c2cdce961ea20ff317436b8c1b1d74c0d7 +nodepath-m/44'/60'/0'/50/ , relayerIndex-37 , address-b294b3f638494437e808fabcb15004c7bfe7bdb7 +nodepath-m/44'/60'/0'/50/ , relayerIndex-38 , address-54eafc7428f6b6841f9a811b8cfaf95fcc23299c +nodepath-m/44'/60'/0'/50/ , relayerIndex-39 , address-d38b9c3604759d4a83165fe3280160dbbe126d7a +nodepath-m/44'/60'/0'/50/ , relayerIndex-40 , address-edd44a975902a24f7c9078fdadc27f80af24600e +nodepath-m/44'/60'/0'/50/ , relayerIndex-41 , address-5bd5e22821b5bc47889208f50156e5066efe3a94 +nodepath-m/44'/60'/0'/50/ , relayerIndex-42 , address-2849396b2765c78f4561966ae16000c1aa56b42b +nodepath-m/44'/60'/0'/50/ , relayerIndex-43 , address-77ececeb62ffe13303672926f759cb64580714f7 +nodepath-m/44'/60'/0'/50/ , relayerIndex-44 , address-612008440da7c5b74082457abf4774a51e0751c9 +nodepath-m/44'/60'/0'/50/ , relayerIndex-45 , address-3d005bc96dede3d0a4107ddd33d3a1184053c696 +nodepath-m/44'/60'/0'/50/ , relayerIndex-46 , address-3aea31c18d6f3d42258a71a109fab4eb70bafca6 +nodepath-m/44'/60'/0'/50/ , 
relayerIndex-47 , address-431d75874f62294a484af77a6a37f821369a25e0 +nodepath-m/44'/60'/0'/50/ , relayerIndex-48 , address-7b972469ace9e79489746e0b5c319219b89a0c07 +nodepath-m/44'/60'/0'/50/ , relayerIndex-49 , address-8865a234e5a693f3ff4c67b92772868a6dc042b9 \ No newline at end of file diff --git a/package.json b/package.json index d0c34bf0..999e9a2b 100644 --- a/package.json +++ b/package.json @@ -18,6 +18,7 @@ "@slack/web-api": "^6.8.0", "@types/config": "^3.3.3", "@types/crypto-js": "^4.1.1", + "@types/express-prometheus-middleware": "^1.2.1", "@types/pino": "^7.0.5", "amqplib": "^0.10.3", "async-mutex": "^0.4.0", @@ -36,6 +37,7 @@ "ethereumjs-util": "^7.1.5", "express": "^4.18.2", "express-handlebars": "^6.0.6", + "express-prometheus-middleware": "^1.2.0", "handlebars": "^4.7.7", "hdkey": "^2.0.1", "husky": "^8.0.2", @@ -50,6 +52,7 @@ "pino": "^8.15.6", "pino-http": "^8.5.0", "pino-pretty": "^10.2.3", + "prom-client": "^15.1.0", "redis": "^4.3.1", "redlock": "^5.0.0-beta.2", "rest-api-errors": "^1.2.5", diff --git a/src/addresses.ts b/src/addresses.ts new file mode 100644 index 00000000..b22c1bdd --- /dev/null +++ b/src/addresses.ts @@ -0,0 +1,109 @@ +// import hdkey from 'hdkey'; +// import { privateToPublic, publicToAddress } from 'ethereumjs-util'; +// import { string } from 'joi'; +// const { ethers } = require('ethers'); +// const fs = require('fs'); + +// const RECIPIENT= ""; + +// const relayersMasterSeed = ''; +// const nodePathRoot = "m/44'/60'/0'/"; + +// // If you're using process.env or config, ensure you import and setup necessary configurations +// // const nodePathIndex = process.env.NODE_PATH_INDEX || 0; // Example fallback value + +// const seedInBuffer = Buffer.from(relayersMasterSeed, 'utf-8'); +// const ethRoot = hdkey.fromMasterSeed(seedInBuffer); +// const nodePath = `${nodePathRoot + 0}/`; +// // const ethNodePath: any = ethRoot.derive(nodePath + relayerIndex); +// // const privateKey = ethNodePath._privateKey.toString('hex'); +// // const ethPubkey = privateToPublic(ethNodePath.privateKey); + +// // const ethAddr = publicToAddress(ethPubkey).toString('hex'); + +// // console.log('Ethereum Address:', ethAddr); +// // console.log('Private key:', privateKey); + +// async function generateKeys(nodePath: string, numberOfRelayers: number): Promise { +// const addresses = []; + +// for (let relayerIndex = 0; relayerIndex < numberOfRelayers; relayerIndex++) { // Replace 10 with the desired number of iterations +// const ethNodePath: any = ethRoot.derive(nodePath + relayerIndex); +// const privateKey = ethNodePath._privateKey.toString('hex'); +// const ethPubkey = privateToPublic(ethNodePath.privateKey); +// const ethAddr = publicToAddress(ethPubkey).toString('hex'); +// // const balanceEther = await getBalance(ethAddr); + +// // console.log(`Relayer Index: ${relayerIndex}`); +// console.log('Ethereum Address:', ethAddr); +// const address = `nodepath-${nodePath} , relayerIndex-${relayerIndex} , address-${ethAddr}` +// addresses.push(address); + +// // console.log('Private key:', privateKey); +// // console.log(`Balance of address ${ethAddr}: ${balanceEther} Matic`); +// // console.log('----------------------'); +// // if (parseFloat(balanceEther) > 0.41 ){ +// // console.log(`Balance is greater than threshold, excess balance is ${parseFloat(balanceEther) - 0.41 }`) +// // await sendEther(privateKey, (parseFloat(balanceEther) - 0.41).toString()) +// // } +// } +// return addresses; + +// } + +// async function getBalance(ethAddr: string): Promise { +// let rpcUrl = 
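// Editor's note: the package.json hunk above adds prom-client, express-prometheus-middleware
// and its type definitions. The wiring into the express app is not part of this hunk, so the
// snippet below is only an illustrative sketch of how that middleware is normally mounted,
// not the repository's actual server setup.
import express from "express";
import promMid from "express-prometheus-middleware";

const app = express();

app.use(
  promMid({
    metricsPath: "/metrics", // Prometheus scrapes this endpoint
    collectDefaultMetrics: true, // process/heap/event-loop metrics via prom-client
    requestDurationBuckets: [0.1, 0.5, 1, 1.5], // histogram buckets in seconds
  }),
);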
'https://polygon-mainnet.g.alchemy.com/v2/xa1frMC-ihYEJcVn9eybSfhc-OtXbixj'; +// let provider = new ethers.providers.JsonRpcProvider(rpcUrl); + +// // Query balance +// const balanceWei = await provider.getBalance(ethAddr); +// const balanceEther = ethers.utils.formatEther(balanceWei); +// return balanceEther + +// } + +// async function sendEther(privateKey: string, amount: string) { +// // Connect to the network +// let rpcUrl = 'https://polygon-mainnet.g.alchemy.com/v2/xa1frMC-ihYEJcVn9eybSfhc-OtXbixj'; +// let provider = new ethers.providers.JsonRpcProvider(rpcUrl); +// // Your private key (Replace with your actual private key) + +// // Create a wallet from the private key and connect it to the provider +// let wallet = new ethers.Wallet(privateKey, provider); + +// // Fetching current gas price from the network +// let currentGasPrice = await provider.getGasPrice(); +// // Setting the gas price 20% higher than the current gas price +// const increasedGasPrice = currentGasPrice.mul(130).div(100); +// console.log(increasedGasPrice) +// const transaction = { +// to: RECIPIENT, +// value: ethers.utils.parseEther(amount), +// gasPrice: increasedGasPrice +// }; + +// let tx = await wallet.sendTransaction(transaction); +// console.log(tx.hash); + +// await tx.wait(); +// console.log(`Transaction confirmed with hash: ${tx.hash}`); +// } + +// // sendEther(privateKey).catch(console.error); +// async function main() { +// let allAddresses: string[] = []; + +// const numberOfRelayers = 50; // or any other desired number +// for (let index = 0; index <= 50; index++) { +// const nodePath = `${nodePathRoot}${index}/`; +// const addresses = await generateKeys(nodePath, numberOfRelayers); +// allAddresses = allAddresses.concat(addresses); +// allAddresses.concat("------------------------------------------------------------------") +// } +// fs.writeFileSync('output.txt', allAddresses.join('\n')); + +// } + +// main().catch(error => { +// console.error('Error encountered:', error); +// }); diff --git a/src/common/cache/redis/RedisCacheService.ts b/src/common/cache/redis/RedisCacheService.ts index 5ce8d6be..0f16884d 100644 --- a/src/common/cache/redis/RedisCacheService.ts +++ b/src/common/cache/redis/RedisCacheService.ts @@ -242,3 +242,155 @@ export class RedisCacheService implements ICacheService { } } } + +// export class RedisCacheService implements ICacheService { +// private static instance: ICacheService; + +// private redisPool: Pool; + +// private redLock: Redlock | undefined; + +// private constructor() { +// this.redisPool = createPool({ +// create: async () => new Redis(config.dataSources.redisUrl), +// destroy: (client: Redis) => client.quit().then(() => { }), +// }, { +// max: 200, // maximum size of the pool +// min: 100, // minimum size of the pool +// // additional pool configuration options +// }); +// } + +// getRedLock(): Redlock | undefined { +// if (this.redLock) return this.redLock; +// return undefined; +// } + +// async unlockRedLock(redisLock: Lock) { +// try { +// if (redisLock && this.redLock) { +// await this.redLock.release(redisLock); +// } else { +// log.error('Redlock not initialized'); +// } +// } catch (error) { +// log.error(`Error in unlocking redis lock ${JSON.stringify(error)}`); +// } +// } + +// public static getInstance(): ICacheService { +// if (!RedisCacheService.instance) { +// RedisCacheService.instance = new RedisCacheService(); +// } +// return RedisCacheService.instance; +// } + +// async connect(): Promise { +// try { +// const client = await 
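// Editor's note: the commented-out script above (src/addresses.ts) appears to be what produced
// the relayer address listing earlier in this diff: for each node path m/44'/60'/0'/N/ it derives
// 50 child keys and prints "nodepath-... , relayerIndex-... , address-...". A minimal sketch of
// that derivation, assuming the hdkey and ethereumjs-util dependencies already present in
// package.json; the seed value here is a placeholder, not the real master seed.
import hdkey from "hdkey";
import { privateToPublic, publicToAddress } from "ethereumjs-util";

const nodePathRoot = "m/44'/60'/0'/";

export function deriveRelayerAddress(
  masterSeed: string, // placeholder: the real seed comes from the encrypted config
  nodePathIndex: number, // the N in m/44'/60'/0'/N/
  relayerIndex: number, // child index under that path
): string {
  const root = hdkey.fromMasterSeed(Buffer.from(masterSeed, "utf-8"));
  const child = root.derive(`${nodePathRoot}${nodePathIndex}/${relayerIndex}`);
  const publicKey = privateToPublic(child.privateKey);
  // Hex address without a 0x prefix, matching the format of the listing above.
  return publicToAddress(publicKey).toString("hex");
}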
this.redisPool.acquire(); +// log.info(`Redis connection status: ${client.status}`); +// await this.redisPool.release(client); +// } catch (error) { +// log.error(`Error in connecting to redis client ${error}`); +// throw error; +// } +// } + +// async close(): Promise { +// await this.redisPool.drain(); +// await this.redisPool.clear(); +// } + +// async get(key: string): Promise { +// const client = await this.redisPool.acquire(); +// try { +// return await client.get(key) || ''; +// } catch (error) { +// log.error(`Error getting value for key ${key} - ${JSON.stringify(error)}`); +// return ''; +// } finally { +// await this.redisPool.release(client); +// } +// } + +// async set(key: string, value: string, hideValueInLogs: boolean = false): Promise { +// const client = await this.redisPool.acquire(); +// try { +// if (!hideValueInLogs) { +// log.info(`Setting cache value => Key: ${key} Value: ${value}`); +// } else { +// log.info(`Setting value in cache with key: ${key}`); +// } +// await client.set(key, value); +// log.info(`Cache value set for key: ${key}`); +// return true; +// } catch (error) { +// log.error(`Error setting value for key ${key} - ${JSON.stringify(error)}`); +// return false; +// } finally { +// await this.redisPool.release(client); +// } +// } + +// async increment(key: string, incrementBy: number = 1): Promise { +// const client = await this.redisPool.acquire(); +// try { +// const val = await client.get(key); +// if (!val) { +// log.info('Key does not exist. Nothing to increment'); +// return false; +// } +// await client.incrby(key, incrementBy); +// return true; +// } catch (error) { +// log.error(`Error in increment value - ${JSON.stringify(error)}`); +// return false; +// } finally { +// await this.redisPool.release(client); +// } +// } + +// async decrement(key: string, decrementBy: number = 1): Promise { +// const client = await this.redisPool.acquire(); +// try { +// const val = await client.get(key); +// if (val == null || val === 'undefined') { +// log.info('Key does not exist. 
Nothing to decrement'); +// return false; +// } +// await client.decrby(key, decrementBy); +// return true; +// } catch (error) { +// log.error(`Error in decrement value ${JSON.stringify(error)}`); +// return false; +// } finally { +// await this.redisPool.release(client); +// } +// } + +// async delete(key: string): Promise { +// const client = await this.redisPool.acquire(); +// try { +// const result = await client.del(key); +// return result === 1; +// } catch (error) { +// log.error(`Error in deleting key ${key} - ${JSON.stringify(error)}`); +// return false; +// } finally { +// await this.redisPool.release(client); +// } +// } + +// async expire(key: string, expiryTime: number): Promise { +// const client = await this.redisPool.acquire(); +// try { +// const result = await client.expire(key, expiryTime); +// return result === 1; +// } catch (error) { +// log.error(`Error setting expiry for key ${key} - ${JSON.stringify(error)}`); +// return false; +// } finally { +// await this.redisPool.release(client); +// } +// } +// } diff --git a/src/common/db/mongo/Mongo.ts b/src/common/db/mongo/Mongo.ts index 1910c013..10cd59ac 100644 --- a/src/common/db/mongo/Mongo.ts +++ b/src/common/db/mongo/Mongo.ts @@ -1,3 +1,4 @@ +/* eslint-disable no-await-in-loop */ /* eslint-disable import/no-import-module-exports */ import mongoose, { Mongoose } from "mongoose"; import { config } from "../../../config"; @@ -25,6 +26,102 @@ export class Mongo implements IDBService { this.client = null; } + /** + * Method to create an index on the 'transactionId' field in descending order + * for all collections in the database, if it doesn't already exist. + */ + async createTransactionIdIndexes(): Promise { + if (!this.client) { + throw new Error("Not connected to db"); + } + + try { + const collections = await this.client.connection.db + .listCollections() + .toArray(); + for (const collection of collections) { + const collectionName = collection.name; + const collectionObject = + this.client.connection.db.collection(collectionName); + + if (collectionName.startsWith("blockchaintransactions")) { + await this.createIndexes( + collectionObject, + { creationTime: -1, transactionId: 1 }, + { creationTime: -1, transactionId: 1, transactionHash: 1 }, + collectionName, + ); + } else if (collectionName.startsWith("useroperations")) { + await this.createIndexes( + collectionObject, + { creationTime: -1, transactionId: 1 }, + { creationTime: -1, transactionId: 1, userOpHash: 1 }, + collectionName, + ); + } + } + } catch (error) { + logger.error("Error while creating indexes"); + logger.error(error); + process.exit(); + } + } + + // eslint-disable-next-line class-methods-use-this + async createIndexes( + collectionObject: any, + compoundIndexOne: object, + compoundIndexTwo: object, + collectionName: string, + ): Promise { + const indexes = await collectionObject.indexes(); + log.info(indexes); + + const compoundIndexKeyOne = Object.entries(compoundIndexOne) + .map(([key, value]) => `${key}_${value}`) + .join("_"); + + log.info(`Compund index key name ${compoundIndexKeyOne}`); + const compoundIndexExistsOne = indexes.some( + (index: any) => index.name === compoundIndexKeyOne, + ); + + if (!compoundIndexExistsOne) { + await collectionObject.createIndex(compoundIndexOne, { + background: true, + }); + logger.info( + `Compound index ${compoundIndexKeyOne} created for collection ${collectionName}`, + ); + } else { + logger.info( + `Compound index ${compoundIndexKeyOne} found for collection ${collectionName}`, + ); + } + + const 
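// Editor's note: the commented-out RedisCacheService variant above wraps every Redis call in an
// acquire/try/finally-release cycle against a generic-pool of ioredis clients. A stripped-down
// sketch of that pattern (pool sizes and the URL here are illustrative, not the values used in
// the commented code):
import Redis from "ioredis";
import { createPool, Pool } from "generic-pool";

const redisPool: Pool<Redis> = createPool(
  {
    create: async () => new Redis("redis://localhost:6379"), // illustrative URL
    destroy: (client: Redis) => client.quit().then(() => {}),
  },
  { min: 2, max: 10 }, // the commented code above uses min 100 / max 200
);

export async function pooledGet(key: string): Promise<string> {
  const client = await redisPool.acquire();
  try {
    return (await client.get(key)) || "";
  } finally {
    // Release in finally so an error in the command never leaks a client from the pool.
    await redisPool.release(client);
  }
}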
compoundIndexKeyTwo = Object.entries(compoundIndexTwo) + .map(([key, value]) => `${key}_${value}`) + .join("_"); + + log.info(`Compund index key name ${compoundIndexKeyTwo}`); + const compoundIndexExistsTwo = indexes.some( + (index: any) => index.name === compoundIndexKeyTwo, + ); + + if (!compoundIndexExistsTwo) { + await collectionObject.createIndex(compoundIndexTwo, { + background: true, + }); + logger.info( + `Compound index ${compoundIndexKeyTwo} created for collection ${collectionName}`, + ); + } else { + logger.info( + `Compound index ${compoundIndexKeyTwo} found for collection ${collectionName}`, + ); + } + } + /** * Method creates and returns new mongo instance * @returns mongo instance diff --git a/src/common/maps/index.ts b/src/common/maps/index.ts index e49f836d..4e3067df 100644 --- a/src/common/maps/index.ts +++ b/src/common/maps/index.ts @@ -3,7 +3,7 @@ import { TransactionType } from "../types"; // change below to assign relayer manager to transaction type export const relayerManagerTransactionTypeNameMap = { [TransactionType.AA]: "RM1", - [TransactionType.SCW]: "RM2", + [TransactionType.SCW]: "RM1", [TransactionType.FUNDING]: "RM0", [TransactionType.BUNDLER]: "RM1", }; diff --git a/src/common/notification/NotificationUtil.ts b/src/common/notification/NotificationUtil.ts index ae6b34cd..a52a6257 100644 --- a/src/common/notification/NotificationUtil.ts +++ b/src/common/notification/NotificationUtil.ts @@ -23,7 +23,7 @@ export const getMaxRetryCountNotificationMessage = ( transactionType: TransactionType, chainId: number, ) => { - const message = "Transaction Max retry Count Exceeded"; + const message = "TW Setup: Transaction Max retry Count Exceeded"; const details = `TransactionId: ${transactionId}\n has exceeded max retry count.\nRelayer Address: ${account.getPublicKey()}\nTransaction Type: ${transactionType}\nChain Id: ${chainId}`; const action = "Please observe and wait for the transaction to be mined. If the transaction is not mined at the earliest, please send a transaction with higher gas price"; @@ -35,7 +35,7 @@ export const getTransactionErrorNotificationMessage = ( chainId: number, error: any, ) => { - const message = "Transaction Error"; + const message = "TW Setup: Transaction Error"; // check if error is string or object const errorString = typeof error === "string" ? 
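// Editor's note: the createIndexes helper added to Mongo.ts above decides whether an index
// already exists by reconstructing MongoDB's default index name (each field and direction
// joined with underscores) and looking for it in collection.indexes(). A small illustration
// of that naming convention, assuming the indexes were created without an explicit name:
function defaultIndexName(keySpec: Record<string, 1 | -1>): string {
  return Object.entries(keySpec)
    .map(([field, direction]) => `${field}_${direction}`)
    .join("_");
}

// { creationTime: -1, transactionId: 1 } -> "creationTime_-1_transactionId_1"
const blockchainTxIndexName = defaultIndexName({ creationTime: -1, transactionId: 1 });
// { creationTime: -1, transactionId: 1, userOpHash: 1 } -> "creationTime_-1_transactionId_1_userOpHash_1"
const userOpIndexName = defaultIndexName({
  creationTime: -1,
  transactionId: 1,
  userOpHash: 1,
});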
error : parseError(error); const details = `TransactionId: ${transactionId}\nChain Id: ${chainId}\nError: ${errorString}`; @@ -47,7 +47,7 @@ export const getTransactionNoOpNotificationMessage = ( transactionId: string, chainId: number, ) => { - const message = "Transaction No Op"; + const message = "TW Setup: Transaction No Op"; const details = `TransactionId: ${transactionId}\nChain Id: ${chainId}`; const action = undefined; return getMessage(NotificationLevel.INFO, message, details, action); @@ -58,7 +58,7 @@ export const getRelayerFundingNotificationMessage = ( chainId: number, transactionHash: string, ) => { - const message = "Relayer Funding"; + const message = "TW Setup: Relayer Funding"; const details = `Relayer Address: ${relayerAddress}\nChain Id: ${chainId}\nTransaction Hash: ${transactionHash}`; const action = undefined; return getMessage(NotificationLevel.INFO, message, details, action); @@ -69,7 +69,7 @@ export const getPendingTransactionIncreasingMessage = ( chainId: number, pendingCount: number, ) => { - const message = "Relayer Pending Transaction Increasing"; + const message = "TW Setup: Relayer Pending Transaction Increasing"; const details = `Relayer Address: ${relayerAddress}\nChain Id: ${chainId}\nPending Transaction Count: ${pendingCount}`; const action = "Keep an eye on the pending transaction count. If the pending transaction count is increasing, please increase the gas price of the transaction or check for stuck transactions"; @@ -82,7 +82,7 @@ export const getAccountUndefinedNotificationMessage = ( transactionType: string, relayerManagerName: string, ) => { - const message = "Account Undefined"; + const message = "TW Setup: Account Undefined"; const details = `TransactionId: ${transactionId}\nRelayer Address: ${relayerAddress}\nTransaction Type: ${transactionType}\nRelayer Manager Name: ${relayerManagerName}`; const action = undefined; return getMessage(NotificationLevel.ERROR, message, details, action); diff --git a/src/common/queue/AATransactionQueue.ts b/src/common/queue/AATransactionQueue.ts index 2df1227e..3a1287b7 100644 --- a/src/common/queue/AATransactionQueue.ts +++ b/src/common/queue/AATransactionQueue.ts @@ -10,7 +10,7 @@ const log = logger.child({ module: module.filename.split("/").slice(-4).join("/"), }); -const { queueUrl } = config; +const queueUrl = process.env.BUNDLER_QUEUE_URL || config.queueUrl; export class AATransactionQueue implements IQueue { private channel!: Channel; diff --git a/src/common/queue/BundlerTransactionQueue.ts b/src/common/queue/BundlerTransactionQueue.ts index 4714b34b..656e65f6 100644 --- a/src/common/queue/BundlerTransactionQueue.ts +++ b/src/common/queue/BundlerTransactionQueue.ts @@ -10,7 +10,7 @@ const log = logger.child({ module: module.filename.split("/").slice(-4).join("/"), }); -const { queueUrl } = config; +const queueUrl = process.env.BUNDLER_QUEUE_URL || config.queueUrl; export class BundlerTransactionQueue implements IQueue diff --git a/src/common/queue/SCWTransactionQueue.ts b/src/common/queue/SCWTransactionQueue.ts index 0c0beab8..7590f470 100644 --- a/src/common/queue/SCWTransactionQueue.ts +++ b/src/common/queue/SCWTransactionQueue.ts @@ -10,8 +10,7 @@ const log = logger.child({ module: module.filename.split("/").slice(-4).join("/"), }); -const { queueUrl } = config; - +const queueUrl = process.env.BUNDLER_QUEUE_URL || config.queueUrl; export class SCWTransactionQueue implements IQueue { private channel!: Channel; diff --git a/src/common/service-manager/index.ts b/src/common/service-manager/index.ts index 
ef9b10e5..35804d02 100644 --- a/src/common/service-manager/index.ts +++ b/src/common/service-manager/index.ts @@ -4,12 +4,7 @@ import { getContract, parseEther } from "viem"; import { chain } from "lodash"; import { config } from "../../config"; import { EVMAccount, IEVMAccount } from "../../relayer/account"; -import { - AAConsumer, - SCWConsumer, - SocketConsumer, - BundlerConsumer, -} from "../../relayer/consumer"; +import { SocketConsumer, BundlerConsumer } from "../../relayer/consumer"; import { EVMNonceManager } from "../../relayer/nonce-manager"; import { EVMRelayerManager, @@ -30,10 +25,8 @@ import { EVMNetworkService } from "../network"; import { NotificationManager } from "../notification"; import { SlackNotificationService } from "../notification/slack/SlackNotificationService"; import { - AATransactionQueue, BundlerTransactionQueue, RetryTransactionHandlerQueue, - SCWTransactionQueue, TransactionHandlerQueue, } from "../queue"; import { @@ -49,11 +42,9 @@ import { import { IStatusService, StatusService } from "../status"; import { CMCTokenPriceManager } from "../token-price"; import { - AATransactionMessageType, BundlerTransactionMessageType, EntryPointMapType, EVMRawTransactionType, - SCWTransactionMessageType, TransactionType, } from "../types"; import { UserOperationStateDAO } from "../db/dao/UserOperationStateDAO"; @@ -124,11 +115,13 @@ let statusService: IStatusService; (async () => { await dbInstance.connect(); + await dbInstance.createTransactionIdIndexes(); + await cacheService.connect(); const slackNotificationService = new SlackNotificationService( - config.slack.token, - config.slack.channel, + process.env.BUNDLER_SLACK_TOKEN || config.slack.token, + process.env.BUNDLER_SLACK_CHANNEL || config.slack.channel, ); const notificationManager = new NotificationManager(slackNotificationService); @@ -360,19 +353,21 @@ let statusService: IStatusService; ); log.info(`Retry transaction service setup for chainId: ${chainId}`); - log.info(`Setting up socket complete consumer for chainId: ${chainId}`); - socketConsumerMap[chainId] = new SocketConsumer({ - queue: transactionQueue, - options: { - chainId, - wssUrl: config.socketService.wssUrl, - EVMRelayerManagerMap, - }, - }); - transactionQueue.consume(socketConsumerMap[chainId].onMessageReceived); - log.info( - `Socket consumer setup complete for chainId: ${chainId} and attached to transaction queue`, - ); + if (!config.isTWSetup) { + log.info(`Setting up socket complete consumer for chainId: ${chainId}`); + socketConsumerMap[chainId] = new SocketConsumer({ + queue: transactionQueue, + options: { + chainId, + wssUrl: config.socketService.wssUrl, + EVMRelayerManagerMap, + }, + }); + transactionQueue.consume(socketConsumerMap[chainId].onMessageReceived); + log.info( + `Socket consumer setup complete for chainId: ${chainId} and attached to transaction queue`, + ); + } log.info(`Setting up fee options service for chainId: ${chainId}`); const feeOptionService = new FeeOption(gasPriceService, cacheService, { @@ -427,90 +422,7 @@ let statusService: IStatusService; // for each network get transaction type for (const type of supportedTransactionType[chainId]) { - if (type === TransactionType.AA) { - const aaRelayerManager = - EVMRelayerManagerMap[relayerManagerTransactionTypeNameMap[type]][ - chainId - ]; - if (!aaRelayerManager) { - throw new Error(`Relayer manager not found for ${type}`); - } - log.info(`Setting up AA transaction queue for chainId: ${chainId}`); - const aaQueue: IQueue = - new AATransactionQueue({ - chainId, - }); - - 
await aaQueue.connect(); - log.info(`AA transaction queue setup complete for chainId: ${chainId}`); - - log.info( - `Setting up AA consumer, relay service & simulation service for chainId: ${chainId}`, - ); - const aaConsumer = new AAConsumer({ - queue: aaQueue, - relayerManager: aaRelayerManager, - transactionService, - cacheService, - options: { - chainId, - entryPointMap, - }, - }); - // start listening for transaction - await aaQueue.consume(aaConsumer.onMessageReceived); - - const aaRelayService = new AARelayService(aaQueue); - routeTransactionToRelayerMap[chainId][type] = aaRelayService; - - log.info( - `AA consumer, relay service & simulation service setup complete for chainId: ${chainId}`, - ); - } else if (type === TransactionType.SCW) { - // queue for scw - log.info(`Setting up SCW transaction queue for chainId: ${chainId}`); - const scwQueue: IQueue = - new SCWTransactionQueue({ - chainId, - }); - await scwQueue.connect(); - log.info( - `SCW transaction queue setup complete for chainId: ${chainId}`, - ); - - const scwRelayerManager = - EVMRelayerManagerMap[relayerManagerTransactionTypeNameMap[type]][ - chainId - ]; - if (!scwRelayerManager) { - throw new Error(`Relayer manager not found for ${type}`); - } - - log.info( - `Setting up SCW consumer, relay service & simulation service for chainId: ${chainId}`, - ); - const scwConsumer = new SCWConsumer({ - queue: scwQueue, - relayerManager: scwRelayerManager, - transactionService, - cacheService, - options: { - chainId, - }, - }); - await scwQueue.consume(scwConsumer.onMessageReceived); - - const scwRelayService = new SCWRelayService(scwQueue); - routeTransactionToRelayerMap[chainId][type] = scwRelayService; - - scwSimulationServiceMap[chainId] = new SCWSimulationService( - networkService, - tenderlySimulationService, - ); - log.info( - `SCW consumer, relay service & simulation service setup complete for chainId: ${chainId}`, - ); - } else if (type === TransactionType.BUNDLER) { + if (type === TransactionType.BUNDLER) { const bundlerRelayerManager = EVMRelayerManagerMap[relayerManagerTransactionTypeNameMap[type]][ chainId diff --git a/src/common/simulation/BundlerSimulationService.ts b/src/common/simulation/BundlerSimulationService.ts index 7fc0e17f..a99c979c 100644 --- a/src/common/simulation/BundlerSimulationService.ts +++ b/src/common/simulation/BundlerSimulationService.ts @@ -225,7 +225,10 @@ export class BundlerSimulationService { verificationGasLimit += BigInt( Math.ceil(Number(verificationGasLimit) * 0.2), ); - if(chainId === BLOCKCHAINS.CHILIZ_MAINNET || chainId === BLOCKCHAINS.CHILIZ_TESTNET) { + if ( + chainId === BLOCKCHAINS.CHILIZ_MAINNET || + chainId === BLOCKCHAINS.CHILIZ_TESTNET + ) { verificationGasLimit += BigInt( Math.ceil(Number(verificationGasLimit) * 0.2), ); @@ -241,7 +244,7 @@ export class BundlerSimulationService { callGasLimit += BigInt(Math.ceil(Number(callGasLimit) * 0.5)); } - if(chainId === BLOCKCHAINS.MANTLE_MAINNET) { + if (chainId === BLOCKCHAINS.MANTLE_MAINNET) { preVerificationGas += preVerificationGas; } @@ -278,7 +281,9 @@ export class BundlerSimulationService { }, }; } catch (error: any) { - log.error(`Error in estimating user op: ${parseError(error)} on chainId: ${estimateUserOperationGasData.chainId}`); + log.error( + `Error in estimating user op: ${parseError(error)} on chainId: ${estimateUserOperationGasData.chainId}`, + ); return { code: error.code, message: parseError(error), @@ -618,7 +623,9 @@ export class BundlerSimulationService { }, }; } catch (error: any) { - log.error(`Error in 
simulateValidation: ${parseError(error)} on chainId: ${simulateValidationData.chainId}`); + log.error( + `Error in simulateValidation: ${parseError(error)} on chainId: ${simulateValidationData.chainId}`, + ); return { code: error.code, message: parseError(error), diff --git a/src/config/config.test.ts b/src/config/config.test.ts index 565145a1..62bfd6fe 100644 --- a/src/config/config.test.ts +++ b/src/config/config.test.ts @@ -26,6 +26,7 @@ describe("ConfigV2", () => { networksNotSupportingEthCallBytecodeStateOverrides, EVMNetworkService, relayer, + trustWalletSupportedNetworks, ...oldConfigValues } = nodeconfig.util.toObject(); diff --git a/src/config/index.ts b/src/config/index.ts index b5d3e8de..c8cbf59b 100644 --- a/src/config/index.ts +++ b/src/config/index.ts @@ -16,7 +16,10 @@ const PBKDF2_ITERATIONS = 310000; const AES_PADDING = crypto.pad.Pkcs7; const AES_MODE = crypto.mode.CBC; -export function merge(decryptedConfig: Partial): ConfigType { +// A part of our config is encrypted and other part is not. This function merges the decrypted part with the rest of the config. +export function mergeWithDecryptedConfig( + decryptedConfig: Partial, +): ConfigType { const merged = nodeconfig.util.toObject(); // We always take the relayer secrets from the old, decrypted config @@ -42,22 +45,24 @@ export function merge(decryptedConfig: Partial): ConfigType { export class Config implements IConfig { config: ConfigType; - constructor() { - log.info("Config loading started"); - try { - // decrypt the config and load it - const encryptedEnvPath = - process.env.BUNDLER_CONFIG_PATH || "config.json.enc"; + // eslint-disable-next-line class-methods-use-this + decryptConfig(): string { + const encryptedEnvPath = + process.env.BUNDLER_CONFIG_PATH || "./config.json.enc"; - const passphrase = process.env.CONFIG_PASSPHRASE; - if (!passphrase) { - throw new Error("Passphrase for config required in .env file"); - } + const passphrase = process.env.BUNDLER_CONFIG_PASSPHRASE; + if (!passphrase) { + throw new Error( + "BUNDLER_CONFIG_PASSPHRASE environment variable required", + ); + } - if (!existsSync(encryptedEnvPath)) { - throw new Error(`Invalid ENV Path: ${encryptedEnvPath}`); - } - const ciphertext = fs.readFileSync(encryptedEnvPath, "utf8"); + if (!existsSync(encryptedEnvPath)) { + throw new Error(`Invalid ENV Path: ${encryptedEnvPath}`); + } + const ciphertext = fs.readFileSync(encryptedEnvPath, "utf8"); + + try { // First 44 bits are Base64 encodded HMAC const hashInBase64 = ciphertext.substr(0, 44); @@ -91,9 +96,32 @@ export class Config implements IConfig { if (decryptedHmacInBase64 !== hashInBase64) { throw new Error("Error: HMAC does not match"); } + return plaintext; + } catch (e) { + const wrappedError = new Error( + `Config decryption failed (check your BUNDLER_CONFIG_PASSPHRASE): ${e}`, + ); + log.error(wrappedError); + throw wrappedError; + } + } + + constructor() { + log.info("Config loading started"); + try { + // decrypt the config and load it + const plaintext = this.decryptConfig(); const data = JSON.parse(plaintext) as ConfigType; - this.config = merge(data); + this.config = mergeWithDecryptedConfig(data); + + // check if BUNDLER_CHAIN_ID is set, if true override the supportedNetworks + // This exists to easily support single chain per bundler for TW + const chainId = parseInt(process.env.BUNDLER_CHAIN_ID || "", 10); + if (chainId) { + this.config.supportedNetworks = [chainId]; + } + this.validate(); log.info("Config loaded successfully"); diff --git a/src/config/interface/IConfig.ts 
b/src/config/interface/IConfig.ts index 739552c9..00e66231 100644 --- a/src/config/interface/IConfig.ts +++ b/src/config/interface/IConfig.ts @@ -168,6 +168,8 @@ export type ConfigType = { }; // default, hard-coded gas overheads defaultGasOverheads: DefaultGasOverheadType; + // true if this is running on a TrustWallet dedicated setup + isTWSetup: boolean; // array of chain Ids for networks that support https://eips.ethereum.org/EIPS/eip-1559 EIP1559SupportedNetworks: Array; // map of entrypoint addresses -> supported chain Ids diff --git a/src/config/static-config.json b/src/config/static-config.json index f8c002e8..240d9f56 100644 --- a/src/config/static-config.json +++ b/src/config/static-config.json @@ -7,23 +7,23 @@ 1, 80001, 137, 97, 56, 1442, 1101, 42161, 42170, 420, 10, 43113, 43114, 84531, 8453, 59140, 59144, 5000, 5001, 204, 5611, 88888, 592, 88882, 81, 1115, 1116, 169, 3441005, 91715, 9980, 421614, 11155111, 84532, 168587773, - 80085, 81457, 534351, 534352, 56400, 7000, 11155420 + 80085, 81457, 534351, 534352, 56400, 7000, 11155420, 80002 ], "EIP1559SupportedNetworks": [ 1, 80001, 137, 42161, 420, 10, 43114, 43113, 84531, 8453, 59140, 59144, 204, 5611, 421614, 11155111, 84532, 168587773, 80085, 81457, 42170, 169, 56400, - 11155420 + 11155420, 80002 ], "testnetNetworks": [ 5, 80001, 97, 1442, 421613, 420, 43113, 84531, 59140, 5001, 5611, 81, 88882, 59144, 421614, 11155111, 1115, 3441005, 84532, 168587773, 80085, 534351, - 56400, 11155420 + 56400, 11155420, 80002 ], "nonRM2SupportedNetworks": [ 97, 1442, 1101, 42170, 420, 10, 43113, 43114, 84531, 8453, 59140, 59144, 5000, 5001, 204, 5611, 88888, 592, 88882, 81, 1115, 1116, 169, 3441005, 91715, 9980, 421614, 11155111, 84532, 168587773, 80085, 81457, 534351, - 534352, 56400, 7000, 11155420 + 534352, 56400, 7000, 11155420, 80002 ], "supportedTransactionType": { "1": ["AA", "SCW", "BUNDLER"], @@ -72,7 +72,8 @@ "534352": ["AA", "BUNDLER"], "56400": ["AA", "BUNDLER"], "7000": ["AA", "BUNDLER"], - "11155420": ["AA", "BUNDLER"] + "11155420": ["AA", "BUNDLER"], + "80002": ["AA", "BUNDLER"] }, "chains": { "currency": { @@ -122,7 +123,8 @@ "534352": "ETH", "56400": "ZERO", "7000": "ZETA", - "11155420": "ETH" + "11155420": "ETH", + "80002": "MATIC" }, "decimal": { "5": 18, @@ -171,7 +173,8 @@ "534352": 18, "56400": 18, "7000": 18, - "11155420": 18 + "11155420": 18, + "80002": 18 }, "retryTransactionInterval": { "5": 220000, @@ -220,7 +223,8 @@ "534352": 220000, "56400": 220000, "7000": 220000, - "11155420": 220000 + "11155420": 220000, + "80002": 120000 }, "updateFrequencyInSeconds": { "5": 60, @@ -269,7 +273,8 @@ "534352": 60, "56400": 60, "7000": 60, - "11155420": 60 + "11155420": 60, + "80002": 60 } }, "defaultGasOverheads": { @@ -306,7 +311,7 @@ "43113": 2, "43114": 100, "84531": 2, - "8453": 2, + "8453": 50, "59140": 2, "59144": 2, "1287": 2, @@ -320,14 +325,14 @@ "81": 1, "88882": 1, "1115": 1, - "1116": 1, + "1116": 15, "169": 1, "3441005": 1, "91715": 1, "9980": 1, "7116": 40, "421614": 5, - "11155111": 5, + "11155111": 10, "80085": 2, "168587773": 20, "84532": 2, @@ -336,7 +341,8 @@ "534352": 2, "56400": 100, "7000": 2, - "11155420": 2 + "11155420": 2, + "80002": 2 }, "maxRelayerCount": { "5": 10, @@ -355,7 +361,7 @@ "43113": 10, "43114": 80, "84531": 10, - "8453": 10, + "8453": 60, "59140": 10, "59144": 10, "1287": 10, @@ -369,7 +375,7 @@ "81": 5, "88882": 5, "1115": 5, - "1116": 5, + "1116": 20, "169": 5, "3441005": 5, "91715": 5, @@ -385,7 +391,8 @@ "534352": 5, "56400": 120, "7000": 5, - "11155420": 5 + "11155420": 5, + "80002": 5 
}, "inactiveRelayerCountThreshold": { "5": 5, @@ -404,7 +411,7 @@ "43113": 5, "43114": 8, "84531": 5, - "8453": 5, + "8453": 55, "59140": 5, "59144": 5, "1287": 5, @@ -434,7 +441,8 @@ "534352": 5, "56400": 80, "7000": 3, - "11155420": 3 + "11155420": 3, + "80002": 3 }, "pendingTransactionCountThreshold": { "5": 15, @@ -483,7 +491,8 @@ "534352": 15, "56400": 15, "7000": 15, - "11155420": 15 + "11155420": 15, + "80002": 15 }, "fundingRelayerAmount": { "5": 0.001, @@ -502,7 +511,7 @@ "43113": 1, "43114": 0.5, "84531": 0.5, - "8453": 0.2, + "8453": 0.001, "59140": 0.1, "59144": 0.1, "1287": 1, @@ -516,7 +525,7 @@ "81": 5, "88882": 5, "1115": 0.4, - "1116": 10, + "1116": 0.2, "169": 0.002, "3441005": 0.002, "91715": 0.05, @@ -532,7 +541,8 @@ "534352": 0.03, "56400": 0.5, "7000": 5, - "11155420": 0.01 + "11155420": 0.01, + "80002": 1 }, "fundingBalanceThreshold": { "5": 0.03, @@ -551,7 +561,7 @@ "43113": 0.1, "43114": 0.2, "84531": 0.1, - "8453": 0.01, + "8453": 0.0008, "59140": 0.01, "59144": 0.01, "1287": 0.1, @@ -581,7 +591,8 @@ "534352": 0.015, "56400": 0.2, "7000": 2, - "11155420": 0.005 + "11155420": 0.005, + "80002": 0.5 }, "newRelayerInstanceCount": { "5": 3, @@ -630,7 +641,8 @@ "534352": 2, "56400": 2, "7000": 2, - "11155420": 2 + "11155420": 2, + "80002": 2 } }, { @@ -713,7 +725,11 @@ "insufficient funds for transfer", "insufficient funds for gas * price + value" ], - "NONCE_TOO_LOW": ["nonce too low", "nonce has already been used"], + "NONCE_TOO_LOW": [ + "nonce too low", + "nonce has already been used", + "lower than the current nonce of the account" + ], "MAX_PRIORITY_FEE_HIGHER_THAN_MAX_FEE": [ "max priority fee per gas higher than max fee per gas", "cannot be higher than the fee cap" @@ -915,7 +931,7 @@ 59140, 59144, 5000, 5001, 5611, 204, 84532, 168587773 ], "BNB": [97, 56], - "MATIC": [8995, 80001, 15001, 16110, 137], + "MATIC": [8995, 80001, 15001, 16110, 137, 80002], "BERA": [80085] }, "symbolMapByChainId": { @@ -939,7 +955,7 @@ 1, 5, 80001, 137, 97, 56, 1442, 1101, 421613, 42161, 420, 10, 43114, 84531, 8453, 59140, 59144, 592, 81, 204, 5611, 5000, 5001, 169, 3441005, 1115, 1116, 91715, 9980, 421614, 11155111, 84532, 168587773, 43113, - 80085, 81457, 42170, 534351, 534352, 56400, 7000, 11155420 + 80085, 81457, 42170, 534351, 534352, 56400, 7000, 11155420, 80002 ] }, "0x00000061fefce24a79343c27127435286bb7a4e1": { diff --git a/src/decrypt-config.ts b/src/decrypt-config.ts index 0db0e1cb..8db437bf 100644 --- a/src/decrypt-config.ts +++ b/src/decrypt-config.ts @@ -1,8 +1,5 @@ -import crypto from 'crypto-js'; -import fs, { existsSync } from 'fs'; -// import _ from 'lodash'; -// import path from 'path'; - +import crypto from "crypto-js"; +import fs, { existsSync } from "fs"; const KEY_SIZE = 32; const PBKDF2_ITERATIONS = 310000; @@ -10,57 +7,57 @@ const AES_PADDING = crypto.pad.Pkcs7; const AES_MODE = crypto.mode.CBC; function decryptConfig(): string { - // const encryptedEnvPath = './config.json.enc'; - const encryptedEnvPath = process.argv[2]; // Taking the second command line argument as configName - const passphrase = process.env.CONFIG_PASSPHRASE; - if (!passphrase) { - throw new Error('Passphrase for config required in .env file'); - } - - if (!existsSync(encryptedEnvPath)) { - throw new Error(`Invalid ENV Path: ${encryptedEnvPath}`); - } - const ciphertext = fs.readFileSync(encryptedEnvPath, 'utf8'); - // First 44 bits are Base64 encodded HMAC - const hashInBase64 = ciphertext.substr(0, 44); - - // Next 32 bits are the salt - const salt = 
crypto.enc.Hex.parse(ciphertext.substr(44, 32)); - - // Next 32 bits are the initialization vector - const iv = crypto.enc.Hex.parse(ciphertext.substr(44 + 32, 32)); - - // Rest is encrypted .env - const encrypted = ciphertext.substr(44 + 32 + 32); + // const encryptedEnvPath = './config.json.enc'; + const encryptedEnvPath = process.argv[2]; // Taking the second command line argument as configName + const passphrase = process.env.BUNDLER_CONFIG_PASSPHRASE; + if (!passphrase) { + throw new Error("Passphrase for config required in .env file"); + } - // Derive key from passphrase - const key = crypto.PBKDF2(passphrase, salt, { - keySize: KEY_SIZE / 32, - iterations: PBKDF2_ITERATIONS, + if (!existsSync(encryptedEnvPath)) { + throw new Error(`Invalid ENV Path: ${encryptedEnvPath}`); + } + const ciphertext = fs.readFileSync(encryptedEnvPath, "utf8"); + // First 44 bits are Base64 encodded HMAC + const hashInBase64 = ciphertext.substr(0, 44); + + // Next 32 bits are the salt + const salt = crypto.enc.Hex.parse(ciphertext.substr(44, 32)); + + // Next 32 bits are the initialization vector + const iv = crypto.enc.Hex.parse(ciphertext.substr(44 + 32, 32)); + + // Rest is encrypted .env + const encrypted = ciphertext.substr(44 + 32 + 32); + + // Derive key from passphrase + const key = crypto.PBKDF2(passphrase, salt, { + keySize: KEY_SIZE / 32, + iterations: PBKDF2_ITERATIONS, + }); + + let plaintext; + try { + const encryptedBytes = crypto.AES.decrypt(encrypted, key, { + iv, + padding: AES_PADDING, + mode: AES_MODE, }); + plaintext = encryptedBytes.toString(crypto.enc.Utf8); + } catch (e) { + console.log("Incorrect password for decryption"); + process.exit(); + } - let plaintext; - try { - const encryptedBytes = crypto.AES.decrypt(encrypted, key, { - iv, - padding: AES_PADDING, - mode: AES_MODE, - }); - plaintext = encryptedBytes.toString(crypto.enc.Utf8); - } catch (e) { - console.log('Incorrect password for decryption'); - process.exit(); - } - - // Verify HMAC - const decryptedHmac = crypto.HmacSHA256(plaintext, key); - const decryptedHmacInBase64 = crypto.enc.Base64.stringify(decryptedHmac); + // Verify HMAC + const decryptedHmac = crypto.HmacSHA256(plaintext, key); + const decryptedHmacInBase64 = crypto.enc.Base64.stringify(decryptedHmac); - if (decryptedHmacInBase64 !== hashInBase64) { - throw new Error('Error: HMAC does not match'); - } - console.log(plaintext); - return plaintext; + if (decryptedHmacInBase64 !== hashInBase64) { + throw new Error("Error: HMAC does not match"); } + console.log(plaintext); + return plaintext; +} -decryptConfig(); \ No newline at end of file +decryptConfig(); diff --git a/src/encrypt-config.ts b/src/encrypt-config.ts index 9559a510..9ee5bc56 100644 --- a/src/encrypt-config.ts +++ b/src/encrypt-config.ts @@ -1,3 +1,4 @@ +/* eslint-disable import/no-import-module-exports */ /* eslint-disable no-console */ import crypto from "crypto-js"; import { promises, existsSync } from "fs"; @@ -17,7 +18,7 @@ const encryptConfig = async ( } const plaintext = await promises.readFile(envPath, "utf8"); - // Derive a Key using the PBKDF2 algorithm from the passsphrase + // Derive a Key using the PBKDF2 algorithm from the passphrase const salt = crypto.lib.WordArray.random(128 / 8); const key = crypto.PBKDF2(passphrase, salt, { keySize: KEY_SIZE / 32, @@ -47,7 +48,7 @@ const encryptConfig = async ( process.exit(1); }; -const passphrase = process.env.CONFIG_PASSPHRASE; +const passphrase = process.env.BUNDLER_CONFIG_PASSPHRASE; if (passphrase !== undefined) { 
encryptConfig(passphrase); } else { diff --git a/src/relayer/relayer-manager/EVMRelayerManager.ts b/src/relayer/relayer-manager/EVMRelayerManager.ts index 75572e72..1096465a 100644 --- a/src/relayer/relayer-manager/EVMRelayerManager.ts +++ b/src/relayer/relayer-manager/EVMRelayerManager.ts @@ -363,7 +363,8 @@ export class EVMRelayerManager const seedInBuffer = Buffer.from(relayersMasterSeed, "utf-8"); const ethRoot = hdkey.fromMasterSeed(seedInBuffer); - const { nodePathIndex } = config.relayer; + const nodePathIndex = + process.env.BUNDLER_NODE_PATH_INDEX || config.relayer.nodePathIndex; const nodePath = `${nodePathRoot + nodePathIndex}/`; const ethNodePath: any = ethRoot.derive(nodePath + relayerIndex); const privateKey = ethNodePath._privateKey.toString("hex"); diff --git a/src/relayer/transaction-listener/EVMTransactionListener.ts b/src/relayer/transaction-listener/EVMTransactionListener.ts index f0cfac1f..05b1ddf3 100644 --- a/src/relayer/transaction-listener/EVMTransactionListener.ts +++ b/src/relayer/transaction-listener/EVMTransactionListener.ts @@ -136,21 +136,23 @@ export class EVMTransactionListener )}`, ); } - try { - await this.publishToTransactionQueue({ - transactionId, - relayerManagerName, - error, - event: SocketEventType.onTransactionError, - }); - } catch (publishToTransactionQueueError) { - log.error( - `publishToTransactionQueueError: ${parseError( - publishToTransactionQueueError, - )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ - this.chainId - }`, - ); + if (config.isTWSetup) { + try { + await this.publishToTransactionQueue({ + transactionId, + relayerManagerName, + error, + event: SocketEventType.onTransactionError, + }); + } catch (publishToTransactionQueueError) { + log.error( + `publishToTransactionQueueError: ${parseError( + publishToTransactionQueueError, + )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ + this.chainId + }`, + ); + } } log.error( `transactionExecutionResponse is null for transactionId: ${transactionId} for bundler: ${relayerAddress}`, @@ -219,25 +221,27 @@ export class EVMTransactionListener ); } - try { - // transaction queue is being listened by socket service to notify the client about the hash - await this.publishToTransactionQueue({ - transactionId, - relayerManagerName, - transactionHash, - receipt: undefined, - event: previousTransactionHash - ? SocketEventType.onTransactionHashChanged - : SocketEventType.onTransactionHashGenerated, - }); - } catch (publishToTransactionQueueError) { - log.error( - `publishToTransactionQueueError: ${parseError( - publishToTransactionQueueError, - )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ - this.chainId - }`, - ); + if (config.isTWSetup) { + try { + // transaction queue is being listened by socket service to notify the client about the hash + await this.publishToTransactionQueue({ + transactionId, + relayerManagerName, + transactionHash, + receipt: undefined, + event: previousTransactionHash + ? 
SocketEventType.onTransactionHashChanged + : SocketEventType.onTransactionHashGenerated, + }); + } catch (publishToTransactionQueueError) { + log.error( + `publishToTransactionQueueError: ${parseError( + publishToTransactionQueueError, + )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ + this.chainId + }`, + ); + } } // retry txn service will check for receipt log.info( @@ -644,22 +648,24 @@ export class EVMTransactionListener log.info( `Publishing to transaction queue on failure for transactionId: ${transactionId} to transaction queue on chainId ${this.chainId}`, ); - try { - await this.publishToTransactionQueue({ - transactionId, - relayerManagerName, - transactionHash, - receipt: transactionReceipt, - event: SocketEventType.onTransactionMined, - }); - } catch (publishToTransactionQueueError) { - log.error( - `publishToTransactionQueueError: ${parseError( - publishToTransactionQueueError, - )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ - this.chainId - }`, - ); + if (config.isTWSetup) { + try { + await this.publishToTransactionQueue({ + transactionId, + relayerManagerName, + transactionHash, + receipt: transactionReceipt, + event: SocketEventType.onTransactionMined, + }); + } catch (publishToTransactionQueueError) { + log.error( + `publishToTransactionQueueError: ${parseError( + publishToTransactionQueueError, + )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ + this.chainId + }`, + ); + } } log.info( @@ -736,24 +742,26 @@ export class EVMTransactionListener `Transaction got front runned. Publishing to transaction queue on failure for transactionId: ${transactionId} to transaction queue on chainId ${this.chainId}`, ); - try { - await this.publishToTransactionQueue({ - transactionId, - relayerManagerName, - transactionHash: ( - frontRunnedTransactionReceipt as TransactionReceipt - ).transactionHash, - receipt: transactionReceipt, - event: SocketEventType.onTransactionMined, - }); - } catch (publishToTransactionQueueError) { - log.error( - `publishToTransactionQueueError: ${parseError( - publishToTransactionQueueError, - )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ - this.chainId - }`, - ); + if (config.isTWSetup) { + try { + await this.publishToTransactionQueue({ + transactionId, + relayerManagerName, + transactionHash: ( + frontRunnedTransactionReceipt as TransactionReceipt + ).transactionHash, + receipt: transactionReceipt, + event: SocketEventType.onTransactionMined, + }); + } catch (publishToTransactionQueueError) { + log.error( + `publishToTransactionQueueError: ${parseError( + publishToTransactionQueueError, + )} while publishing to transaction queue for transactionId: ${transactionId} on chainId: ${ + this.chainId + }`, + ); + } } log.info( diff --git a/src/server/app.ts b/src/server/app.ts index 5fcebecb..097c7d22 100644 --- a/src/server/app.ts +++ b/src/server/app.ts @@ -82,6 +82,11 @@ app.route("/health").get((req, res) => { res.send("ok"); }); +app.route('/:chainId/health') + .get((req, res) => { + res.send('ok'); + }); + // error handler app.use( ( diff --git a/tsconfig.json b/tsconfig.json index ec79bb95..8465d647 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,7 +1,6 @@ { "compilerOptions": { /* Visit https://aka.ms/tsconfig.json to read more about this file */ - /* Projects */ // "incremental": true, /* Enable incremental compilation */ // "composite": true, /* Enable constraints that allow a 
TypeScript project to be used with project references. */ diff --git a/yarn.lock b/yarn.lock index 851c8bc5..70501bf8 100644 --- a/yarn.lock +++ b/yarn.lock @@ -53,17 +53,17 @@ zod "^3.22.4" "@ampproject/remapping@^2.2.0": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.2.1.tgz#99e8e11851128b8702cd57c33684f1d0f260b630" - integrity sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg== + version "2.3.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" + integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== dependencies: - "@jridgewell/gen-mapping" "^0.3.0" - "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.24" "@arbitrum/sdk@^3.1.3": - version "3.2.0" - resolved "https://registry.yarnpkg.com/@arbitrum/sdk/-/sdk-3.2.0.tgz#9cc2d2d1df929c7a1702f366a444d1439d17dc08" - integrity sha512-Y8NyL1EgWE8SGRFFxtqYIedQzKe7pMS9P9j0iDQ3eL1xvOf0t0WKb8vcta0cP2TcAOOpRZ3TWKPVfpBtwMsXCg== + version "3.3.3" + resolved "https://registry.yarnpkg.com/@arbitrum/sdk/-/sdk-3.3.3.tgz#9a49e753b9deeee6336f6a43acb0669961b91128" + integrity sha512-Fe/+h2gs78Efl94U7yXkYAN+FQLLAxDGj+kv6+CXbnUytso+PBvKgNj1e4YPwgtWLlDomdcOXFt/yaP4VKsrBQ== dependencies: "@ethersproject/address" "^5.0.8" "@ethersproject/bignumber" "^5.1.1" @@ -126,446 +126,434 @@ "@aws-sdk/util-utf8-browser" "^3.0.0" tslib "^1.11.1" -"@aws-sdk/client-cognito-identity@3.509.0": - version "3.509.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-cognito-identity/-/client-cognito-identity-3.509.0.tgz#d6284fbacc3456820446d51b74b87d2fedee015e" - integrity sha512-4gOFHM/u3DQB73ri15j+4As/mONj9Zmjze6SCQI9JwsW7WlOonDCsUQCKumEEnRGE8UQlqZAshIpAWWCrutLQw== +"@aws-sdk/client-cognito-identity@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-cognito-identity/-/client-cognito-identity-3.540.0.tgz#ccea36dd4650bdbd0fd2bcf02c3b1fb33ba154eb" + integrity sha512-03vUaIKjvdcOmjDi8Fv9JgY+VQrt9QBpRkI8A1lrdPNgWqTEZXZi/zBsFRsxTe6hgsrZtxVnxLu6krSRILuqtw== dependencies: "@aws-crypto/sha256-browser" "3.0.0" "@aws-crypto/sha256-js" "3.0.0" - "@aws-sdk/client-sts" "3.507.0" - "@aws-sdk/core" "3.496.0" - "@aws-sdk/credential-provider-node" "3.509.0" - "@aws-sdk/middleware-host-header" "3.502.0" - "@aws-sdk/middleware-logger" "3.502.0" - "@aws-sdk/middleware-recursion-detection" "3.502.0" - "@aws-sdk/middleware-signing" "3.502.0" - "@aws-sdk/middleware-user-agent" "3.502.0" - "@aws-sdk/region-config-resolver" "3.502.0" - "@aws-sdk/types" "3.502.0" - "@aws-sdk/util-endpoints" "3.502.0" - "@aws-sdk/util-user-agent-browser" "3.502.0" - "@aws-sdk/util-user-agent-node" "3.502.0" - "@smithy/config-resolver" "^2.1.1" - "@smithy/core" "^1.3.1" - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/hash-node" "^2.1.1" - "@smithy/invalid-dependency" "^2.1.1" - "@smithy/middleware-content-length" "^2.1.1" - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-retry" "^2.1.1" - "@smithy/middleware-serde" "^2.1.1" - "@smithy/middleware-stack" "^2.1.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - "@smithy/util-base64" "^2.1.1" - "@smithy/util-body-length-browser" "^2.1.1" - "@smithy/util-body-length-node" "^2.2.1" - 
"@smithy/util-defaults-mode-browser" "^2.1.1" - "@smithy/util-defaults-mode-node" "^2.1.1" - "@smithy/util-endpoints" "^1.1.1" - "@smithy/util-retry" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/client-sso-oidc@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.507.0.tgz#d1357d212d9510146d325ca1e6acd06d5744623b" - integrity sha512-ms5CH2ImhqqCIbo5irxayByuPOlVAmSiqDVfjZKwgIziqng2bVgNZMeKcT6t0bmrcgScEAVnZwY7j/iZTIw73g== + "@aws-sdk/client-sts" "3.540.0" + "@aws-sdk/core" "3.535.0" + "@aws-sdk/credential-provider-node" "3.540.0" + "@aws-sdk/middleware-host-header" "3.535.0" + "@aws-sdk/middleware-logger" "3.535.0" + "@aws-sdk/middleware-recursion-detection" "3.535.0" + "@aws-sdk/middleware-user-agent" "3.540.0" + "@aws-sdk/region-config-resolver" "3.535.0" + "@aws-sdk/types" "3.535.0" + "@aws-sdk/util-endpoints" "3.540.0" + "@aws-sdk/util-user-agent-browser" "3.535.0" + "@aws-sdk/util-user-agent-node" "3.535.0" + "@smithy/config-resolver" "^2.2.0" + "@smithy/core" "^1.4.0" + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/hash-node" "^2.2.0" + "@smithy/invalid-dependency" "^2.2.0" + "@smithy/middleware-content-length" "^2.2.0" + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-retry" "^2.2.0" + "@smithy/middleware-serde" "^2.3.0" + "@smithy/middleware-stack" "^2.2.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + "@smithy/util-base64" "^2.3.0" + "@smithy/util-body-length-browser" "^2.2.0" + "@smithy/util-body-length-node" "^2.3.0" + "@smithy/util-defaults-mode-browser" "^2.2.0" + "@smithy/util-defaults-mode-node" "^2.3.0" + "@smithy/util-endpoints" "^1.2.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-retry" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@aws-sdk/client-sso-oidc@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.540.0.tgz#e4c52889d33ca969add269011b790f2d634fb6d2" + integrity sha512-LZYK0lBRQK8D8M3Sqc96XiXkAV2v70zhTtF6weyzEpgwxZMfSuFJjs0jFyhaeZBZbZv7BBghIdhJ5TPavNxGMQ== dependencies: "@aws-crypto/sha256-browser" "3.0.0" "@aws-crypto/sha256-js" "3.0.0" - "@aws-sdk/client-sts" "3.507.0" - "@aws-sdk/core" "3.496.0" - "@aws-sdk/middleware-host-header" "3.502.0" - "@aws-sdk/middleware-logger" "3.502.0" - "@aws-sdk/middleware-recursion-detection" "3.502.0" - "@aws-sdk/middleware-signing" "3.502.0" - "@aws-sdk/middleware-user-agent" "3.502.0" - "@aws-sdk/region-config-resolver" "3.502.0" - "@aws-sdk/types" "3.502.0" - "@aws-sdk/util-endpoints" "3.502.0" - "@aws-sdk/util-user-agent-browser" "3.502.0" - "@aws-sdk/util-user-agent-node" "3.502.0" - "@smithy/config-resolver" "^2.1.1" - "@smithy/core" "^1.3.1" - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/hash-node" "^2.1.1" - "@smithy/invalid-dependency" "^2.1.1" - "@smithy/middleware-content-length" "^2.1.1" - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-retry" "^2.1.1" - "@smithy/middleware-serde" "^2.1.1" - "@smithy/middleware-stack" "^2.1.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - "@smithy/util-base64" "^2.1.1" - "@smithy/util-body-length-browser" "^2.1.1" - "@smithy/util-body-length-node" 
"^2.2.1" - "@smithy/util-defaults-mode-browser" "^2.1.1" - "@smithy/util-defaults-mode-node" "^2.1.1" - "@smithy/util-endpoints" "^1.1.1" - "@smithy/util-retry" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/client-sso@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.507.0.tgz#90a5de90f662aa680c0ffdc5e04695734ca8afb2" - integrity sha512-pFeaKwqv4tXD6QVxWC2V4N62DUoP3bPSm/mCe2SPhaNjNsmwwA53viUHz/nwxIbs8w4vV44UQsygb0AgKm+HoQ== + "@aws-sdk/client-sts" "3.540.0" + "@aws-sdk/core" "3.535.0" + "@aws-sdk/middleware-host-header" "3.535.0" + "@aws-sdk/middleware-logger" "3.535.0" + "@aws-sdk/middleware-recursion-detection" "3.535.0" + "@aws-sdk/middleware-user-agent" "3.540.0" + "@aws-sdk/region-config-resolver" "3.535.0" + "@aws-sdk/types" "3.535.0" + "@aws-sdk/util-endpoints" "3.540.0" + "@aws-sdk/util-user-agent-browser" "3.535.0" + "@aws-sdk/util-user-agent-node" "3.535.0" + "@smithy/config-resolver" "^2.2.0" + "@smithy/core" "^1.4.0" + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/hash-node" "^2.2.0" + "@smithy/invalid-dependency" "^2.2.0" + "@smithy/middleware-content-length" "^2.2.0" + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-retry" "^2.2.0" + "@smithy/middleware-serde" "^2.3.0" + "@smithy/middleware-stack" "^2.2.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + "@smithy/util-base64" "^2.3.0" + "@smithy/util-body-length-browser" "^2.2.0" + "@smithy/util-body-length-node" "^2.3.0" + "@smithy/util-defaults-mode-browser" "^2.2.0" + "@smithy/util-defaults-mode-node" "^2.3.0" + "@smithy/util-endpoints" "^1.2.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-retry" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@aws-sdk/client-sso@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.540.0.tgz#732a7f325de3905a719c20ce05e555b445f82b4a" + integrity sha512-rrQZMuw4sxIo3eyAUUzPQRA336mPRnrAeSlSdVHBKZD8Fjvoy0lYry2vNhkPLpFZLso1J66KRyuIv4LzRR3v1Q== dependencies: "@aws-crypto/sha256-browser" "3.0.0" "@aws-crypto/sha256-js" "3.0.0" - "@aws-sdk/core" "3.496.0" - "@aws-sdk/middleware-host-header" "3.502.0" - "@aws-sdk/middleware-logger" "3.502.0" - "@aws-sdk/middleware-recursion-detection" "3.502.0" - "@aws-sdk/middleware-user-agent" "3.502.0" - "@aws-sdk/region-config-resolver" "3.502.0" - "@aws-sdk/types" "3.502.0" - "@aws-sdk/util-endpoints" "3.502.0" - "@aws-sdk/util-user-agent-browser" "3.502.0" - "@aws-sdk/util-user-agent-node" "3.502.0" - "@smithy/config-resolver" "^2.1.1" - "@smithy/core" "^1.3.1" - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/hash-node" "^2.1.1" - "@smithy/invalid-dependency" "^2.1.1" - "@smithy/middleware-content-length" "^2.1.1" - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-retry" "^2.1.1" - "@smithy/middleware-serde" "^2.1.1" - "@smithy/middleware-stack" "^2.1.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - "@smithy/util-base64" "^2.1.1" - "@smithy/util-body-length-browser" "^2.1.1" - "@smithy/util-body-length-node" "^2.2.1" - "@smithy/util-defaults-mode-browser" "^2.1.1" - "@smithy/util-defaults-mode-node" "^2.1.1" - "@smithy/util-endpoints" "^1.1.1" - 
"@smithy/util-retry" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/client-sts@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sts/-/client-sts-3.507.0.tgz#0a99b5b04ca8d2e30a52840cc67181b3f2ac990a" - integrity sha512-TOWBe0ApEh32QOib0R+irWGjd1F9wnhbGV5PcB9SakyRwvqwG5MKOfYxG7ocoDqLlaRwzZMidcy/PV8/OEVNKg== + "@aws-sdk/core" "3.535.0" + "@aws-sdk/middleware-host-header" "3.535.0" + "@aws-sdk/middleware-logger" "3.535.0" + "@aws-sdk/middleware-recursion-detection" "3.535.0" + "@aws-sdk/middleware-user-agent" "3.540.0" + "@aws-sdk/region-config-resolver" "3.535.0" + "@aws-sdk/types" "3.535.0" + "@aws-sdk/util-endpoints" "3.540.0" + "@aws-sdk/util-user-agent-browser" "3.535.0" + "@aws-sdk/util-user-agent-node" "3.535.0" + "@smithy/config-resolver" "^2.2.0" + "@smithy/core" "^1.4.0" + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/hash-node" "^2.2.0" + "@smithy/invalid-dependency" "^2.2.0" + "@smithy/middleware-content-length" "^2.2.0" + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-retry" "^2.2.0" + "@smithy/middleware-serde" "^2.3.0" + "@smithy/middleware-stack" "^2.2.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + "@smithy/util-base64" "^2.3.0" + "@smithy/util-body-length-browser" "^2.2.0" + "@smithy/util-body-length-node" "^2.3.0" + "@smithy/util-defaults-mode-browser" "^2.2.0" + "@smithy/util-defaults-mode-node" "^2.3.0" + "@smithy/util-endpoints" "^1.2.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-retry" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@aws-sdk/client-sts@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sts/-/client-sts-3.540.0.tgz#16ce14db1c5387be3ad9be6dd4f8ed33b63193c8" + integrity sha512-ITHUQxvpqfQX6obfpIi3KYGzZYfe/I5Ixjfxoi5lB7ISCtmxqObKB1fzD93wonkMJytJ7LUO8panZl/ojiJ1uw== dependencies: "@aws-crypto/sha256-browser" "3.0.0" "@aws-crypto/sha256-js" "3.0.0" - "@aws-sdk/core" "3.496.0" - "@aws-sdk/middleware-host-header" "3.502.0" - "@aws-sdk/middleware-logger" "3.502.0" - "@aws-sdk/middleware-recursion-detection" "3.502.0" - "@aws-sdk/middleware-user-agent" "3.502.0" - "@aws-sdk/region-config-resolver" "3.502.0" - "@aws-sdk/types" "3.502.0" - "@aws-sdk/util-endpoints" "3.502.0" - "@aws-sdk/util-user-agent-browser" "3.502.0" - "@aws-sdk/util-user-agent-node" "3.502.0" - "@smithy/config-resolver" "^2.1.1" - "@smithy/core" "^1.3.1" - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/hash-node" "^2.1.1" - "@smithy/invalid-dependency" "^2.1.1" - "@smithy/middleware-content-length" "^2.1.1" - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-retry" "^2.1.1" - "@smithy/middleware-serde" "^2.1.1" - "@smithy/middleware-stack" "^2.1.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - "@smithy/util-base64" "^2.1.1" - "@smithy/util-body-length-browser" "^2.1.1" - "@smithy/util-body-length-node" "^2.2.1" - "@smithy/util-defaults-mode-browser" "^2.1.1" - "@smithy/util-defaults-mode-node" "^2.1.1" - "@smithy/util-endpoints" "^1.1.1" - "@smithy/util-middleware" "^2.1.1" - "@smithy/util-retry" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" + "@aws-sdk/core" "3.535.0" + "@aws-sdk/middleware-host-header" "3.535.0" + 
"@aws-sdk/middleware-logger" "3.535.0" + "@aws-sdk/middleware-recursion-detection" "3.535.0" + "@aws-sdk/middleware-user-agent" "3.540.0" + "@aws-sdk/region-config-resolver" "3.535.0" + "@aws-sdk/types" "3.535.0" + "@aws-sdk/util-endpoints" "3.540.0" + "@aws-sdk/util-user-agent-browser" "3.535.0" + "@aws-sdk/util-user-agent-node" "3.535.0" + "@smithy/config-resolver" "^2.2.0" + "@smithy/core" "^1.4.0" + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/hash-node" "^2.2.0" + "@smithy/invalid-dependency" "^2.2.0" + "@smithy/middleware-content-length" "^2.2.0" + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-retry" "^2.2.0" + "@smithy/middleware-serde" "^2.3.0" + "@smithy/middleware-stack" "^2.2.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + "@smithy/util-base64" "^2.3.0" + "@smithy/util-body-length-browser" "^2.2.0" + "@smithy/util-body-length-node" "^2.3.0" + "@smithy/util-defaults-mode-browser" "^2.2.0" + "@smithy/util-defaults-mode-node" "^2.3.0" + "@smithy/util-endpoints" "^1.2.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-retry" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@aws-sdk/core@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.535.0.tgz#f3a726c297cea9634d19a1db4e958c918c506c8b" + integrity sha512-+Yusa9HziuaEDta1UaLEtMAtmgvxdxhPn7jgfRY6PplqAqgsfa5FR83sxy5qr2q7xjQTwHtV4MjQVuOjG9JsLw== + dependencies: + "@smithy/core" "^1.4.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/signature-v4" "^2.2.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" fast-xml-parser "4.2.5" - tslib "^2.5.0" - -"@aws-sdk/core@3.496.0": - version "3.496.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.496.0.tgz#ec1394753b6b2f6e38aea593e30b2db5c7390969" - integrity sha512-yT+ug7Cw/3eJi7x2es0+46x12+cIJm5Xv+GPWsrTFD1TKgqO/VPEgfDtHFagDNbFmjNQA65Ygc/kEdIX9ICX/A== - dependencies: - "@smithy/core" "^1.3.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/signature-v4" "^2.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-cognito-identity@3.509.0": - version "3.509.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-cognito-identity/-/credential-provider-cognito-identity-3.509.0.tgz#69cd9e59a166407d85ec4c5f2f446401d014c18e" - integrity sha512-cQEwOoNzdN9vPTiDZt34EZNL1qXMk2lnsg9U1yEeVwvfur/5G/D0Kd1uvJmPXEtZOcJklPKAPrcvCejVmZuN3A== - dependencies: - "@aws-sdk/client-cognito-identity" "3.509.0" - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-env@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.502.0.tgz#800e63b2b9d90b078a120d474d5a3b1ec5b48514" - integrity sha512-KIB8Ae1Z7domMU/jU4KiIgK4tmYgvuXlhR54ehwlVHxnEoFPoPuGHFZU7oFn79jhhSLUFQ1lRYMxP0cEwb7XeQ== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-http@3.503.1": - version "3.503.1" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.503.1.tgz#e882a4b740c9193650053033b3001b03ca4b12c8" - integrity 
sha512-rTdlFFGoPPFMF2YjtlfRuSgKI+XsF49u7d98255hySwhsbwd3Xp+utTTPquxP+CwDxMHbDlI7NxDzFiFdsoZug== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/util-stream" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-ini@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.507.0.tgz#c2b9cd1bf172a0057bf0ad888c19ce5450df13f2" - integrity sha512-2CnyduoR9COgd7qH1LPYK8UggGqVs8R4ASDMB5bwGxbg9ZerlStDiHpqvJNNg1k+VlejBr++utxfmHd236XgmQ== - dependencies: - "@aws-sdk/client-sts" "3.507.0" - "@aws-sdk/credential-provider-env" "3.502.0" - "@aws-sdk/credential-provider-process" "3.502.0" - "@aws-sdk/credential-provider-sso" "3.507.0" - "@aws-sdk/credential-provider-web-identity" "3.507.0" - "@aws-sdk/types" "3.502.0" - "@smithy/credential-provider-imds" "^2.2.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-node@3.509.0": - version "3.509.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.509.0.tgz#0a65ce0e8c20312b00654e12ff95f80f3317dc82" - integrity sha512-uXT8wIq1k+m0mS/pC9U1cUTIjUB7/4PgxyiYsTxYPIULtWnQXltAlcPU3QzKTJMP60sqftRYZ2jFDLAVsipQxw== - dependencies: - "@aws-sdk/credential-provider-env" "3.502.0" - "@aws-sdk/credential-provider-http" "3.503.1" - "@aws-sdk/credential-provider-ini" "3.507.0" - "@aws-sdk/credential-provider-process" "3.502.0" - "@aws-sdk/credential-provider-sso" "3.507.0" - "@aws-sdk/credential-provider-web-identity" "3.507.0" - "@aws-sdk/types" "3.502.0" - "@smithy/credential-provider-imds" "^2.2.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-process@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.502.0.tgz#6c41d8845a1c7073491a064c158363de04640381" - integrity sha512-fJJowOjQ4infYQX0E1J3xFVlmuwEYJAFk0Mo1qwafWmEthsBJs+6BR2RiWDELHKrSK35u4Pf3fu3RkYuCtmQFw== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-sso@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.507.0.tgz#e98cf7fad69b4c12aa85c44affe9aae4cc81d796" - integrity sha512-6WBjou52QukFpDi4ezb19bcAx/bM8ge8qnJnRT02WVRmU6zFQ5yLD2fW1MFsbX3cwbey+wSqKd5FGE1Hukd5wQ== - dependencies: - "@aws-sdk/client-sso" "3.507.0" - "@aws-sdk/token-providers" "3.507.0" - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/credential-provider-web-identity@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.507.0.tgz#22e028e2dd2a0a927707da1408099bc4f5b7a606" - integrity sha512-f+aGMfazBimX7S06224JRYzGTaMh1uIhfj23tZylPJ05KxTVi5IO1RoqeI/uHLJ+bDOx+JHBC04g/oCdO4kHvw== - dependencies: - "@aws-sdk/client-sts" "3.507.0" - "@aws-sdk/types" 
"3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-cognito-identity@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-cognito-identity/-/credential-provider-cognito-identity-3.540.0.tgz#9b6a309d3809a6a056b6b6c9e7ac580cda19db31" + integrity sha512-XOTAIuVgticX+43GMpRbi5OHmJAhHfoHYsVGu0eRLhri1yFqUHXJgHUd51QQtlA8cFQN7JnFFM6sF5EDCPF49g== + dependencies: + "@aws-sdk/client-cognito-identity" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-env@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.535.0.tgz#26248e263a8107953d5496cb3760d4e7c877abcf" + integrity sha512-XppwO8c0GCGSAvdzyJOhbtktSEaShg14VJKg8mpMa1XcgqzmcqqHQjtDWbx5rZheY1VdpXZhpEzJkB6LpQejpA== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-http@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.535.0.tgz#0a42f6b1a61d927bbce9f4afd25112f486bd05da" + integrity sha512-kdj1wCmOMZ29jSlUskRqN04S6fJ4dvt0Nq9Z32SA6wO7UG8ht6Ot9h/au/eTWJM3E1somZ7D771oK7dQt9b8yw== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-stream" "^2.2.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-ini@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.540.0.tgz#8e17b23bf242152775db1473f7d2952beb6a5ef9" + integrity sha512-igN/RbsnulIBwqXbwsWmR3srqmtbPF1dm+JteGvUY31FW65fTVvWvSr945Y/cf1UbhPmIQXntlsqESqpkhTHwg== + dependencies: + "@aws-sdk/client-sts" "3.540.0" + "@aws-sdk/credential-provider-env" "3.535.0" + "@aws-sdk/credential-provider-process" "3.535.0" + "@aws-sdk/credential-provider-sso" "3.540.0" + "@aws-sdk/credential-provider-web-identity" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/credential-provider-imds" "^2.3.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-node@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.540.0.tgz#e6fd3404de68e7f9580f01aa542b16e9abc58e5c" + integrity sha512-HKQZJbLHlrHX9A0B1poiYNXIIQfy8whTjuosTCYKPDBhhUyVAQfxy/KG726j0v43IhaNPLgTGZCJve4hAsazSw== + dependencies: + "@aws-sdk/credential-provider-env" "3.535.0" + "@aws-sdk/credential-provider-http" "3.535.0" + "@aws-sdk/credential-provider-ini" "3.540.0" + "@aws-sdk/credential-provider-process" "3.535.0" + "@aws-sdk/credential-provider-sso" "3.540.0" + "@aws-sdk/credential-provider-web-identity" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/credential-provider-imds" "^2.3.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-process@3.535.0": + version "3.535.0" + resolved 
"https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.535.0.tgz#ea1e8a38a32e36bbdc3f75eb03352e6eafa0c659" + integrity sha512-9O1OaprGCnlb/kYl8RwmH7Mlg8JREZctB8r9sa1KhSsWFq/SWO0AuJTyowxD7zL5PkeS4eTvzFFHWCa3OO5epA== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-sso@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.540.0.tgz#1fc5c53a0df8227249c73a3cb7660b1accb79186" + integrity sha512-tKkFqK227LF5ajc5EL6asXS32p3nkofpP8G7NRpU7zOEOQCg01KUc4JRX+ItI0T007CiN1J19yNoFqHLT/SqHg== + dependencies: + "@aws-sdk/client-sso" "3.540.0" + "@aws-sdk/token-providers" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/credential-provider-web-identity@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.540.0.tgz#775a2090e9f4f89efe2ebdf1e2c109a47561c0e9" + integrity sha512-OpDm9w3A168B44hSjpnvECP4rvnFzD86rN4VYdGADuCvEa5uEcdA/JuT5WclFPDqdWEmFBqS1pxBIJBf0g2Q9Q== + dependencies: + "@aws-sdk/client-sts" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" "@aws-sdk/credential-providers@^3.186.0": - version "3.509.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-providers/-/credential-providers-3.509.0.tgz#6f2f26163e46aff7e093979a9afbf1fcb1ee1594" - integrity sha512-NqmWeLmk+1SJF4hvJU62Mv/+O535Ge7HL5MPheDpkhGZAN4eCeXgJvvuUDOzJOLijHlH9V8xCi/XG33KQ2Kvmg== - dependencies: - "@aws-sdk/client-cognito-identity" "3.509.0" - "@aws-sdk/client-sso" "3.507.0" - "@aws-sdk/client-sts" "3.507.0" - "@aws-sdk/credential-provider-cognito-identity" "3.509.0" - "@aws-sdk/credential-provider-env" "3.502.0" - "@aws-sdk/credential-provider-http" "3.503.1" - "@aws-sdk/credential-provider-ini" "3.507.0" - "@aws-sdk/credential-provider-node" "3.509.0" - "@aws-sdk/credential-provider-process" "3.502.0" - "@aws-sdk/credential-provider-sso" "3.507.0" - "@aws-sdk/credential-provider-web-identity" "3.507.0" - "@aws-sdk/types" "3.502.0" - "@smithy/credential-provider-imds" "^2.2.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/middleware-host-header@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-host-header/-/middleware-host-header-3.502.0.tgz#2651fb3509990271c89eb50133fb17cb8ae435f6" - integrity sha512-EjnG0GTYXT/wJBmm5/mTjDcAkzU8L7wQjOzd3FTXuTCNNyvAvwrszbOj5FlarEw5XJBbQiZtBs+I5u9+zy560w== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/protocol-http" "^3.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/middleware-logger@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-logger/-/middleware-logger-3.502.0.tgz#558cefdd233779f15687957f9f07497199b22d72" - integrity sha512-FDyv6K4nCoHxbjLGS2H8ex8I0KDIiu4FJgVRPs140ZJy6gE5Pwxzv6YTzZGLMrnqcIs9gh065Lf6DjwMelZqaw== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/middleware-recursion-detection@3.502.0": - version "3.502.0" - resolved 
"https://registry.yarnpkg.com/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.502.0.tgz#c22e2c0c1d551e58c788264687324bb7186af2cc" - integrity sha512-hvbyGJbxeuezxOu8VfFmcV4ql1hKXLxHTe5FNYfEBat2KaZXVhc1Hg+4TvB06/53p+E8J99Afmumkqbxs2esUA== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/protocol-http" "^3.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/middleware-signing@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-signing/-/middleware-signing-3.502.0.tgz#48b3503147eecb1a53a63633462de353668f635a" - integrity sha512-4hF08vSzJ7L6sB+393gOFj3s2N6nLusYS0XrMW6wYNFU10IDdbf8Z3TZ7gysDJJHEGQPmTAesPEDBsasGWcMxg== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/signature-v4" "^2.1.1" - "@smithy/types" "^2.9.1" - "@smithy/util-middleware" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/middleware-user-agent@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.502.0.tgz#dd740f150d6f3110cf5b08fedf361d202f899c93" - integrity sha512-TxbBZbRiXPH0AUxegqiNd9aM9zNSbfjtBs5MEfcBsweeT/B2O7K1EjP9+CkB8Xmk/5FLKhAKLr19b1TNoE27rw== - dependencies: - "@aws-sdk/types" "3.502.0" - "@aws-sdk/util-endpoints" "3.502.0" - "@smithy/protocol-http" "^3.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/region-config-resolver@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/region-config-resolver/-/region-config-resolver-3.502.0.tgz#c18a04060879eb03c47c05b05fc296119ee073ba" - integrity sha512-mxmsX2AGgnSM+Sah7mcQCIneOsJQNiLX0COwEttuf8eO+6cLMAZvVudH3BnWTfea4/A9nuri9DLCqBvEmPrilg== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/types" "^2.9.1" - "@smithy/util-config-provider" "^2.2.1" - "@smithy/util-middleware" "^2.1.1" - tslib "^2.5.0" - -"@aws-sdk/token-providers@3.507.0": - version "3.507.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.507.0.tgz#7456cec822a7f59a4b58a2eda1a0ff963c4c3c6b" - integrity sha512-ehOINGjoGJc6Puzon7ev4bXckkaZx18WNgMTNttYJhj3vTpj5LPSQbI/5SS927bEbpGMFz1+hJ6Ra5WGfbTcEQ== - dependencies: - "@aws-sdk/client-sso-oidc" "3.507.0" - "@aws-sdk/types" "3.502.0" - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/types@3.502.0", "@aws-sdk/types@^3.222.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/types/-/types-3.502.0.tgz#c23dda4df7fdbe32642d4f5ab23516f455fb6aba" - integrity sha512-M0DSPYe/gXhwD2QHgoukaZv5oDxhW3FfvYIrJptyqUq3OnPJBcDbihHjrE0PBtfh/9kgMZT60/fQ2NVFANfa2g== - dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@aws-sdk/util-endpoints@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-endpoints/-/util-endpoints-3.502.0.tgz#aee818c0c53dfedfd49599fc260cd880faea5e82" - integrity sha512-6LKFlJPp2J24r1Kpfoz5ESQn+1v5fEjDB3mtUKRdpwarhm3syu7HbKlHCF3KbcCOyahobvLvhoedT78rJFEeeg== - dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/types" "^2.9.1" - "@smithy/util-endpoints" "^1.1.1" - tslib "^2.5.0" + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-providers/-/credential-providers-3.540.0.tgz#649725589d210bc1237e2d3d8893302ef3a79ecf" + integrity sha512-tAmvqdZngCrER5/AAwTmDSjO05LGIshKL+lwcJr2OUV5jtQVzfbFrorf+b5dnI+3i8+zGcEAV9omra4XGrO9Kg== + dependencies: 
+ "@aws-sdk/client-cognito-identity" "3.540.0" + "@aws-sdk/client-sso" "3.540.0" + "@aws-sdk/client-sts" "3.540.0" + "@aws-sdk/credential-provider-cognito-identity" "3.540.0" + "@aws-sdk/credential-provider-env" "3.535.0" + "@aws-sdk/credential-provider-http" "3.535.0" + "@aws-sdk/credential-provider-ini" "3.540.0" + "@aws-sdk/credential-provider-node" "3.540.0" + "@aws-sdk/credential-provider-process" "3.535.0" + "@aws-sdk/credential-provider-sso" "3.540.0" + "@aws-sdk/credential-provider-web-identity" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/credential-provider-imds" "^2.3.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/middleware-host-header@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-host-header/-/middleware-host-header-3.535.0.tgz#d5264f813592f5e77df25e5a14bbb0e6441812db" + integrity sha512-0h6TWjBWtDaYwHMQJI9ulafeS4lLaw1vIxRjbpH0svFRt6Eve+Sy8NlVhECfTU2hNz/fLubvrUxsXoThaLBIew== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/middleware-logger@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-logger/-/middleware-logger-3.535.0.tgz#1a8ffd6c368edd6cb32e1edf7b1dced95c1820ee" + integrity sha512-huNHpONOrEDrdRTvSQr1cJiRMNf0S52NDXtaPzdxiubTkP+vni2MohmZANMOai/qT0olmEVX01LhZ0ZAOgmg6A== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/middleware-recursion-detection@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.535.0.tgz#6aa1e1bd1e84730d58a73021b745e20d4341a92d" + integrity sha512-am2qgGs+gwqmR4wHLWpzlZ8PWhm4ktj5bYSgDrsOfjhdBlWNxvPoID9/pDAz5RWL48+oH7I6SQzMqxXsFDikrw== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/middleware-user-agent@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.540.0.tgz#4981c64c1eeb6b5c453bce02d060b8c71d44994d" + integrity sha512-8Rd6wPeXDnOYzWj1XCmOKcx/Q87L0K1/EHqOBocGjLVbN3gmRxBvpmR1pRTjf7IsWfnnzN5btqtcAkfDPYQUMQ== + dependencies: + "@aws-sdk/types" "3.535.0" + "@aws-sdk/util-endpoints" "3.540.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/region-config-resolver@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/region-config-resolver/-/region-config-resolver-3.535.0.tgz#20a30fb5fbbe27ab70f2ed16327bae7e367b5cec" + integrity sha512-IXOznDiaItBjsQy4Fil0kzX/J3HxIOknEphqHbOfUf+LpA5ugcsxuQQONrbEQusCBnfJyymrldBvBhFmtlU9Wg== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/types" "^2.12.0" + "@smithy/util-config-provider" "^2.3.0" + "@smithy/util-middleware" "^2.2.0" + tslib "^2.6.2" + +"@aws-sdk/token-providers@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.540.0.tgz#06fb874a62d3c496875768ac648bc6cca4c75a79" + integrity sha512-9BvtiVEZe5Ev88Wa4ZIUbtT6BVcPwhxmVInQ6c12MYNb0WNL54BN6wLy/eknAfF05gpX2/NDU2pUDOyMPdm/+g== + dependencies: + "@aws-sdk/client-sso-oidc" "3.540.0" + "@aws-sdk/types" "3.535.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + 
+"@aws-sdk/types@3.535.0", "@aws-sdk/types@^3.222.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/types/-/types-3.535.0.tgz#5e6479f31299dd9df170e63f4d10fe739008cf04" + integrity sha512-aY4MYfduNj+sRR37U7XxYR8wemfbKP6lx00ze2M2uubn7mZotuVrWYAafbMSXrdEMSToE5JDhr28vArSOoLcSg== + dependencies: + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@aws-sdk/util-endpoints@3.540.0": + version "3.540.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-endpoints/-/util-endpoints-3.540.0.tgz#a7fea1d2a5e64623353aaa6ee32dbb86ab9cd3f8" + integrity sha512-1kMyQFAWx6f8alaI6UT65/5YW/7pDWAKAdNwL6vuJLea03KrZRX3PMoONOSJpAS5m3Ot7HlWZvf3wZDNTLELZw== + dependencies: + "@aws-sdk/types" "3.535.0" + "@smithy/types" "^2.12.0" + "@smithy/util-endpoints" "^1.2.0" + tslib "^2.6.2" "@aws-sdk/util-locate-window@^3.0.0": - version "3.495.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-locate-window/-/util-locate-window-3.495.0.tgz#9034fd8db77991b28ed20e067acdd53e8b8f824b" - integrity sha512-MfaPXT0kLX2tQaR90saBT9fWQq2DHqSSJRzW+MZWsmF+y5LGCOhO22ac/2o6TKSQm7h0HRc2GaADqYYYor62yg== + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-locate-window/-/util-locate-window-3.535.0.tgz#0200a336fddd47dd6567ce15d01f62be50a315d7" + integrity sha512-PHJ3SL6d2jpcgbqdgiPxkXpu7Drc2PYViwxSIqvvMKhDwzSB1W3mMvtpzwKM4IE7zLFodZo0GKjJ9AsoXndXhA== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@aws-sdk/util-user-agent-browser@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.502.0.tgz#87b42abff6944052c78a84981637ac21859dd016" - integrity sha512-v8gKyCs2obXoIkLETAeEQ3AM+QmhHhst9xbM1cJtKUGsRlVIak/XyyD+kVE6kmMm1cjfudHpHKABWk9apQcIZQ== +"@aws-sdk/util-user-agent-browser@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.535.0.tgz#d67d72e8b933051620f18ddb1c2be225f79f588f" + integrity sha512-RWMcF/xV5n+nhaA/Ff5P3yNP3Kur/I+VNZngog4TEs92oB/nwOdAg/2JL8bVAhUbMrjTjpwm7PItziYFQoqyig== dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/types" "^2.9.1" + "@aws-sdk/types" "3.535.0" + "@smithy/types" "^2.12.0" bowser "^2.11.0" - tslib "^2.5.0" + tslib "^2.6.2" -"@aws-sdk/util-user-agent-node@3.502.0": - version "3.502.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.502.0.tgz#04ac4d0371d4f243f12ddc23b42ca8ceb27dfad9" - integrity sha512-9RjxpkGZKbTdl96tIJvAo+vZoz4P/cQh36SBUt9xfRfW0BtsaLyvSrvlR5wyUYhvRcC12Axqh/8JtnAPq//+Vw== +"@aws-sdk/util-user-agent-node@3.535.0": + version "3.535.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.535.0.tgz#f5c26fb6f3f561d3cf35f96f303b1775afad0a5b" + integrity sha512-dRek0zUuIT25wOWJlsRm97nTkUlh1NDcLsQZIN2Y8KxhwoXXWtJs5vaDPT+qAg+OpcNj80i1zLR/CirqlFg/TQ== dependencies: - "@aws-sdk/types" "3.502.0" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@aws-sdk/types" "3.535.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" "@aws-sdk/util-utf8-browser@^3.0.0": version "3.259.0" @@ -574,48 +562,48 @@ dependencies: tslib "^2.3.1" -"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.23.5": - version "7.23.5" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.5.tgz#9009b69a8c602293476ad598ff53e4562e15c244" - integrity 
sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA== +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.23.5", "@babel/code-frame@^7.24.1", "@babel/code-frame@^7.24.2": + version "7.24.2" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.2.tgz#718b4b19841809a58b29b68cde80bc5e1aa6d9ae" + integrity sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ== dependencies: - "@babel/highlight" "^7.23.4" - chalk "^2.4.2" + "@babel/highlight" "^7.24.2" + picocolors "^1.0.0" "@babel/compat-data@^7.23.5": - version "7.23.5" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98" - integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw== + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.24.1.tgz#31c1f66435f2a9c329bb5716a6d6186c516c3742" + integrity sha512-Pc65opHDliVpRHuKfzI+gSA4zcgr65O4cl64fFJIWEEh8JoHIHh0Oez1Eo8Arz8zq/JhgKodQaxEwUPRtZylVA== -"@babel/core@^7.11.6", "@babel/core@^7.12.3": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.23.9.tgz#b028820718000f267870822fec434820e9b1e4d1" - integrity sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw== +"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.23.9": + version "7.24.3" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.24.3.tgz#568864247ea10fbd4eff04dda1e05f9e2ea985c3" + integrity sha512-5FcvN1JHw2sHJChotgx8Ek0lyuh4kCKelgMTTqhYJJtloNvUfpAFMeNQUtdlIaktwrSV9LtCdqwk48wL2wBacQ== dependencies: "@ampproject/remapping" "^2.2.0" - "@babel/code-frame" "^7.23.5" - "@babel/generator" "^7.23.6" + "@babel/code-frame" "^7.24.2" + "@babel/generator" "^7.24.1" "@babel/helper-compilation-targets" "^7.23.6" "@babel/helper-module-transforms" "^7.23.3" - "@babel/helpers" "^7.23.9" - "@babel/parser" "^7.23.9" - "@babel/template" "^7.23.9" - "@babel/traverse" "^7.23.9" - "@babel/types" "^7.23.9" + "@babel/helpers" "^7.24.1" + "@babel/parser" "^7.24.1" + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.1" + "@babel/types" "^7.24.0" convert-source-map "^2.0.0" debug "^4.1.0" gensync "^1.0.0-beta.2" json5 "^2.2.3" semver "^6.3.1" -"@babel/generator@^7.23.6", "@babel/generator@^7.7.2": - version "7.23.6" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.6.tgz#9e1fca4811c77a10580d17d26b57b036133f3c2e" - integrity sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw== +"@babel/generator@^7.24.1", "@babel/generator@^7.7.2": + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.24.1.tgz#e67e06f68568a4ebf194d1c6014235344f0476d0" + integrity sha512-DfCRfZsBcrPEHUfuBMgbJ1Ut01Y/itOs+hY2nFLgqsqXd52/iSiVq5TITtUasIUgm+IIKdY2/1I7auiQOEeC9A== dependencies: - "@babel/types" "^7.23.6" - "@jridgewell/gen-mapping" "^0.3.2" - "@jridgewell/trace-mapping" "^0.3.17" + "@babel/types" "^7.24.0" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" jsesc "^2.5.1" "@babel/helper-compilation-targets@^7.23.6": @@ -650,11 +638,11 @@ "@babel/types" "^7.22.5" "@babel/helper-module-imports@^7.22.15": - version "7.22.15" - resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz#16146307acdc40cc00c3b2c647713076464bdbf0" - integrity 
sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w== + version "7.24.3" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz#6ac476e6d168c7c23ff3ba3cf4f7841d46ac8128" + integrity sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg== dependencies: - "@babel/types" "^7.22.15" + "@babel/types" "^7.24.0" "@babel/helper-module-transforms@^7.23.3": version "7.23.3" @@ -667,10 +655,10 @@ "@babel/helper-split-export-declaration" "^7.22.6" "@babel/helper-validator-identifier" "^7.22.20" -"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.8.0": - version "7.22.5" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz#dd7ee3735e8a313b9f7b05a773d892e88e6d7295" - integrity sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg== +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.24.0", "@babel/helper-plugin-utils@^7.8.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.0.tgz#945681931a52f15ce879fd5b86ce2dae6d3d7f2a" + integrity sha512-9cUznXMG0+FxRuJfvL82QlTqIzhVW9sL0KjMPHhAOOvpQGL8QtdxnBKILjBqxlHyliz0yCa1G903ZXI/FuHy2w== "@babel/helper-simple-access@^7.22.5": version "7.22.5" @@ -687,9 +675,9 @@ "@babel/types" "^7.22.5" "@babel/helper-string-parser@^7.23.4": - version "7.23.4" - resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz#9478c707febcbbe1ddb38a3d91a2e054ae622d83" - integrity sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ== + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz#f99c36d3593db9540705d0739a1f10b5e20c696e" + integrity sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ== "@babel/helper-validator-identifier@^7.22.20": version "7.22.20" @@ -701,28 +689,29 @@ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307" integrity sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw== -"@babel/helpers@^7.23.9": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.23.9.tgz#c3e20bbe7f7a7e10cb9b178384b4affdf5995c7d" - integrity sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ== +"@babel/helpers@^7.24.1": + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.24.1.tgz#183e44714b9eba36c3038e442516587b1e0a1a94" + integrity sha512-BpU09QqEe6ZCHuIHFphEFgvNSrubve1FtyMton26ekZ85gRGi6LrTF7zArARp2YvyFxloeiRmtSCq5sjh1WqIg== dependencies: - "@babel/template" "^7.23.9" - "@babel/traverse" "^7.23.9" - "@babel/types" "^7.23.9" + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.1" + "@babel/types" "^7.24.0" -"@babel/highlight@^7.23.4": - version "7.23.4" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.23.4.tgz#edaadf4d8232e1a961432db785091207ead0621b" - 
integrity sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A== +"@babel/highlight@^7.24.2": + version "7.24.2" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.2.tgz#3f539503efc83d3c59080a10e6634306e0370d26" + integrity sha512-Yac1ao4flkTxTteCDZLEvdxg2fZfz1v8M4QpaGypq/WPDqg3ijHYbDfs+LG5hvzSoqaSZ9/Z9lKSP3CjZjv+pA== dependencies: "@babel/helper-validator-identifier" "^7.22.20" chalk "^2.4.2" js-tokens "^4.0.0" + picocolors "^1.0.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.23.9.tgz#7b903b6149b0f8fa7ad564af646c4c38a77fc44b" - integrity sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA== +"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0", "@babel/parser@^7.24.1": + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.1.tgz#1e416d3627393fab1cb5b0f2f1796a100ae9133a" + integrity sha512-Zo9c7N3xdOIQrNip7Lc9wvRPzlRtovHVE4lkz8WEDr7uYh/GMQhSiIgFxGIArRHYdJE5kxtZjAf8rT0xhdLCzg== "@babel/plugin-syntax-async-generators@^7.8.4": version "7.8.4" @@ -760,11 +749,11 @@ "@babel/helper-plugin-utils" "^7.8.0" "@babel/plugin-syntax-jsx@^7.7.2": - version "7.23.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz#8f2e4f8a9b5f9aa16067e142c1ac9cd9f810f473" - integrity sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg== + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.1.tgz#3f6ca04b8c841811dbc3c5c5f837934e0d626c10" + integrity sha512-2eCtxZXf+kbkMIsXS4poTvT4Yu5rXiRa+9xGVT56raghjmBTKMpFNc9R4IDiB4emao9eO22Ox7CxuJG7BgExqA== dependencies: - "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-plugin-utils" "^7.24.0" "@babel/plugin-syntax-logical-assignment-operators@^7.8.3": version "7.10.4" @@ -816,48 +805,48 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-typescript@^7.7.2": - version "7.23.3" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz#24f460c85dbbc983cd2b9c4994178bcc01df958f" - integrity sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ== + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.1.tgz#b3bcc51f396d15f3591683f90239de143c076844" + integrity sha512-Yhnmvy5HZEnHUty6i++gcfH1/l68AHnItFHnaCv6hn9dNh0hQvvQJsxpi4BMBFN5DLeHBuucT/0DgzXif/OyRw== dependencies: - "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-plugin-utils" "^7.24.0" "@babel/runtime@^7.21.0": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.9.tgz#47791a15e4603bb5f905bc0753801cf21d6345f7" - integrity sha512-0CX6F+BI2s9dkUqr08KFrAIZgNFj75rdBU/DjCyYLIaV/quFjkk6T+EJ2LkZHyZTbEV4L5p97mNkUsHl2wLFAw== + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.1.tgz#431f9a794d173b53720e69a6464abc6f0e2a5c57" + integrity sha512-+BIznRzyqBf+2wCTxcKE3wDjfGeCoVE61KSHGpkzqrLi8qxqFwBeUFyId2cxkTmm55fzDGnm0+yCxaxygrLUnQ== dependencies: regenerator-runtime "^0.14.0" -"@babel/template@^7.22.15", "@babel/template@^7.23.9", "@babel/template@^7.3.3": - version "7.23.9" - resolved 
"https://registry.yarnpkg.com/@babel/template/-/template-7.23.9.tgz#f881d0487cba2828d3259dcb9ef5005a9731011a" - integrity sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA== +"@babel/template@^7.22.15", "@babel/template@^7.24.0", "@babel/template@^7.3.3": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.0.tgz#c6a524aa93a4a05d66aaf31654258fae69d87d50" + integrity sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA== dependencies: "@babel/code-frame" "^7.23.5" - "@babel/parser" "^7.23.9" - "@babel/types" "^7.23.9" + "@babel/parser" "^7.24.0" + "@babel/types" "^7.24.0" -"@babel/traverse@^7.23.9": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.23.9.tgz#2f9d6aead6b564669394c5ce0f9302bb65b9d950" - integrity sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg== +"@babel/traverse@^7.24.1": + version "7.24.1" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.1.tgz#d65c36ac9dd17282175d1e4a3c49d5b7988f530c" + integrity sha512-xuU6o9m68KeqZbQuDt2TcKSxUw/mrsvavlEqQ1leZ/B+C9tk6E4sRWy97WaXgvq5E+nU3cXMxv3WKOCanVMCmQ== dependencies: - "@babel/code-frame" "^7.23.5" - "@babel/generator" "^7.23.6" + "@babel/code-frame" "^7.24.1" + "@babel/generator" "^7.24.1" "@babel/helper-environment-visitor" "^7.22.20" "@babel/helper-function-name" "^7.23.0" "@babel/helper-hoist-variables" "^7.22.5" "@babel/helper-split-export-declaration" "^7.22.6" - "@babel/parser" "^7.23.9" - "@babel/types" "^7.23.9" + "@babel/parser" "^7.24.1" + "@babel/types" "^7.24.0" debug "^4.3.1" globals "^11.1.0" -"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.15", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.23.6", "@babel/types@^7.23.9", "@babel/types@^7.3.3": - version "7.23.9" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.23.9.tgz#1dd7b59a9a2b5c87f8b41e52770b5ecbf492e002" - integrity sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q== +"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.24.0", "@babel/types@^7.3.3": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.0.tgz#3b951f435a92e7333eba05b7566fd297960ea1bf" + integrity sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w== dependencies: "@babel/helper-string-parser" "^7.23.4" "@babel/helper-validator-identifier" "^7.22.20" @@ -982,25 +971,25 @@ dependencies: "@jridgewell/trace-mapping" "0.3.9" -"@datadog/native-appsec@7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/@datadog/native-appsec/-/native-appsec-7.0.0.tgz#a380174dd49aef2d9bb613a0ec8ead6dc7822095" - integrity sha512-bywstWFW2hWxzPuS0+mFMVHHL0geulx5yQFtsjfszaH2LTAgk2D+Rt40MKbAoZ8q3tRw2dy6aYQ7svO3ca8jpA== +"@datadog/native-appsec@7.1.0": + version "7.1.0" + resolved "https://registry.yarnpkg.com/@datadog/native-appsec/-/native-appsec-7.1.0.tgz#e8e6254236ac6fd7d4fb8b1156b34de64ec3e174" + integrity sha512-5FATunIxmvuSGDwPmbXfOi21wC7rjfbdLX4QiT5LR+iRLjRLT5iETqwdTsqy0WOQIHmxdWuddRvuakAg3921aA== dependencies: node-gyp-build "^3.9.0" -"@datadog/native-iast-rewriter@2.2.2": - version "2.2.2" - resolved "https://registry.yarnpkg.com/@datadog/native-iast-rewriter/-/native-iast-rewriter-2.2.2.tgz#3f7feaf6be1af4c83ad063065b8ed509bbaf11cb" - integrity 
sha512-13ZBhJpjZ/tiV6rYfyAf/ITye9cyd3x12M/2NKhD4Ivev4N4uKBREAjpArOtzKtPXZ5b6oXwVV4ofT1SHoYyzA== +"@datadog/native-iast-rewriter@2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@datadog/native-iast-rewriter/-/native-iast-rewriter-2.3.0.tgz#67abddc698504ad76736c0d49681bcf9e330e1bd" + integrity sha512-78ivSaaSXOaHn3VumF9kcSI443nbPfVAWsnDTH9X1ZbqXjHpSlHHTZgK9z/TNbkvuJarS/X1GBioPMcgea1Ejg== dependencies: lru-cache "^7.14.0" node-gyp-build "^4.5.0" -"@datadog/native-iast-taint-tracking@1.6.4": - version "1.6.4" - resolved "https://registry.yarnpkg.com/@datadog/native-iast-taint-tracking/-/native-iast-taint-tracking-1.6.4.tgz#16c21ad7c36a53420c0d3c5a3720731809cc7e98" - integrity sha512-Owxk7hQ4Dxwv4zJAoMjRga0IvE6lhvxnNc8pJCHsemCWBXchjr/9bqg05Zy5JnMbKUWn4XuZeJD6RFZpRa8bfw== +"@datadog/native-iast-taint-tracking@1.7.0": + version "1.7.0" + resolved "https://registry.yarnpkg.com/@datadog/native-iast-taint-tracking/-/native-iast-taint-tracking-1.7.0.tgz#0bcd4f287c25403d2a965939b33c35ae8544b363" + integrity sha512-p3qnYJrUr9TQ38tuOFoutDAQWOobLdaaWpTl0SHu126JH3ANBxWL/QirtJy6czfzLpm5eXwYHwHidD1Y0mNPdg== dependencies: node-gyp-build "^3.9.0" @@ -1012,15 +1001,15 @@ node-addon-api "^6.1.0" node-gyp-build "^3.9.0" -"@datadog/pprof@5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@datadog/pprof/-/pprof-5.0.0.tgz#0c0aaf06def6d2bc4b2d353ec7b264dadbfbefab" - integrity sha512-vhNan4SBuNWLpexunDJQ+hNbRAgWdk2qy5Iyh7Nn94uSSHXigAJMAvu4jwMKKQKFfchtobOkWT8GQUWW3tgpFg== +"@datadog/pprof@5.2.0": + version "5.2.0" + resolved "https://registry.yarnpkg.com/@datadog/pprof/-/pprof-5.2.0.tgz#a6c2779335b4f0fd51754e4de193e98591de387e" + integrity sha512-pSwLARpNLAIV1JttxXOBRKTn/NQYXDy1PJaV458YFDdAYxnBqpsYTat3/nX+8V5GoN4SfdHDci3zqXM+Ym66gQ== dependencies: delay "^5.0.0" node-gyp-build "<4.0" p-limit "^3.1.0" - pprof-format "^2.0.7" + pprof-format "^2.1.0" source-map "^0.7.4" "@datadog/sketches-js@^2.1.0": @@ -1055,10 +1044,10 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eslint/js@8.56.0": - version "8.56.0" - resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.56.0.tgz#ef20350fec605a7f7035a01764731b2de0f3782b" - integrity sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A== +"@eslint/js@8.57.0": + version "8.57.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.0.tgz#a5417ae8427873f1dd08b70b3574b453e67b5f7f" + integrity sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g== "@ethereumjs/rlp@^4.0.1": version "4.0.1" @@ -1428,7 +1417,7 @@ dependencies: "@hapi/hoek" "^9.0.0" -"@humanwhocodes/config-array@^0.11.13": +"@humanwhocodes/config-array@^0.11.14": version "0.11.14" resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.14.tgz#d78e481a039f7566ecc9660b4ea7fe6b1fec442b" integrity sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg== @@ -1463,7 +1452,7 @@ js-yaml "^3.13.1" resolve-from "^5.0.0" -"@istanbuljs/schema@^0.1.2": +"@istanbuljs/schema@^0.1.2", "@istanbuljs/schema@^0.1.3": version "0.1.3" resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== @@ -1660,24 +1649,24 @@ "@types/yargs" "^17.0.8" chalk "^4.0.0" -"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2": - version "0.3.3" - resolved 
"https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz#7e02e6eb5df901aaedb08514203b096614024098" - integrity sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ== +"@jridgewell/gen-mapping@^0.3.5": + version "0.3.5" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36" + integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg== dependencies: - "@jridgewell/set-array" "^1.0.1" + "@jridgewell/set-array" "^1.2.1" "@jridgewell/sourcemap-codec" "^1.4.10" - "@jridgewell/trace-mapping" "^0.3.9" + "@jridgewell/trace-mapping" "^0.3.24" "@jridgewell/resolve-uri@^3.0.3", "@jridgewell/resolve-uri@^3.1.0": - version "3.1.1" - resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721" - integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA== + version "3.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== -"@jridgewell/set-array@^1.0.1": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72" - integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw== +"@jridgewell/set-array@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" + integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== "@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": version "1.4.15" @@ -1692,10 +1681,10 @@ "@jridgewell/resolve-uri" "^3.0.3" "@jridgewell/sourcemap-codec" "^1.4.10" -"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.9": - version "0.3.22" - resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.22.tgz#72a621e5de59f5f1ef792d0793a82ee20f645e4c" - integrity sha512-Wf963MzWtA2sjrNt+g18IAln9lKnlRp+K2eH4jjIoF1wYeq3aMREpG09xhlhdzS0EjwU7qmUJYangWa+151vZw== +"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": + version "0.3.25" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" + integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== dependencies: "@jridgewell/resolve-uri" "^3.1.0" "@jridgewell/sourcemap-codec" "^1.4.14" @@ -1715,10 +1704,10 @@ resolved "https://registry.yarnpkg.com/@microsoft/tsdoc/-/tsdoc-0.14.2.tgz#c3ec604a0b54b9a9b87e9735dfc59e1a5da6a5fb" integrity sha512-9b8mPpKrfeGRuhFH5iO1iwCLeIIsV6+H1sRfxbkoGXIyQE2BTsPd9zqSqQJ+pv5sJ/hT5M1zvOFL02MnEezFug== -"@mongodb-js/saslprep@^1.1.0": - version "1.1.4" - resolved "https://registry.yarnpkg.com/@mongodb-js/saslprep/-/saslprep-1.1.4.tgz#24ec1c4915a65f5c506bb88c081731450d91bb1c" - integrity sha512-8zJ8N1x51xo9hwPh6AWnKdLGEC5N3lDa6kms1YHmFBoRhTpJR6HG8wWk0td1MVCu9cD4YBrvjZEtd5Obw0Fbnw== +"@mongodb-js/saslprep@^1.1.0", 
"@mongodb-js/saslprep@^1.1.5": + version "1.1.5" + resolved "https://registry.yarnpkg.com/@mongodb-js/saslprep/-/saslprep-1.1.5.tgz#0c48a96c8d799e81fae311b7251aa5c1dc7c6e95" + integrity sha512-XLNOMH66KhJzUJNwT/qlMnS4WsNDWD5ASdyaSH3EtK+F4r/CFGa3jT4GNi4mfOitGvWXtdLgQJkQjxSVrio+jA== dependencies: sparse-bitfield "^3.0.3" @@ -1772,27 +1761,27 @@ resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-ethers/-/hardhat-ethers-2.2.3.tgz#b41053e360c31a32c2640c9a45ee981a7e603fe0" integrity sha512-YhzPdzb612X591FOe68q+qXVXGG2ANZRvDo0RRUtimev85rCrAlv/TLMEZw5c+kq9AbzocLTVX/h2jVIFPL9Xg== -"@opentelemetry/api@^1.0.0": - version "1.7.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.7.0.tgz#b139c81999c23e3c8d3c0a7234480e945920fc40" - integrity sha512-AdY5wvN0P2vXBi3b29hxZgSFvdhdxPB9+f0B6s//P9Q8nibRWeA3cHm8UmLpio9ABigkVHJ5NMPk+Mz8VCCyrw== +"@opentelemetry/api@^1.0.0", "@opentelemetry/api@^1.4.0": + version "1.8.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/api/-/api-1.8.0.tgz#5aa7abb48f23f693068ed2999ae627d2f7d902ec" + integrity sha512-I/s6F7yKUDdtMsoBWXJe8Qz40Tui5vsuKCWJEWVL+5q9sSWRzzx6v2KeNsOBEwd94j0eWkpWCH4yB6rZg9Mf0w== "@opentelemetry/core@^1.14.0": - version "1.21.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-1.21.0.tgz#8c16faf16edf861b073c03c9d45977b3f4003ee1" - integrity sha512-KP+OIweb3wYoP7qTYL/j5IpOlu52uxBv5M4+QhSmmUfLyTgu1OIS71msK3chFo1D6Y61BIH3wMiMYRCxJCQctA== + version "1.22.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/core/-/core-1.22.0.tgz#a9f33689acd4703ac780c6595497374e2113c7e5" + integrity sha512-0VoAlT6x+Xzik1v9goJ3pZ2ppi6+xd3aUfg4brfrLkDBHRIVjMP0eBHrKrhB+NKcDyMAg8fAbGL3Npg/F6AwWA== dependencies: - "@opentelemetry/semantic-conventions" "1.21.0" + "@opentelemetry/semantic-conventions" "1.22.0" -"@opentelemetry/semantic-conventions@1.21.0": - version "1.21.0" - resolved "https://registry.yarnpkg.com/@opentelemetry/semantic-conventions/-/semantic-conventions-1.21.0.tgz#83f7479c524ab523ac2df702ade30b9724476c72" - integrity sha512-lkC8kZYntxVKr7b8xmjCVUgE0a8xgDakPyDo9uSWavXPyYqLgYYGdEd2j8NxihRyb6UwpX3G/hFUF4/9q2V+/g== +"@opentelemetry/semantic-conventions@1.22.0": + version "1.22.0" + resolved "https://registry.yarnpkg.com/@opentelemetry/semantic-conventions/-/semantic-conventions-1.22.0.tgz#d7502533a7c96e25baab86bac965468e0703a8b4" + integrity sha512-CAOgFOKLybd02uj/GhCdEeeBjOS0yeoDeo/CA7ASBSmenpZHAKGB3iDm/rv3BQLcabb/OprDEsSQ1y0P8A7Siw== "@openzeppelin/contracts@^4.7.3": - version "4.9.5" - resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.5.tgz#1eed23d4844c861a1835b5d33507c1017fa98de8" - integrity sha512-ZK+W5mVhRppff9BE6YdR8CC52C8zAvsVAiWhEtQ5+oNxFE6h1WdeWo+FJSF8KKvtxxVYZ7MTP/5KoVpAU3aSWg== + version "4.9.6" + resolved "https://registry.yarnpkg.com/@openzeppelin/contracts/-/contracts-4.9.6.tgz#2a880a24eb19b4f8b25adc2a5095f2aa27f39677" + integrity sha512-xSmezSupL+y9VkHZJGDoCBpmnB2ogM13ccaYDWqJTfS3dbuHkgjuwDFUmaFauBCboQMGB/S5UqUl2y54X99BmA== "@pkgr/core@^0.1.0": version "0.1.1" @@ -1887,9 +1876,9 @@ integrity sha512-IFjIgTusQym2B5IZJG3XKr5llka7ey84fw/NOYqESP5WUfQs9zz1ww/9+qoz4ka/S6KcGBodzlCeZ5UImKbscg== "@scure/base@~1.1.0", "@scure/base@~1.1.2", "@scure/base@~1.1.4": - version "1.1.5" - resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.5.tgz#1d85d17269fe97694b9c592552dd9e5e33552157" - integrity sha512-Brj9FiG2W1MRQSTB212YVPRrcbjkv48FoZi/u4l/zds/ieRrqsh7aUf6CLwkAq61oKXr/ZlTzlY66gLIj3TFTQ== + version "1.1.6" + resolved 
"https://registry.yarnpkg.com/@scure/base/-/base-1.1.6.tgz#8ce5d304b436e4c84f896e0550c83e4d88cb917d" + integrity sha512-ok9AWwhcgYuGG3Zfhyqg+zwl+Wn5uE+dwC0NV/2qQkx4dABbb/bx96vWu8NSj+BNjjSjno+JRYRjle1jV08k3g== "@scure/bip32@1.3.2": version "1.3.2" @@ -1990,387 +1979,388 @@ p-queue "^6.6.1" p-retry "^4.0.0" -"@smithy/abort-controller@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-2.1.1.tgz#bb68596a7c8213c2ef259bc7fb0f0c118c67ea9d" - integrity sha512-1+qdrUqLhaALYL0iOcN43EP6yAXXQ2wWZ6taf4S2pNGowmOc5gx+iMQv+E42JizNJjB0+gEadOXeV1Bf7JWL1Q== +"@smithy/abort-controller@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-2.2.0.tgz#18983401a5e2154b5c94057730024a7d14cbcd35" + integrity sha512-wRlta7GuLWpTqtFfGo+nZyOO1vEvewdNR1R4rTxpC8XU6vG/NDyrFBhwLZsqg1NUoR1noVaXJPC/7ZK47QCySw== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/config-resolver@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/config-resolver/-/config-resolver-2.1.1.tgz#fc6b036084b98fd26a8ff01a5d7eb676e41749c7" - integrity sha512-lxfLDpZm+AWAHPFZps5JfDoO9Ux1764fOgvRUBpHIO8HWHcSN1dkgsago1qLRVgm1BZ8RCm8cgv99QvtaOWIhw== +"@smithy/config-resolver@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/config-resolver/-/config-resolver-2.2.0.tgz#54f40478bb61709b396960a3535866dba5422757" + integrity sha512-fsiMgd8toyUba6n1WRmr+qACzXltpdDkPTAaDqc8QqPBUzO+/JKwL6bUBseHVi8tu9l+3JOK+tSf7cay+4B3LA== dependencies: - "@smithy/node-config-provider" "^2.2.1" - "@smithy/types" "^2.9.1" - "@smithy/util-config-provider" "^2.2.1" - "@smithy/util-middleware" "^2.1.1" - tslib "^2.5.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/types" "^2.12.0" + "@smithy/util-config-provider" "^2.3.0" + "@smithy/util-middleware" "^2.2.0" + tslib "^2.6.2" -"@smithy/core@^1.3.1": - version "1.3.2" - resolved "https://registry.yarnpkg.com/@smithy/core/-/core-1.3.2.tgz#e11f3860b69ec0bdbd31e6afaa54963c02dc7f8e" - integrity sha512-tYDmTp0f2TZVE18jAOH1PnmkngLQ+dOGUlMd1u67s87ieueNeyqhja6z/Z4MxhybEiXKOWFOmGjfTZWFxljwJw== - dependencies: - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-retry" "^2.1.1" - "@smithy/middleware-serde" "^2.1.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/util-middleware" "^2.1.1" - tslib "^2.5.0" - -"@smithy/credential-provider-imds@^2.2.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@smithy/credential-provider-imds/-/credential-provider-imds-2.2.1.tgz#4805bf5e104718b959cf8699113fa9de6ddeeafa" - integrity sha512-7XHjZUxmZYnONheVQL7j5zvZXga+EWNgwEAP6OPZTi7l8J4JTeNh9aIOfE5fKHZ/ee2IeNOh54ZrSna+Vc6TFA== - dependencies: - "@smithy/node-config-provider" "^2.2.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - tslib "^2.5.0" - -"@smithy/eventstream-codec@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/eventstream-codec/-/eventstream-codec-2.1.1.tgz#4405ab0f9c77d439c575560c4886e59ee17d6d38" - integrity sha512-E8KYBxBIuU4c+zrpR22VsVrOPoEDzk35bQR3E+xm4k6Pa6JqzkDOdMyf9Atac5GPNKHJBdVaQ4JtjdWX2rl/nw== +"@smithy/core@^1.4.0": + version "1.4.0" + resolved "https://registry.yarnpkg.com/@smithy/core/-/core-1.4.0.tgz#5f9f86b681b9cbf23904041dad6f0531efe8375e" + integrity 
sha512-uu9ZDI95Uij4qk+L6kyFjdk11zqBkcJ3Lv0sc6jZrqHvLyr0+oeekD3CnqMafBn/5PRI6uv6ulW3kNLRBUHeVw== + dependencies: + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-retry" "^2.2.0" + "@smithy/middleware-serde" "^2.3.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-middleware" "^2.2.0" + tslib "^2.6.2" + +"@smithy/credential-provider-imds@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/credential-provider-imds/-/credential-provider-imds-2.3.0.tgz#326ce401b82e53f3c7ee4862a066136959a06166" + integrity sha512-BWB9mIukO1wjEOo1Ojgl6LrG4avcaC7T/ZP6ptmAaW4xluhSIPZhY+/PI5YKzlk+jsm+4sQZB45Bt1OfMeQa3w== + dependencies: + "@smithy/node-config-provider" "^2.3.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + tslib "^2.6.2" + +"@smithy/eventstream-codec@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/eventstream-codec/-/eventstream-codec-2.2.0.tgz#63d74fa817188995eb55e792a38060b0ede98dc4" + integrity sha512-8janZoJw85nJmQZc4L8TuePp2pk1nxLgkxIR0TUjKJ5Dkj5oelB9WtiSSGXCQvNsJl0VSTvK/2ueMXxvpa9GVw== dependencies: "@aws-crypto/crc32" "3.0.0" - "@smithy/types" "^2.9.1" - "@smithy/util-hex-encoding" "^2.1.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-hex-encoding" "^2.2.0" + tslib "^2.6.2" -"@smithy/fetch-http-handler@^2.4.1": - version "2.4.1" - resolved "https://registry.yarnpkg.com/@smithy/fetch-http-handler/-/fetch-http-handler-2.4.1.tgz#b4d73bbc1449f61234077d58c705b843a8587bf0" - integrity sha512-VYGLinPsFqH68lxfRhjQaSkjXM7JysUOJDTNjHBuN/ykyRb2f1gyavN9+VhhPTWCy32L4yZ2fdhpCs/nStEicg== +"@smithy/fetch-http-handler@^2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@smithy/fetch-http-handler/-/fetch-http-handler-2.5.0.tgz#0b8e1562807fdf91fe7dd5cde620d7a03ddc10ac" + integrity sha512-BOWEBeppWhLn/no/JxUL/ghTfANTjT7kg3Ww2rPqTUY9R4yHPXxJ9JhMe3Z03LN3aPwiwlpDIUcVw1xDyHqEhw== dependencies: - "@smithy/protocol-http" "^3.1.1" - "@smithy/querystring-builder" "^2.1.1" - "@smithy/types" "^2.9.1" - "@smithy/util-base64" "^2.1.1" - tslib "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/querystring-builder" "^2.2.0" + "@smithy/types" "^2.12.0" + "@smithy/util-base64" "^2.3.0" + tslib "^2.6.2" -"@smithy/hash-node@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/hash-node/-/hash-node-2.1.1.tgz#0f8a22d97565ca948724f72267e4d3a2f33740a8" - integrity sha512-Qhoq0N8f2OtCnvUpCf+g1vSyhYQrZjhSwvJ9qvR8BUGOtTXiyv2x1OD2e6jVGmlpC4E4ax1USHoyGfV9JFsACg== +"@smithy/hash-node@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/hash-node/-/hash-node-2.2.0.tgz#df29e1e64811be905cb3577703b0e2d0b07fc5cc" + integrity sha512-zLWaC/5aWpMrHKpoDF6nqpNtBhlAYKF/7+9yMN7GpdR8CzohnWfGtMznPybnwSS8saaXBMxIGwJqR4HmRp6b3g== dependencies: - "@smithy/types" "^2.9.1" - "@smithy/util-buffer-from" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-buffer-from" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" -"@smithy/invalid-dependency@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/invalid-dependency/-/invalid-dependency-2.1.1.tgz#bd69fa24dd35e9bc65a160bd86becdf1399e4463" - integrity sha512-7WTgnKw+VPg8fxu2v9AlNOQ5yaz6RA54zOVB4f6vQuR0xFKd+RzlCpt0WidYTsye7F+FYDIaS/RnJW4pxjNInw== +"@smithy/invalid-dependency@^2.2.0": + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/@smithy/invalid-dependency/-/invalid-dependency-2.2.0.tgz#ee3d8980022cb5edb514ac187d159b3e773640f0" + integrity sha512-nEDASdbKFKPXN2O6lOlTgrEEOO9NHIeO+HVvZnkqc8h5U9g3BIhWsvzFo+UcUbliMHvKNPD/zVxDrkP1Sbgp8Q== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/is-array-buffer@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/is-array-buffer/-/is-array-buffer-2.1.1.tgz#07b4c77ae67ed58a84400c76edd482271f9f957b" - integrity sha512-xozSQrcUinPpNPNPds4S7z/FakDTh1MZWtRP/2vQtYB/u3HYrX2UXuZs+VhaKBd6Vc7g2XPr2ZtwGBNDN6fNKQ== +"@smithy/is-array-buffer@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/is-array-buffer/-/is-array-buffer-2.2.0.tgz#f84f0d9f9a36601a9ca9381688bd1b726fd39111" + integrity sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/middleware-content-length@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-content-length/-/middleware-content-length-2.1.1.tgz#df767de12d594bc5622009fb0fc8343522697d8c" - integrity sha512-rSr9ezUl9qMgiJR0UVtVOGEZElMdGFyl8FzWEF5iEKTlcWxGr2wTqGfDwtH3LAB7h+FPkxqv4ZU4cpuCN9Kf/g== +"@smithy/middleware-content-length@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/middleware-content-length/-/middleware-content-length-2.2.0.tgz#a82e97bd83d8deab69e07fea4512563bedb9461a" + integrity sha512-5bl2LG1Ah/7E5cMSC+q+h3IpVHMeOkG0yLRyQT1p2aMJkSrZG7RlXHPuAgb7EyaFeidKEnnd/fNaLLaKlHGzDQ== dependencies: - "@smithy/protocol-http" "^3.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/middleware-endpoint@^2.4.1": - version "2.4.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-2.4.1.tgz#9e500df4d944741808e92018ccd2e948b598a49f" - integrity sha512-XPZTb1E2Oav60Ven3n2PFx+rX9EDsU/jSTA8VDamt7FXks67ekjPY/XrmmPDQaFJOTUHJNKjd8+kZxVO5Ael4Q== - dependencies: - "@smithy/middleware-serde" "^2.1.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/url-parser" "^2.1.1" - "@smithy/util-middleware" "^2.1.1" - tslib "^2.5.0" - -"@smithy/middleware-retry@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-2.1.1.tgz#ddc749dd927f136714f76ca5a52dcfb0993ee162" - integrity sha512-eMIHOBTXro6JZ+WWzZWd/8fS8ht5nS5KDQjzhNMHNRcG5FkNTqcKpYhw7TETMYzbLfhO5FYghHy1vqDWM4FLDA== - dependencies: - "@smithy/node-config-provider" "^2.2.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/service-error-classification" "^2.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/util-middleware" "^2.1.1" - "@smithy/util-retry" "^2.1.1" - tslib "^2.5.0" +"@smithy/middleware-endpoint@^2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-2.5.0.tgz#9f1459e9b4cbf00fadfd99e98f88d4b1a2aeb987" + integrity sha512-OBhI9ZEAG8Xen0xsFJwwNOt44WE2CWkfYIxTognC8x42Lfsdf0VN/wCMqpdkySMDio/vts10BiovAxQp0T0faA== + dependencies: + "@smithy/middleware-serde" "^2.3.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + "@smithy/url-parser" "^2.2.0" + "@smithy/util-middleware" "^2.2.0" + tslib "^2.6.2" + 
+"@smithy/middleware-retry@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-2.2.0.tgz#ff48ac01ad57394eeea15a0146a86079cf6364b7" + integrity sha512-PsjDOLpbevgn37yJbawmfVoanru40qVA8UEf2+YA1lvOefmhuhL6ZbKtGsLAWDRnE1OlAmedsbA/htH6iSZjNA== + dependencies: + "@smithy/node-config-provider" "^2.3.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/service-error-classification" "^2.1.5" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-retry" "^2.2.0" + tslib "^2.6.2" uuid "^8.3.2" -"@smithy/middleware-serde@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-serde/-/middleware-serde-2.1.1.tgz#2c5750f76e276a5249720f6c3c24fac29abbee16" - integrity sha512-D8Gq0aQBeE1pxf3cjWVkRr2W54t+cdM2zx78tNrVhqrDykRA7asq8yVJij1u5NDtKzKqzBSPYh7iW0svUKg76g== +"@smithy/middleware-serde@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/middleware-serde/-/middleware-serde-2.3.0.tgz#a7615ba646a88b6f695f2d55de13d8158181dd13" + integrity sha512-sIADe7ojwqTyvEQBe1nc/GXB9wdHhi9UwyX0lTyttmUWDJLP655ZYE1WngnNyXREme8I27KCaUhyhZWRXL0q7Q== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/middleware-stack@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-stack/-/middleware-stack-2.1.1.tgz#67f992dc36e8a6861f881f80a81c1c30956a0396" - integrity sha512-KPJhRlhsl8CjgGXK/DoDcrFGfAqoqvuwlbxy+uOO4g2Azn1dhH+GVfC3RAp+6PoL5PWPb+vt6Z23FP+Mr6qeCw== +"@smithy/middleware-stack@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/middleware-stack/-/middleware-stack-2.2.0.tgz#3fb49eae6313f16f6f30fdaf28e11a7321f34d9f" + integrity sha512-Qntc3jrtwwrsAC+X8wms8zhrTr0sFXnyEGhZd9sLtsJ/6gGQKFzNB+wWbOcpJd7BR8ThNCoKt76BuQahfMvpeA== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/node-config-provider@^2.2.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@smithy/node-config-provider/-/node-config-provider-2.2.1.tgz#c440c7948d58d72f0e212aa1967aa12f0729defd" - integrity sha512-epzK3x1xNxA9oJgHQ5nz+2j6DsJKdHfieb+YgJ7ATWxzNcB7Hc+Uya2TUck5MicOPhDV8HZImND7ZOecVr+OWg== +"@smithy/node-config-provider@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/node-config-provider/-/node-config-provider-2.3.0.tgz#9fac0c94a14c5b5b8b8fa37f20c310a844ab9922" + integrity sha512-0elK5/03a1JPWMDPaS726Iw6LpQg80gFut1tNpPfxFuChEEklo2yL823V94SpTZTxmKlXFtFgsP55uh3dErnIg== dependencies: - "@smithy/property-provider" "^2.1.1" - "@smithy/shared-ini-file-loader" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/shared-ini-file-loader" "^2.4.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/node-http-handler@^2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-2.3.1.tgz#77d23279ff0a12cbe7cde93c5e7c0e86ad56dd20" - integrity sha512-gLA8qK2nL9J0Rk/WEZSvgin4AppvuCYRYg61dcUo/uKxvMZsMInL5I5ZdJTogOvdfVug3N2dgI5ffcUfS4S9PA== +"@smithy/node-http-handler@^2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-2.5.0.tgz#7b5e0565dd23d340380489bd5fe4316d2bed32de" + integrity sha512-mVGyPBzkkGQsPoxQUbxlEfRjrj6FPyA3u3u2VXGr9hT8wilsoQdZdvKpMBFMB8Crfhv5dNkKHIW0Yyuc7eABqA== dependencies: - "@smithy/abort-controller" "^2.1.1" 
- "@smithy/protocol-http" "^3.1.1" - "@smithy/querystring-builder" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/abort-controller" "^2.2.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/querystring-builder" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/property-provider@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/property-provider/-/property-provider-2.1.1.tgz#0f7ffc5e43829eaca5b2b5aae8554807a52b30f3" - integrity sha512-FX7JhhD/o5HwSwg6GLK9zxrMUrGnb3PzNBrcthqHKBc3dH0UfgEAU24xnJ8F0uow5mj17UeBEOI6o3CF2k7Mhw== +"@smithy/property-provider@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/property-provider/-/property-provider-2.2.0.tgz#37e3525a3fa3e11749f86a4f89f0fd7765a6edb0" + integrity sha512-+xiil2lFhtTRzXkx8F053AV46QnIw6e7MV8od5Mi68E1ICOjCeCHw2XfLnDEUHnT9WGUIkwcqavXjfwuJbGlpg== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/protocol-http@^3.1.1": - version "3.1.1" - resolved "https://registry.yarnpkg.com/@smithy/protocol-http/-/protocol-http-3.1.1.tgz#eee522d0ed964a72b735d64925e07bcfb7a7806f" - integrity sha512-6ZRTSsaXuSL9++qEwH851hJjUA0OgXdQFCs+VDw4tGH256jQ3TjYY/i34N4vd24RV3nrjNsgd1yhb57uMoKbzQ== +"@smithy/protocol-http@^3.3.0": + version "3.3.0" + resolved "https://registry.yarnpkg.com/@smithy/protocol-http/-/protocol-http-3.3.0.tgz#a37df7b4bb4960cdda560ce49acfd64c455e4090" + integrity sha512-Xy5XK1AFWW2nlY/biWZXu6/krgbaf2dg0q492D8M5qthsnU2H+UgFeZLbM76FnH7s6RO/xhQRkj+T6KBO3JzgQ== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/querystring-builder@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-2.1.1.tgz#b9693448ad3f8e0767d84cf5cae29f35514591fb" - integrity sha512-C/ko/CeEa8jdYE4gt6nHO5XDrlSJ3vdCG0ZAc6nD5ZIE7LBp0jCx4qoqp7eoutBu7VrGMXERSRoPqwi1WjCPbg== +"@smithy/querystring-builder@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-2.2.0.tgz#22937e19fcd0aaa1a3e614ef8cb6f8e86756a4ef" + integrity sha512-L1kSeviUWL+emq3CUVSgdogoM/D9QMFaqxL/dd0X7PCNWmPXqt+ExtrBjqT0V7HLN03Vs9SuiLrG3zy3JGnE5A== dependencies: - "@smithy/types" "^2.9.1" - "@smithy/util-uri-escape" "^2.1.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-uri-escape" "^2.2.0" + tslib "^2.6.2" -"@smithy/querystring-parser@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/querystring-parser/-/querystring-parser-2.1.1.tgz#a4282a66cc56844317dbff824e573f469bbfc032" - integrity sha512-H4+6jKGVhG1W4CIxfBaSsbm98lOO88tpDWmZLgkJpt8Zkk/+uG0FmmqMuCAc3HNM2ZDV+JbErxr0l5BcuIf/XQ== +"@smithy/querystring-parser@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/querystring-parser/-/querystring-parser-2.2.0.tgz#24a5633f4b3806ff2888d4c2f4169e105fdffd79" + integrity sha512-BvHCDrKfbG5Yhbpj4vsbuPV2GgcpHiAkLeIlcA1LtfpMz3jrqizP1+OguSNSj1MwBHEiN+jwNisXLGdajGDQJA== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/service-error-classification@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/service-error-classification/-/service-error-classification-2.1.1.tgz#dd24e1ec529ae9ec8e87d8b15f0fc8f7e17f3d02" - integrity sha512-txEdZxPUgM1PwGvDvHzqhXisrc5LlRWYCf2yyHfvITWioAKat7srQvpjMAvgzf0t6t7j8yHrryXU9xt7RZqFpw== 
+"@smithy/service-error-classification@^2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@smithy/service-error-classification/-/service-error-classification-2.1.5.tgz#0568a977cc0db36299d8703a5d8609c1f600c005" + integrity sha512-uBDTIBBEdAQryvHdc5W8sS5YX7RQzF683XrHePVdFmAgKiMofU15FLSM0/HU03hKTnazdNRFa0YHS7+ArwoUSQ== dependencies: - "@smithy/types" "^2.9.1" + "@smithy/types" "^2.12.0" -"@smithy/shared-ini-file-loader@^2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-2.3.1.tgz#a2e28b4d85f8a8262a84403fa2b74a086b3a7703" - integrity sha512-2E2kh24igmIznHLB6H05Na4OgIEilRu0oQpYXo3LCNRrawHAcfDKq9004zJs+sAMt2X5AbY87CUCJ7IpqpSgdw== +"@smithy/shared-ini-file-loader@^2.4.0": + version "2.4.0" + resolved "https://registry.yarnpkg.com/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-2.4.0.tgz#1636d6eb9bff41e36ac9c60364a37fd2ffcb9947" + integrity sha512-WyujUJL8e1B6Z4PBfAqC/aGY1+C7T0w20Gih3yrvJSk97gpiVfB+y7c46T4Nunk+ZngLq0rOIdeVeIklk0R3OA== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/signature-v4@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-2.1.1.tgz#6080171e3d694f40d3f553bbc236c5c433efd4d2" - integrity sha512-Hb7xub0NHuvvQD3YwDSdanBmYukoEkhqBjqoxo+bSdC0ryV9cTfgmNjuAQhTPYB6yeU7hTR+sPRiFMlxqv6kmg== - dependencies: - "@smithy/eventstream-codec" "^2.1.1" - "@smithy/is-array-buffer" "^2.1.1" - "@smithy/types" "^2.9.1" - "@smithy/util-hex-encoding" "^2.1.1" - "@smithy/util-middleware" "^2.1.1" - "@smithy/util-uri-escape" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" - -"@smithy/smithy-client@^2.3.1": - version "2.3.1" - resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-2.3.1.tgz#0c3a4a0d3935c7ad2240cc23181f276705212b1f" - integrity sha512-YsTdU8xVD64r2pLEwmltrNvZV6XIAC50LN6ivDopdt+YiF/jGH6PY9zUOu0CXD/d8GMB8gbhnpPsdrjAXHS9QA== +"@smithy/signature-v4@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-2.2.0.tgz#8fe6a574188b71fba6056111b88d50c84babb060" + integrity sha512-+B5TNzj/fRZzVW3z8UUJOkNx15+4E0CLuvJmJUA1JUIZFp3rdJ/M2H5r2SqltaVPXL0oIxv/6YK92T9TsFGbFg== + dependencies: + "@smithy/eventstream-codec" "^2.2.0" + "@smithy/is-array-buffer" "^2.2.0" + "@smithy/types" "^2.12.0" + "@smithy/util-hex-encoding" "^2.2.0" + "@smithy/util-middleware" "^2.2.0" + "@smithy/util-uri-escape" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@smithy/smithy-client@^2.5.0": + version "2.5.0" + resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-2.5.0.tgz#8de4fff221d232dda34a8e706d6a4f2911dffe2e" + integrity sha512-DDXWHWdimtS3y/Kw1Jo46KQ0ZYsDKcldFynQERUGBPDpkW1lXOTHy491ALHjwfiBQvzsVKVxl5+ocXNIgJuX4g== dependencies: - "@smithy/middleware-endpoint" "^2.4.1" - "@smithy/middleware-stack" "^2.1.1" - "@smithy/protocol-http" "^3.1.1" - "@smithy/types" "^2.9.1" - "@smithy/util-stream" "^2.1.1" - tslib "^2.5.0" + "@smithy/middleware-endpoint" "^2.5.0" + "@smithy/middleware-stack" "^2.2.0" + "@smithy/protocol-http" "^3.3.0" + "@smithy/types" "^2.12.0" + "@smithy/util-stream" "^2.2.0" + tslib "^2.6.2" -"@smithy/types@^2.9.1": - version "2.9.1" - resolved "https://registry.yarnpkg.com/@smithy/types/-/types-2.9.1.tgz#ed04d4144eed3b8bd26d20fc85aae8d6e357ebb9" - integrity sha512-vjXlKNXyprDYDuJ7UW5iobdmyDm6g8dDG+BFUncAg/3XJaN45Gy5RWWWUVgrzIK7S4R1KWgIX5LeJcfvSI24bw== +"@smithy/types@^2.12.0": + 
version "2.12.0" + resolved "https://registry.yarnpkg.com/@smithy/types/-/types-2.12.0.tgz#c44845f8ba07e5e8c88eda5aed7e6a0c462da041" + integrity sha512-QwYgloJ0sVNBeBuBs65cIkTbfzV/Q6ZNPCJ99EICFEdJYG50nGIY/uYXp+TbsdJReIuPr0a0kXmCvren3MbRRw== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/url-parser@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/url-parser/-/url-parser-2.1.1.tgz#a30de227b6734650d740b6dff74d488b874e85e3" - integrity sha512-qC9Bv8f/vvFIEkHsiNrUKYNl8uKQnn4BdhXl7VzQRP774AwIjiSMMwkbT+L7Fk8W8rzYVifzJNYxv1HwvfBo3Q== +"@smithy/url-parser@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/url-parser/-/url-parser-2.2.0.tgz#6fcda6116391a4f61fef5580eb540e128359b3c0" + integrity sha512-hoA4zm61q1mNTpksiSWp2nEl1dt3j726HdRhiNgVJQMj7mLp7dprtF57mOB6JvEk/x9d2bsuL5hlqZbBuHQylQ== dependencies: - "@smithy/querystring-parser" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/querystring-parser" "^2.2.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/util-base64@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-base64/-/util-base64-2.1.1.tgz#af729085cc9d92ebd54a5d2c5d0aa5a0c31f83bf" - integrity sha512-UfHVpY7qfF/MrgndI5PexSKVTxSZIdz9InghTFa49QOvuu9I52zLPLUHXvHpNuMb1iD2vmc6R+zbv/bdMipR/g== +"@smithy/util-base64@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/util-base64/-/util-base64-2.3.0.tgz#312dbb4d73fb94249c7261aee52de4195c2dd8e2" + integrity sha512-s3+eVwNeJuXUwuMbusncZNViuhv2LjVJ1nMwTqSA0XAC7gjKhqqxRdJPhR8+YrkoZ9IiIbFk/yK6ACe/xlF+hw== dependencies: - "@smithy/util-buffer-from" "^2.1.1" - tslib "^2.5.0" + "@smithy/util-buffer-from" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" -"@smithy/util-body-length-browser@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-body-length-browser/-/util-body-length-browser-2.1.1.tgz#1fc77072768013ae646415eedb9833cd252d055d" - integrity sha512-ekOGBLvs1VS2d1zM2ER4JEeBWAvIOUKeaFch29UjjJsxmZ/f0L3K3x0dEETgh3Q9bkZNHgT+rkdl/J/VUqSRag== +"@smithy/util-body-length-browser@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-body-length-browser/-/util-body-length-browser-2.2.0.tgz#25620645c6b62b42594ef4a93b66e6ab70e27d2c" + integrity sha512-dtpw9uQP7W+n3vOtx0CfBD5EWd7EPdIdsQnWTDoFf77e3VUf05uA7R7TGipIo8e4WL2kuPdnsr3hMQn9ziYj5w== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-body-length-node@^2.2.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@smithy/util-body-length-node/-/util-body-length-node-2.2.1.tgz#a6f5c9911f1c3e23efb340d5ce7a590b62f2056e" - integrity sha512-/ggJG+ta3IDtpNVq4ktmEUtOkH1LW64RHB5B0hcr5ZaWBmo96UX2cIOVbjCqqDickTXqBWZ4ZO0APuaPrD7Abg== +"@smithy/util-body-length-node@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/util-body-length-node/-/util-body-length-node-2.3.0.tgz#d065a9b5e305ff899536777bbfe075cdc980136f" + integrity sha512-ITWT1Wqjubf2CJthb0BuT9+bpzBfXeMokH/AAa5EJQgbv9aPMVfnM76iFIZVFf50hYXGbtiV71BHAthNWd6+dw== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-buffer-from@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-buffer-from/-/util-buffer-from-2.1.1.tgz#f9346bf8b23c5ba6f6bdb61dd9db779441ba8d08" - integrity sha512-clhNjbyfqIv9Md2Mg6FffGVrJxw7bgK7s3Iax36xnfVj6cg0fUG7I4RH0XgXJF8bxi+saY5HR21g2UPKSxVCXg== +"@smithy/util-buffer-from@^2.2.0": + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/@smithy/util-buffer-from/-/util-buffer-from-2.2.0.tgz#6fc88585165ec73f8681d426d96de5d402021e4b" + integrity sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA== dependencies: - "@smithy/is-array-buffer" "^2.1.1" - tslib "^2.5.0" + "@smithy/is-array-buffer" "^2.2.0" + tslib "^2.6.2" -"@smithy/util-config-provider@^2.2.1": - version "2.2.1" - resolved "https://registry.yarnpkg.com/@smithy/util-config-provider/-/util-config-provider-2.2.1.tgz#aea0a80236d6cedaee60473802899cff4a8cc0ba" - integrity sha512-50VL/tx9oYYcjJn/qKqNy7sCtpD0+s8XEBamIFo4mFFTclKMNp+rsnymD796uybjiIquB7VCB/DeafduL0y2kw== +"@smithy/util-config-provider@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/util-config-provider/-/util-config-provider-2.3.0.tgz#bc79f99562d12a1f8423100ca662a6fb07cde943" + integrity sha512-HZkzrRcuFN1k70RLqlNK4FnPXKOpkik1+4JaBoHNJn+RnJGYqaa3c5/+XtLOXhlKzlRgNvyaLieHTW2VwGN0VQ== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-defaults-mode-browser@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-2.1.1.tgz#be9ac82acee6ec4821b610e7187b0e147f0ba8ff" - integrity sha512-lqLz/9aWRO6mosnXkArtRuQqqZBhNpgI65YDpww4rVQBuUT7qzKbDLG5AmnQTCiU4rOquaZO/Kt0J7q9Uic7MA== +"@smithy/util-defaults-mode-browser@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-2.2.0.tgz#963a9d3c3351272764dd1c5dc07c26f2c8abcb02" + integrity sha512-2okTdZaCBvOJszAPU/KSvlimMe35zLOKbQpHhamFJmR7t95HSe0K3C92jQPjKY3PmDBD+7iMkOnuW05F5OlF4g== dependencies: - "@smithy/property-provider" "^2.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" + "@smithy/property-provider" "^2.2.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" bowser "^2.11.0" - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-defaults-mode-node@^2.1.1": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-2.2.0.tgz#72fd6f945c265f1ef9be647fe829d55df5101390" - integrity sha512-iFJp/N4EtkanFpBUtSrrIbtOIBf69KNuve03ic1afhJ9/korDxdM0c6cCH4Ehj/smI9pDCfVv+bqT3xZjF2WaA== - dependencies: - "@smithy/config-resolver" "^2.1.1" - "@smithy/credential-provider-imds" "^2.2.1" - "@smithy/node-config-provider" "^2.2.1" - "@smithy/property-provider" "^2.1.1" - "@smithy/smithy-client" "^2.3.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" - -"@smithy/util-endpoints@^1.1.1": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-endpoints/-/util-endpoints-1.1.1.tgz#45426dba6fb42282a0ad955600b2b3ba050d118f" - integrity sha512-sI4d9rjoaekSGEtq3xSb2nMjHMx8QXcz2cexnVyRWsy4yQ9z3kbDpX+7fN0jnbdOp0b3KSTZJZ2Yb92JWSanLw== +"@smithy/util-defaults-mode-node@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-2.3.0.tgz#5005058ca0a299f0948b47c288f7c3d4f36cb26e" + integrity sha512-hfKXnNLmsW9cmLb/JXKIvtuO6Cf4SuqN5PN1C2Ru/TBIws+m1wSgb+A53vo0r66xzB6E82inKG2J7qtwdi+Kkw== + dependencies: + "@smithy/config-resolver" "^2.2.0" + "@smithy/credential-provider-imds" "^2.3.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/property-provider" "^2.2.0" + "@smithy/smithy-client" "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" + +"@smithy/util-endpoints@^1.2.0": + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/@smithy/util-endpoints/-/util-endpoints-1.2.0.tgz#b8b805f47e8044c158372f69b88337703117665d" + integrity sha512-BuDHv8zRjsE5zXd3PxFXFknzBG3owCpjq8G3FcsXW3CykYXuEqM3nTSsmLzw5q+T12ZYuDlVUZKBdpNbhVtlrQ== dependencies: - "@smithy/node-config-provider" "^2.2.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/node-config-provider" "^2.3.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/util-hex-encoding@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-hex-encoding/-/util-hex-encoding-2.1.1.tgz#978252b9fb242e0a59bae4ead491210688e0d15f" - integrity sha512-3UNdP2pkYUUBGEXzQI9ODTDK+Tcu1BlCyDBaRHwyxhA+8xLP8agEKQq4MGmpjqb4VQAjq9TwlCQX0kP6XDKYLg== +"@smithy/util-hex-encoding@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-hex-encoding/-/util-hex-encoding-2.2.0.tgz#87edb7c88c2f422cfca4bb21f1394ae9602c5085" + integrity sha512-7iKXR+/4TpLK194pVjKiasIyqMtTYJsgKgM242Y9uzt5dhHnUDvMNb+3xIhRJ9QhvqGii/5cRUt4fJn3dtXNHQ== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-middleware@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-2.1.1.tgz#903ba19bb17704f4b476fb9ade9bf9eb0174bc3d" - integrity sha512-mKNrk8oz5zqkNcbcgAAepeJbmfUW6ogrT2Z2gDbIUzVzNAHKJQTYmH9jcy0jbWb+m7ubrvXKb6uMjkSgAqqsFA== +"@smithy/util-middleware@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-2.2.0.tgz#80cfad40f6cca9ffe42a5899b5cb6abd53a50006" + integrity sha512-L1qpleXf9QD6LwLCJ5jddGkgWyuSvWBkJwWAZ6kFkdifdso+sk3L3O1HdmPvCdnCK3IS4qWyPxev01QMnfHSBw== dependencies: - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/util-retry@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-retry/-/util-retry-2.1.1.tgz#f2d3566b6e5b841028c7240c852007d4037e49b2" - integrity sha512-Mg+xxWPTeSPrthpC5WAamJ6PW4Kbo01Fm7lWM1jmGRvmrRdsd3192Gz2fBXAMURyXpaNxyZf6Hr/nQ4q70oVEA== +"@smithy/util-retry@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-retry/-/util-retry-2.2.0.tgz#e8e019537ab47ba6b2e87e723ec51ee223422d85" + integrity sha512-q9+pAFPTfftHXRytmZ7GzLFFrEGavqapFc06XxzZFcSIGERXMerXxCitjOG1prVDR9QdjqotF40SWvbqcCpf8g== dependencies: - "@smithy/service-error-classification" "^2.1.1" - "@smithy/types" "^2.9.1" - tslib "^2.5.0" + "@smithy/service-error-classification" "^2.1.5" + "@smithy/types" "^2.12.0" + tslib "^2.6.2" -"@smithy/util-stream@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-stream/-/util-stream-2.1.1.tgz#3ae0e88c3a1a45899e29c1655d2e5a3865b6c0a6" - integrity sha512-J7SMIpUYvU4DQN55KmBtvaMc7NM3CZ2iWICdcgaovtLzseVhAqFRYqloT3mh0esrFw+3VEK6nQFteFsTqZSECQ== - dependencies: - "@smithy/fetch-http-handler" "^2.4.1" - "@smithy/node-http-handler" "^2.3.1" - "@smithy/types" "^2.9.1" - "@smithy/util-base64" "^2.1.1" - "@smithy/util-buffer-from" "^2.1.1" - "@smithy/util-hex-encoding" "^2.1.1" - "@smithy/util-utf8" "^2.1.1" - tslib "^2.5.0" - -"@smithy/util-uri-escape@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-uri-escape/-/util-uri-escape-2.1.1.tgz#7eedc93b73ecda68f12fb9cf92e9fa0fbbed4d83" - integrity sha512-saVzI1h6iRBUVSqtnlOnc9ssU09ypo7n+shdQ8hBTZno/9rZ3AuRYvoHInV57VF7Qn7B+pFJG7qTzFiHxWlWBw== +"@smithy/util-stream@^2.2.0": + version "2.2.0" + resolved 
"https://registry.yarnpkg.com/@smithy/util-stream/-/util-stream-2.2.0.tgz#b1279e417992a0f74afa78d7501658f174ed7370" + integrity sha512-17faEXbYWIRst1aU9SvPZyMdWmqIrduZjVOqCPMIsWFNxs5yQQgFrJL6b2SdiCzyW9mJoDjFtgi53xx7EH+BXA== + dependencies: + "@smithy/fetch-http-handler" "^2.5.0" + "@smithy/node-http-handler" "^2.5.0" + "@smithy/types" "^2.12.0" + "@smithy/util-base64" "^2.3.0" + "@smithy/util-buffer-from" "^2.2.0" + "@smithy/util-hex-encoding" "^2.2.0" + "@smithy/util-utf8" "^2.3.0" + tslib "^2.6.2" + +"@smithy/util-uri-escape@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@smithy/util-uri-escape/-/util-uri-escape-2.2.0.tgz#56f5764051a33b67bc93fdd2a869f971b0635406" + integrity sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA== dependencies: - tslib "^2.5.0" + tslib "^2.6.2" -"@smithy/util-utf8@^2.1.1": - version "2.1.1" - resolved "https://registry.yarnpkg.com/@smithy/util-utf8/-/util-utf8-2.1.1.tgz#690018dd28f47f014114497735e51417ea5900a6" - integrity sha512-BqTpzYEcUMDwAKr7/mVRUtHDhs6ZoXDi9NypMvMfOr/+u1NW7JgqodPDECiiLboEm6bobcPcECxzjtQh865e9A== +"@smithy/util-utf8@^2.3.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@smithy/util-utf8/-/util-utf8-2.3.0.tgz#dd96d7640363259924a214313c3cf16e7dd329c5" + integrity sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A== dependencies: - "@smithy/util-buffer-from" "^2.1.1" - tslib "^2.5.0" + "@smithy/util-buffer-from" "^2.2.0" + tslib "^2.6.2" "@tsconfig/node10@^1.0.7": - version "1.0.9" - resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.9.tgz#df4907fc07a886922637b15e02d4cebc4c0021b2" - integrity sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA== + version "1.0.11" + resolved "https://registry.yarnpkg.com/@tsconfig/node10/-/node10-1.0.11.tgz#6ee46400685f130e278128c7b38b7e031ff5b2f2" + integrity sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw== "@tsconfig/node12@^1.0.7": version "1.0.11" @@ -2462,9 +2452,9 @@ "@types/node" "*" "@types/config@^3.3.3": - version "3.3.3" - resolved "https://registry.yarnpkg.com/@types/config/-/config-3.3.3.tgz#3e9e286365bd02f8ca079898a14172df47010941" - integrity sha512-BB8DBAud88EgiAKlz8WQStzI771Kb6F3j4dioRJ4GD+tP4tzcZyMlz86aXuZT4s9hyesFORehMQE6eqtA5O+Vg== + version "3.3.4" + resolved "https://registry.yarnpkg.com/@types/config/-/config-3.3.4.tgz#e46d1ee74f9b0c53645f644dd5f3ba5bedc68d76" + integrity sha512-qFiTLnWy+TdPSMIXFHP+87lFXFRM4SXjRS+CSB66+56TrpLNw003y1sh7DGaaC1NGesxgKoT5FDy6dyA1Xju/g== "@types/connect@*": version "3.4.38" @@ -2493,6 +2483,13 @@ resolved "https://registry.yarnpkg.com/@types/crypto-js/-/crypto-js-4.2.2.tgz#771c4a768d94eb5922cc202a3009558204df0cea" integrity sha512-sDOLlVbHhXpAUAL0YHDUUwDZf3iN4Bwi4W6a0W0b+QcAezUbRtH4FVb+9J4h+XFPW7l/gQ9F8qC7P+Ec4k8QVQ== +"@types/express-prometheus-middleware@^1.2.1": + version "1.2.3" + resolved "https://registry.yarnpkg.com/@types/express-prometheus-middleware/-/express-prometheus-middleware-1.2.3.tgz#b470fb21745d519b524f16d21d6d14e7cd90d7bc" + integrity sha512-3B+/27E/pxkZOzTWHqSQOhi5IctzoEyX6F1izBxAjiC5oe6tHLDy0kNQAj31SGR3wjxfvH1dgZi9fzmKX90OBw== + dependencies: + "@types/express" "*" + "@types/express-serve-static-core@^4.17.33": version "4.17.43" resolved "https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.43.tgz#10d8444be560cb789c4735aea5eac6e5af45df54" @@ -2503,7 +2500,7 @@ 
"@types/range-parser" "*" "@types/send" "*" -"@types/express@^4.17.14": +"@types/express@*", "@types/express@^4.17.14": version "4.17.21" resolved "https://registry.yarnpkg.com/@types/express/-/express-4.17.21.tgz#c26d4a151e60efe0084b23dc3369ebc631ed192d" integrity sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ== @@ -2577,9 +2574,9 @@ integrity sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ== "@types/lodash@^4.14.186": - version "4.14.202" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.202.tgz#f09dbd2fb082d507178b2f2a5c7e74bd72ff98f8" - integrity sha512-OvlIYQK9tNneDlS0VN54LLd5uiPCBOp7gS5Z0f1mjoJYBrtStzgmJBxONW3U6OZqdtNzZPmn9BS/7WI7BFFcFQ== + version "4.17.0" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.17.0.tgz#d774355e41f372d5350a4d0714abb48194a489c3" + integrity sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA== "@types/luxon@~3.3.0": version "3.3.8" @@ -2608,10 +2605,10 @@ resolved "https://registry.yarnpkg.com/@types/node-cron/-/node-cron-3.0.11.tgz#70b7131f65038ae63cfe841354c8aba363632344" integrity sha512-0ikrnug3/IyneSHqCBeslAhlK2aBfYek1fGo4bP4QnZPmiqSGRK+Oy7ZMisLWkesffJvQ1cqAcBnJC+8+nxIAg== -"@types/node@*", "@types/node@>=12.0.0", "@types/node@>=13.7.0": - version "20.11.16" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.11.16.tgz#4411f79411514eb8e2926f036c86c9f0e4ec6708" - integrity sha512-gKb0enTmRCzXSSUJDq6/sPcqrfCv2mkkG6Jt/clpn5eiCbKTY+SgZUxo+p8ZKMof5dCp9vHQUAB7wOUTod22wQ== +"@types/node@*", "@types/node@>=12.0.0", "@types/node@>=13.7.0", "@types/node@^20.10.5": + version "20.11.30" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.11.30.tgz#9c33467fc23167a347e73834f788f4b9f399d66f" + integrity sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw== dependencies: undici-types "~5.26.4" @@ -2620,13 +2617,6 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-12.20.55.tgz#c329cbd434c42164f846b909bd6f85b5537f6240" integrity sha512-J8xLz7q2OFulZ2cyGTLE1TbbZcjpno7FaN6zdJNrgAdrJ+DZzh/uFR6YrTb4C+nXakvud8Q4+rbhoIWlYQbUFQ== -"@types/node@^20.10.5": - version "20.11.17" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.11.17.tgz#cdd642d0e62ef3a861f88ddbc2b61e32578a9292" - integrity sha512-QmgQZGWu1Yw9TDyAP9ZzpFJKynYNeOvwMJmaxABfieQoVoiVOS6MN1WSpqpRcbeA5+RW82kraAVxCCJg+780Qw== - dependencies: - undici-types "~5.26.4" - "@types/numeral@^2.0.2": version "2.0.5" resolved "https://registry.yarnpkg.com/@types/numeral/-/numeral-2.0.5.tgz#388e5c4ff4b0e1787f130753cbbe83d3ba770858" @@ -2652,9 +2642,9 @@ integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA== "@types/qs@*": - version "6.9.11" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.11.tgz#208d8a30bc507bd82e03ada29e4732ea46a6bbda" - integrity sha512-oGk0gmhnEJK4Yyk+oI7EfXsLayXatCWPHary1MtcmbAifkobT9cM9yutG/hZKIseOU0MqbIwQ/u2nn/Gb+ltuQ== + version "6.9.14" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.14.tgz#169e142bfe493895287bee382af6039795e9b75b" + integrity sha512-5khscbd3SwWMhFqylJBLQ0zIu7c1K6Vz0uBIt915BI3zV0q1nfjRQD3RqSBcPaO6PHEF4ov/t9y89fSiyThlPA== "@types/range-parser@*": version "1.2.7" @@ -2674,9 +2664,9 @@ "@types/node" "*" "@types/semver@^7.3.12": - version "7.5.6" - resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.6.tgz#c65b2bfce1bec346582c07724e3f8c1017a20339" - 
integrity sha512-dn1l8LaMea/IjDoHNd9J52uBbInB796CDffS6VdIxvqYCPSG0V0DzHp76GpaWnlhg88uYyPbXCDIowa86ybd5A== + version "7.5.8" + resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== "@types/send@*": version "0.17.4" @@ -2977,7 +2967,7 @@ array-back@^4.0.1, array-back@^4.0.2: resolved "https://registry.yarnpkg.com/array-back/-/array-back-4.0.2.tgz#8004e999a6274586beeb27342168652fdb89fa1e" integrity sha512-NbdMezxqf94cnNfWLL7V/im0Ub+Anbb0IoZhvzie8+4HJ4nMQuzHuy49FkGYCJK2yAloZ3meiB6AVMClbrI1vg== -array-buffer-byte-length@^1.0.0, array-buffer-byte-length@^1.0.1: +array-buffer-byte-length@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f" integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg== @@ -2991,14 +2981,15 @@ array-flatten@1.1.1: integrity sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg== array-includes@^3.1.7: - version "3.1.7" - resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.7.tgz#8cd2e01b26f7a3086cbc87271593fe921c62abda" - integrity sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ== + version "3.1.8" + resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.8.tgz#5e370cbe172fdd5dd6530c1d4aadda25281ba97d" + integrity sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" - get-intrinsic "^1.2.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" + es-object-atoms "^1.0.0" + get-intrinsic "^1.2.4" is-string "^1.0.7" array-union@^2.1.0: @@ -3006,26 +2997,16 @@ array-union@^2.1.0: resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== -array.prototype.filter@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/array.prototype.filter/-/array.prototype.filter-1.0.3.tgz#423771edeb417ff5914111fff4277ea0624c0d0e" - integrity sha512-VizNcj/RGJiUyQBgzwxzE5oHdeuXY5hSbbmKMlphj1cy1Vl7Pn2asCGbSrru6hSQjmCzqTBPVWAF/whmEOVHbw== - dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" - es-array-method-boxes-properly "^1.0.0" - is-string "^1.0.7" - array.prototype.findlastindex@^1.2.3: - version "1.2.4" - resolved "https://registry.yarnpkg.com/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.4.tgz#d1c50f0b3a9da191981ff8942a0aedd82794404f" - integrity sha512-hzvSHUshSpCflDR1QMUBLHGHP1VIEBegT4pix9H/Z92Xw3ySoy6c2qh7lJWTJnRJ8JCZ9bJNCgTyYaJGcJu6xQ== + version "1.2.5" + resolved "https://registry.yarnpkg.com/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz#8c35a755c72908719453f87145ca011e39334d0d" + integrity sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ== dependencies: - call-bind "^1.0.5" + call-bind "^1.0.7" define-properties "^1.2.1" - es-abstract "^1.22.3" + es-abstract "^1.23.2" es-errors "^1.3.0" + es-object-atoms "^1.0.0" es-shim-unscopables "^1.0.2" array.prototype.flat@^1.3.2: @@ -3048,7 
+3029,7 @@ array.prototype.flatmap@^1.3.2: es-abstract "^1.22.1" es-shim-unscopables "^1.0.0" -arraybuffer.prototype.slice@^1.0.2: +arraybuffer.prototype.slice@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6" integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A== @@ -3079,10 +3060,12 @@ atomic-sleep@^1.0.0: resolved "https://registry.yarnpkg.com/atomic-sleep/-/atomic-sleep-1.0.0.tgz#eb85b77a601fc932cfe432c5acd364a9e2c9075b" integrity sha512-kNOjDqAh7px0XWNI+4QbzoiR/nTkHAWNud2uvnJquD1/x5a7EQZMJT0AczqK0Qn67oY/TTQ1LbUKajZpp3I9tQ== -available-typed-arrays@^1.0.5, available-typed-arrays@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.6.tgz#ac812d8ce5a6b976d738e1c45f08d0b00bc7d725" - integrity sha512-j1QzY8iPNPG4o4xmO3ptzpRxTciqD3MgEHtifP/YnJpIo58Xu+ne4BejlbkuaLfXn/nz6HFiw29bLpj2PNMdGg== +available-typed-arrays@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== + dependencies: + possible-typed-array-names "^1.0.0" axios@1.3.4: version "1.3.4" @@ -3094,11 +3077,11 @@ axios@1.3.4: proxy-from-env "^1.1.0" axios@^1.1.2, axios@^1.4.0, axios@^1.6.5: - version "1.6.7" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.7.tgz#7b48c2e27c96f9c68a2f8f31e2ab19f59b06b0a7" - integrity sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA== + version "1.6.8" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.8.tgz#66d294951f5d988a00e87a0ffb955316a619ea66" + integrity sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ== dependencies: - follow-redirects "^1.15.4" + follow-redirects "^1.15.6" form-data "^4.0.0" proxy-from-env "^1.1.0" @@ -3195,9 +3178,14 @@ bignumber.js@^9.0.0, bignumber.js@^9.0.1: integrity sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug== binary-extensions@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" - integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== + version "2.3.0" + resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== + +bintrees@1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/bintrees/-/bintrees-1.0.2.tgz#49f896d6e858a4a499df85c38fb399b9aff840f8" + integrity sha512-VOMgTMwjAaUG580SXn3LacVgjurrbMme7ZZNYGSSV7mmtY6QQRh0Eg3pwIcntQ77DErK1L0NxkbetjcoXzVwKw== blakejs@^1.1.0: version "1.2.1" @@ -3224,13 +3212,13 @@ bn.js@^5.1.2, bn.js@^5.2.0, bn.js@^5.2.1: resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-5.2.1.tgz#0bc527a6a0d18d0aa8d5b0538ce4a77dccfa7b70" integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ== -body-parser@1.20.1: - version "1.20.1" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" - integrity 
sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== +body-parser@1.20.2: + version "1.20.2" + resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd" + integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA== dependencies: bytes "3.1.2" - content-type "~1.0.4" + content-type "~1.0.5" debug "2.6.9" depd "2.0.0" destroy "1.2.0" @@ -3238,7 +3226,7 @@ body-parser@1.20.1: iconv-lite "0.4.24" on-finished "2.4.1" qs "6.11.0" - raw-body "2.5.1" + raw-body "2.5.2" type-is "~1.6.18" unpipe "1.0.0" @@ -3287,12 +3275,12 @@ browserify-aes@^1.2.0: safe-buffer "^5.0.1" browserslist@^4.22.2: - version "4.22.3" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.22.3.tgz#299d11b7e947a6b843981392721169e27d60c5a6" - integrity sha512-UAp55yfwNv0klWNapjs/ktHoguxuQNGnOzxYmfnXIS+8AsRDZkSDxg7R1AX3GKzn078SBI5dzwzj/Yx0Or0e3A== + version "4.23.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab" + integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== dependencies: - caniuse-lite "^1.0.30001580" - electron-to-chromium "^1.4.648" + caniuse-lite "^1.0.30001587" + electron-to-chromium "^1.4.668" node-releases "^2.0.14" update-browserslist-db "^1.0.13" @@ -3333,10 +3321,10 @@ bson@^4.7.2: dependencies: buffer "^5.6.0" -bson@^6.2.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/bson/-/bson-6.3.0.tgz#d47acba525ba7d7eb0e816c10538bce26a337fe0" - integrity sha512-balJfqwwTBddxfnidJZagCBPP/f48zj9Sdp3OJswREOgsJzHiQSaOIAtApSgDQFYgHqAvFkp53AFSqjMDZoTFw== +bson@^6.4.0: + version "6.5.0" + resolved "https://registry.yarnpkg.com/bson/-/bson-6.5.0.tgz#fc4828d065e64e48ea442b1a23099b2e52f7ff0b" + integrity sha512-DXf1BTAS8vKyR90BO4x5v3rKVarmkdkzwOrnYDFdjAY694ILNDkmA3uRh1xXJEl+C1DAh8XCvAQ+Gh3kzubtpg== buffer-from@^1.0.0: version "1.1.2" @@ -3386,15 +3374,16 @@ bytes@3.1.2: resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== -call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.6.tgz#6c46675fc7a5e9de82d75a233d586c8b7ac0d931" - integrity sha512-Mj50FLHtlsoVfRfnHaZvyrooHcrlceNZdL/QBvJJVd9Ta55qCQK0gs4ss2oZDeV9zFCs6ewzYgVE5yfVmfFpVg== +call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== dependencies: + es-define-property "^1.0.0" es-errors "^1.3.0" function-bind "^1.1.2" - get-intrinsic "^1.2.3" - set-function-length "^1.2.0" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" callsites@^3.0.0: version "3.1.0" @@ -3411,10 +3400,10 @@ camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== -caniuse-lite@^1.0.30001580: - version "1.0.30001585" - resolved 
"https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001585.tgz#0b4e848d84919c783b2a41c13f7de8ce96744401" - integrity sha512-yr2BWR1yLXQ8fMpdS/4ZZXpseBgE7o4g41x3a6AJOqZuOi+iE/WdJYAuZ6Y95i4Ohd2Y+9MzIWRR+uGABH4s3Q== +caniuse-lite@^1.0.30001587: + version "1.0.30001600" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001600.tgz#93a3ee17a35aa6a9f0c6ef1b2ab49507d1ab9079" + integrity sha512-+2S9/2JFhYmYaDpZvo0lKkfvuKIglrx68MwOBqMGHhQsNkLjB5xtc/TGoEPs+MxjSyN/72qer2g97nzR641mOQ== cent.js@^2.0.5: version "2.1.6" @@ -3616,7 +3605,7 @@ content-disposition@0.5.4: dependencies: safe-buffer "5.2.1" -content-type@~1.0.4: +content-type@~1.0.4, content-type@~1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918" integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA== @@ -3631,10 +3620,10 @@ cookie-signature@1.0.6: resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ== -cookie@0.5.0: - version "0.5.0" - resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" - integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== +cookie@0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.6.0.tgz#2798b04b071b0ecbff0dbb62a505a8efa4e19051" + integrity sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw== core-util-is@~1.0.0: version "1.0.3" @@ -3724,13 +3713,40 @@ crypto-randomuuid@^1.0.0: resolved "https://registry.yarnpkg.com/crypto-randomuuid/-/crypto-randomuuid-1.0.0.tgz#acf583e5e085e867ae23e107ff70279024f9e9e7" integrity sha512-/RC5F4l1SCqD/jazwUF6+t34Cd8zTSAGZ7rvvZu1whZUhD2a5MOGKjSGowoGcpj/cbVZk1ZODIooJEQQq3nNAA== -d@1, d@^1.0.1: +d@1, d@^1.0.1, d@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/d/-/d-1.0.2.tgz#2aefd554b81981e7dccf72d6842ae725cb17e5de" + integrity sha512-MOqHvMWF9/9MX6nza0KgvFH4HpMU0EF5uUDXqX/BtxtU8NfB0QzRtJ8Oe/6SuS4kbhyzVJwjd97EA4PKrzJ8bw== + dependencies: + es5-ext "^0.10.64" + type "^2.7.2" + +data-view-buffer@^1.0.1: version "1.0.1" - resolved "https://registry.yarnpkg.com/d/-/d-1.0.1.tgz#8698095372d58dbee346ffd0c7093f99f8f9eb5a" - integrity sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA== + resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2" + integrity sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA== dependencies: - es5-ext "^0.10.50" - type "^1.0.1" + call-bind "^1.0.6" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz#90721ca95ff280677eb793749fce1011347669e2" + integrity sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + is-data-view "^1.0.1" + +data-view-byte-offset@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz#5e0bbfb4828ed2d1b9b400cd8a7d119bca0ff18a" + integrity 
sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA== + dependencies: + call-bind "^1.0.6" + es-errors "^1.3.0" + is-data-view "^1.0.1" date-fns@^2.29.1: version "2.30.0" @@ -3744,26 +3760,26 @@ dateformat@^4.6.3: resolved "https://registry.yarnpkg.com/dateformat/-/dateformat-4.6.3.tgz#556fa6497e5217fedb78821424f8a1c22fa3f4b5" integrity sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA== -dc-polyfill@^0.1.2: +dc-polyfill@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/dc-polyfill/-/dc-polyfill-0.1.4.tgz#4118cec81a8fab9a5729c41c285c715ffa42495a" integrity sha512-8iwEduR2jR9wWYggeaYtYZWRiUe3XZPyAQtMTL1otv8X3kfR8xUIVb4l5awHEeyDrH6Je7N324lKzMKlMMN6Yw== dd-trace@^4.17.0: - version "4.26.0" - resolved "https://registry.yarnpkg.com/dd-trace/-/dd-trace-4.26.0.tgz#ebd7b0d2a30192761660b8001b64c6d31f2592be" - integrity sha512-qG1r1ERapurxbLSzox+LSVlM3QybblOCm/fP0dvClo+lH2E2ar4XmuX/2XoKNm43X4cCDFJcVmbvFQFwChHtWw== + version "4.33.0" + resolved "https://registry.yarnpkg.com/dd-trace/-/dd-trace-4.33.0.tgz#b4172018834e84aabc991213084532adb0628cbe" + integrity sha512-hO+2cIIiZJ8lbjRiIMs5mdMXNcTyi/D/kGreEnh/HuZPptngsF7HpDRFxhVls1hFOg1y4uYqYCUcTT5U0KXrHA== dependencies: - "@datadog/native-appsec" "7.0.0" - "@datadog/native-iast-rewriter" "2.2.2" - "@datadog/native-iast-taint-tracking" "1.6.4" + "@datadog/native-appsec" "7.1.0" + "@datadog/native-iast-rewriter" "2.3.0" + "@datadog/native-iast-taint-tracking" "1.7.0" "@datadog/native-metrics" "^2.0.0" - "@datadog/pprof" "5.0.0" + "@datadog/pprof" "5.2.0" "@datadog/sketches-js" "^2.1.0" "@opentelemetry/api" "^1.0.0" "@opentelemetry/core" "^1.14.0" crypto-randomuuid "^1.0.0" - dc-polyfill "^0.1.2" + dc-polyfill "^0.1.4" ignore "^5.2.4" import-in-the-middle "^1.7.3" int64-buffer "^0.1.9" @@ -3780,10 +3796,11 @@ dd-trace@^4.17.0: node-abort-controller "^3.1.1" opentracing ">=0.12.1" path-to-regexp "^0.1.2" - pprof-format "^2.0.7" + pprof-format "^2.1.0" protobufjs "^7.2.5" retry "^0.13.1" semver "^7.5.4" + shell-quote "^1.8.1" tlhunter-sorted-set "^0.1.0" debug@2.6.9, debug@^2.2.0: @@ -3827,15 +3844,14 @@ deepmerge@^4.2.2: resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== -define-data-property@^1.0.1, define-data-property@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.2.tgz#f3c33b4f0102360cd7c0f5f28700f5678510b63a" - integrity sha512-SRtsSqsDbgpJBbW3pABMCOt6rQyeM8s8RiyeSN8jYG8sYmt/kGJejbydttUsnDs1tadr19tvhT4ShwMyoqAm4g== +define-data-property@^1.0.1, define-data-property@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== dependencies: + es-define-property "^1.0.0" es-errors "^1.3.0" - get-intrinsic "^1.2.2" gopd "^1.0.1" - has-property-descriptors "^1.0.1" define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1: version "1.2.1" @@ -3866,6 +3882,11 @@ depd@2.0.0: resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== +depd@~1.1.0: + version 
"1.1.2" + resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.2.tgz#9bcd52e14c097763e749b274c4346ed2e560b5a9" + integrity sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ== + destroy@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" @@ -3908,21 +3929,21 @@ doctrine@^3.0.0: esutils "^2.0.2" dotenv@^16.0.3: - version "16.4.1" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.1.tgz#1d9931f1d3e5d2959350d1250efab299561f7f11" - integrity sha512-CjA3y+Dr3FyFDOAMnxZEGtnW9KBR2M0JvvUtXNW+dYJL5ROWxP9DUHCwgFqpMk0OXCc0ljhaNTr2w/kutYIcHQ== + version "16.4.5" + resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-16.4.5.tgz#cdd3b3b604cb327e286b4762e13502f717cb099f" + integrity sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg== ee-first@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== -electron-to-chromium@^1.4.648: - version "1.4.661" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.661.tgz#b28d63468b06e75610ed2b0f8e5f5f669a57bd91" - integrity sha512-AFg4wDHSOk5F+zA8aR+SVIOabu7m0e7BiJnigCvPXzIGy731XENw/lmNxTySpVFtkFEy+eyt4oHhh5FF3NjQNw== +electron-to-chromium@^1.4.668: + version "1.4.717" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.717.tgz#99db370cae8cd090d5b01f8748e9ad369924d0f8" + integrity sha512-6Fmg8QkkumNOwuZ/5mIbMU9WI3H2fmn5ajcVya64I5Yr5CcNmO7vcLt0Y7c96DCiMO5/9G+4sI2r6eEvdg1F7A== -elliptic@6.5.4, elliptic@^6.5.4: +elliptic@6.5.4: version "6.5.4" resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.4.tgz#da37cebd31e79a1367e941b592ed1fbebd58abbb" integrity sha512-iLhC6ULemrljPZb+QutR5TQGB+pdW6KGD5RSegS+8sorOZT+rdQFbsQFJgvN3eRqNALqJer4oQ16YvJHlU8hzQ== @@ -3935,6 +3956,19 @@ elliptic@6.5.4, elliptic@^6.5.4: minimalistic-assert "^1.0.1" minimalistic-crypto-utils "^1.0.1" +elliptic@^6.5.4: + version "6.5.5" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.5.tgz#c715e09f78b6923977610d4c2346d6ce22e6dded" + integrity sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw== + dependencies: + bn.js "^4.11.9" + brorand "^1.1.0" + hash.js "^1.0.0" + hmac-drbg "^1.0.1" + inherits "^2.0.4" + minimalistic-assert "^1.0.1" + minimalistic-crypto-utils "^1.0.1" + emittery@^0.13.1: version "0.13.1" resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" @@ -3978,69 +4012,85 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -es-abstract@^1.22.1, es-abstract@^1.22.3: - version "1.22.3" - resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.3.tgz#48e79f5573198de6dee3589195727f4f74bc4f32" - integrity sha512-eiiY8HQeYfYH2Con2berK+To6GrK2RxbPawDkGq4UiCQQfZHb6wX9qQqkbpPqaxQFcl8d9QzZqo0tGE0VcrdwA== +es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2: + version "1.23.2" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.2.tgz#693312f3940f967b8dd3eebacb590b01712622e0" + integrity sha512-60s3Xv2T2p1ICykc7c+DNDPLDMm9t4QxCOUU0K9JxiLjM3C1zB9YVdN7tjxrFd4+AkZ8CdX1ovUga4P2+1e+/w== dependencies: - array-buffer-byte-length "^1.0.0" - arraybuffer.prototype.slice "^1.0.2" - 
available-typed-arrays "^1.0.5" - call-bind "^1.0.5" - es-set-tostringtag "^2.0.1" + array-buffer-byte-length "^1.0.1" + arraybuffer.prototype.slice "^1.0.3" + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + data-view-buffer "^1.0.1" + data-view-byte-length "^1.0.1" + data-view-byte-offset "^1.0.0" + es-define-property "^1.0.0" + es-errors "^1.3.0" + es-object-atoms "^1.0.0" + es-set-tostringtag "^2.0.3" es-to-primitive "^1.2.1" function.prototype.name "^1.1.6" - get-intrinsic "^1.2.2" - get-symbol-description "^1.0.0" + get-intrinsic "^1.2.4" + get-symbol-description "^1.0.2" globalthis "^1.0.3" gopd "^1.0.1" - has-property-descriptors "^1.0.0" - has-proto "^1.0.1" + has-property-descriptors "^1.0.2" + has-proto "^1.0.3" has-symbols "^1.0.3" - hasown "^2.0.0" - internal-slot "^1.0.5" - is-array-buffer "^3.0.2" + hasown "^2.0.2" + internal-slot "^1.0.7" + is-array-buffer "^3.0.4" is-callable "^1.2.7" - is-negative-zero "^2.0.2" + is-data-view "^1.0.1" + is-negative-zero "^2.0.3" is-regex "^1.1.4" - is-shared-array-buffer "^1.0.2" + is-shared-array-buffer "^1.0.3" is-string "^1.0.7" - is-typed-array "^1.1.12" + is-typed-array "^1.1.13" is-weakref "^1.0.2" object-inspect "^1.13.1" object-keys "^1.1.1" - object.assign "^4.1.4" - regexp.prototype.flags "^1.5.1" - safe-array-concat "^1.0.1" - safe-regex-test "^1.0.0" - string.prototype.trim "^1.2.8" - string.prototype.trimend "^1.0.7" + object.assign "^4.1.5" + regexp.prototype.flags "^1.5.2" + safe-array-concat "^1.1.2" + safe-regex-test "^1.0.3" + string.prototype.trim "^1.2.9" + string.prototype.trimend "^1.0.8" string.prototype.trimstart "^1.0.7" - typed-array-buffer "^1.0.0" - typed-array-byte-length "^1.0.0" - typed-array-byte-offset "^1.0.0" - typed-array-length "^1.0.4" + typed-array-buffer "^1.0.2" + typed-array-byte-length "^1.0.1" + typed-array-byte-offset "^1.0.2" + typed-array-length "^1.0.5" unbox-primitive "^1.0.2" - which-typed-array "^1.1.13" + which-typed-array "^1.1.15" -es-array-method-boxes-properly@^1.0.0: +es-define-property@^1.0.0: version "1.0.0" - resolved "https://registry.yarnpkg.com/es-array-method-boxes-properly/-/es-array-method-boxes-properly-1.0.0.tgz#873f3e84418de4ee19c5be752990b2e44718d09e" - integrity sha512-wd6JXUmyHmt8T5a2xreUwKcGPq6f1f+WwIJkijUqiGcJz1qqnZgP6XIK+QyIWU5lT7imeNxUll48bziG+TSYcA== + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" -es-errors@^1.0.0, es-errors@^1.2.1, es-errors@^1.3.0: +es-errors@^1.2.1, es-errors@^1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== -es-set-tostringtag@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9" - integrity sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q== +es-object-atoms@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.0.0.tgz#ddb55cd47ac2e240701260bc2a8e31ecb643d941" + integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw== dependencies: - get-intrinsic "^1.2.2" - has-tostringtag 
"^1.0.0" - hasown "^2.0.0" + es-errors "^1.3.0" + +es-set-tostringtag@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777" + integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ== + dependencies: + get-intrinsic "^1.2.4" + has-tostringtag "^1.0.2" + hasown "^2.0.1" es-shim-unscopables@^1.0.0, es-shim-unscopables@^1.0.2: version "1.0.2" @@ -4058,13 +4108,14 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" -es5-ext@^0.10.35, es5-ext@^0.10.50: - version "0.10.62" - resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.62.tgz#5e6adc19a6da524bf3d1e02bbc8960e5eb49a9a5" - integrity sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA== +es5-ext@^0.10.35, es5-ext@^0.10.50, es5-ext@^0.10.62, es5-ext@^0.10.64, es5-ext@~0.10.14: + version "0.10.64" + resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.64.tgz#12e4ffb48f1ba2ea777f1fcdd1918ef73ea21714" + integrity sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg== dependencies: es6-iterator "^2.0.3" es6-symbol "^3.1.3" + esniff "^2.0.1" next-tick "^1.1.0" es6-iterator@^2.0.3: @@ -4082,12 +4133,12 @@ es6-promise@^4.2.8: integrity sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w== es6-symbol@^3.1.1, es6-symbol@^3.1.3: - version "3.1.3" - resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.3.tgz#bad5d3c1bcdac28269f4cb331e431c78ac705d18" - integrity sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA== + version "3.1.4" + resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.4.tgz#f4e7d28013770b4208ecbf3e0bf14d3bcb557b8c" + integrity sha512-U9bFFjX8tFiATgtkJ1zg25+KviIXpgRvRHS8sau3GfhVzThRQrOeksPeT0BWW2MNZs1OEWJ1DPXOQMn0KKRkvg== dependencies: - d "^1.0.1" - ext "^1.1.2" + d "^1.0.2" + ext "^1.7.0" escalade@^3.1.1: version "3.1.2" @@ -4146,9 +4197,9 @@ eslint-import-resolver-node@^0.3.9: resolve "^1.22.4" eslint-module-utils@^2.8.0: - version "2.8.0" - resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz#e439fee65fc33f6bba630ff621efc38ec0375c49" - integrity sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw== + version "2.8.1" + resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz#52f2404300c3bd33deece9d7372fb337cc1d7c34" + integrity sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q== dependencies: debug "^3.2.7" @@ -4213,15 +4264,15 @@ eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4 integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== eslint@^8.25.0: - version "8.56.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.56.0.tgz#4957ce8da409dc0809f99ab07a1b94832ab74b15" - integrity sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ== + version "8.57.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.57.0.tgz#c786a6fd0e0b68941aaf624596fb987089195668" + integrity sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ== dependencies: "@eslint-community/eslint-utils" "^4.2.0" "@eslint-community/regexpp" "^4.6.1" "@eslint/eslintrc" 
"^2.1.4" - "@eslint/js" "8.56.0" - "@humanwhocodes/config-array" "^0.11.13" + "@eslint/js" "8.57.0" + "@humanwhocodes/config-array" "^0.11.14" "@humanwhocodes/module-importer" "^1.0.1" "@nodelib/fs.walk" "^1.2.8" "@ungap/structured-clone" "^1.2.0" @@ -4256,6 +4307,16 @@ eslint@^8.25.0: strip-ansi "^6.0.1" text-table "^0.2.0" +esniff@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/esniff/-/esniff-2.0.1.tgz#a4d4b43a5c71c7ec51c51098c1d8a29081f9b308" + integrity sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg== + dependencies: + d "^1.0.1" + es5-ext "^0.10.62" + event-emitter "^0.3.5" + type "^2.7.2" + espree@^9.6.0, espree@^9.6.1: version "9.6.1" resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f" @@ -4397,6 +4458,14 @@ ethjs-unit@0.1.6: bn.js "4.11.6" number-to-bn "1.7.0" +event-emitter@^0.3.5: + version "0.3.5" + resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39" + integrity sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA== + dependencies: + d "1" + es5-ext "~0.10.14" + event-lite@^0.1.1: version "0.1.3" resolved "https://registry.yarnpkg.com/event-lite/-/event-lite-0.1.3.tgz#3dfe01144e808ac46448f0c19b4ab68e403a901d" @@ -4480,17 +4549,27 @@ express-handlebars@^6.0.6: graceful-fs "^4.2.10" handlebars "^4.7.7" +express-prometheus-middleware@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/express-prometheus-middleware/-/express-prometheus-middleware-1.2.0.tgz#ce913ffe4282411ab0d3d2db095771292801cb18" + integrity sha512-efSwft67rdtiW40D0im1f7Rz1TCGHGzPj6lfK0MxZDcPj6z4f/Ab5VNkWPYZEjvLqZiZ7fbS00CYzpigO8tS+g== + dependencies: + response-time "^2.3.2" + url-value-parser "^2.0.0" + optionalDependencies: + prometheus-gc-stats "^0.6.2" + express@^4.18.2: - version "4.18.2" - resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59" - integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ== + version "4.19.2" + resolved "https://registry.yarnpkg.com/express/-/express-4.19.2.tgz#e25437827a3aa7f2a827bc8171bbbb664a356465" + integrity sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q== dependencies: accepts "~1.3.8" array-flatten "1.1.1" - body-parser "1.20.1" + body-parser "1.20.2" content-disposition "0.5.4" content-type "~1.0.4" - cookie "0.5.0" + cookie "0.6.0" cookie-signature "1.0.6" debug "2.6.9" depd "2.0.0" @@ -4517,7 +4596,7 @@ express@^4.18.2: utils-merge "1.0.1" vary "~1.1.2" -ext@^1.1.2: +ext@^1.7.0: version "1.7.0" resolved "https://registry.yarnpkg.com/ext/-/ext-1.7.0.tgz#0ea4383c0103d60e70be99e9a7f11027a33c4f5f" integrity sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw== @@ -4525,9 +4604,9 @@ ext@^1.1.2: type "^2.7.2" fast-copy@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/fast-copy/-/fast-copy-3.0.1.tgz#9e89ef498b8c04c1cd76b33b8e14271658a732aa" - integrity sha512-Knr7NOtK3HWRYGtHoJrjkaWepqT8thIVGAwt0p0aUs1zqkAzXZV4vo9fFNwyb5fcqK1GKYFYxldQdIDVKhUAfA== + version "3.0.2" + resolved "https://registry.yarnpkg.com/fast-copy/-/fast-copy-3.0.2.tgz#59c68f59ccbcac82050ba992e0d5c389097c9d35" + integrity sha512-dl0O9Vhju8IrcLndv2eU4ldt1ftXMqqfgN4H1cpmGV7P6jeB9FwpN9a2c8DPGE1Ys88rNUJVYDHq73CGAGOPfQ== fast-deep-equal@^3.1.1, 
fast-deep-equal@^3.1.3: version "3.1.3" @@ -4561,9 +4640,9 @@ fast-levenshtein@^2.0.6: integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== fast-redact@^3.1.1: - version "3.3.0" - resolved "https://registry.yarnpkg.com/fast-redact/-/fast-redact-3.3.0.tgz#7c83ce3a7be4898241a46560d51de10f653f7634" - integrity sha512-6T5V1QK1u4oF+ATxs1lWUmlEk6P2T9HqJG3e2DnHOdVgZy2rFJBoEnrIedcTXlkAHU/zKC+7KETJ+KGGKwxgMQ== + version "3.5.0" + resolved "https://registry.yarnpkg.com/fast-redact/-/fast-redact-3.5.0.tgz#e9ea02f7e57d0cd8438180083e93077e496285e4" + integrity sha512-dwsoQlS7h9hMeYUq1W++23NDcBLV4KqONnITDV9DjfS3q1SgDGVrBdvvTLUotWtPSD7asWDV9/CmsZPy8Hf70A== fast-safe-stringify@^2.1.1: version "2.1.1" @@ -4651,14 +4730,14 @@ flat-cache@^3.0.4: rimraf "^3.0.2" flatted@^3.2.9: - version "3.2.9" - resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.9.tgz#7eb4c67ca1ba34232ca9d2d93e9886e611ad7daf" - integrity sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ== + version "3.3.1" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a" + integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw== -follow-redirects@^1.15.0, follow-redirects@^1.15.4: - version "1.15.5" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020" - integrity sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw== +follow-redirects@^1.15.0, follow-redirects@^1.15.6: + version "1.15.6" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b" + integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA== for-each@^0.3.3: version "0.3.3" @@ -4734,6 +4813,14 @@ functions-have-names@^1.2.3: resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== +gc-stats@^1.4.0: + version "1.4.1" + resolved "https://registry.yarnpkg.com/gc-stats/-/gc-stats-1.4.1.tgz#c9142dc54d02af9e93472b0bd972e6bacecc7be5" + integrity sha512-eAvDBpI6UjVIYwLxshPCJJIkPyfamIrJzBtW/103+ooJWkISS+chVnHNnsZ+ubaw2607rFeiRDNWHkNUA+ioqg== + dependencies: + nan "^2.18.0" + node-gyp-build "^4.8.0" + generic-pool@3.9.0: version "3.9.0" resolved "https://registry.yarnpkg.com/generic-pool/-/generic-pool-3.9.0.tgz#36f4a678e963f4fdb8707eab050823abc4e8f5e4" @@ -4749,7 +4836,7 @@ get-caller-file@^2.0.5: resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== -get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: +get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== @@ -4770,7 +4857,7 @@ get-stream@^6.0.0: resolved 
"https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== -get-symbol-description@^1.0.0: +get-symbol-description@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5" integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg== @@ -4903,24 +4990,24 @@ has-flag@^4.0.0: resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== -has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz#52ba30b6c5ec87fd89fa574bc1c39125c6f65340" - integrity sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg== +has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== dependencies: - get-intrinsic "^1.2.2" + es-define-property "^1.0.0" -has-proto@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" - integrity sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== +has-proto@^1.0.1, has-proto@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" + integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== has-symbols@^1.0.2, has-symbols@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== -has-tostringtag@^1.0.0, has-tostringtag@^1.0.1: +has-tostringtag@^1.0.0, has-tostringtag@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== @@ -4944,10 +5031,10 @@ hash.js@1.1.7, hash.js@^1.0.0, hash.js@^1.0.3, hash.js@^1.1.7: inherits "^2.0.3" minimalistic-assert "^1.0.1" -hasown@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" - integrity sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA== +hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== dependencies: function-bind "^1.1.2" @@ -5077,7 +5164,7 @@ int64-buffer@^0.1.9: resolved 
"https://registry.yarnpkg.com/int64-buffer/-/int64-buffer-0.1.10.tgz#277b228a87d95ad777d07c13832022406a473423" integrity sha512-v7cSY1J8ydZ0GyjUHqF+1bshJ6cnEVLo9EnjB8p+4HDRPZc9N5jjmvUV7NvEsqQOKyH0pmIBFWXVQbiS0+OBbA== -internal-slot@^1.0.5: +internal-slot@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802" integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g== @@ -5101,10 +5188,13 @@ ioredis@^5.2.4: redis-parser "^3.0.0" standard-as-callback "^2.1.0" -ip@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ip/-/ip-2.0.0.tgz#4cf4ab182fee2314c75ede1276f8c80b479936da" - integrity sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ== +ip-address@^9.0.5: + version "9.0.5" + resolved "https://registry.yarnpkg.com/ip-address/-/ip-address-9.0.5.tgz#117a960819b08780c3bd1f14ef3c1cc1d3f3ea5a" + integrity sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g== + dependencies: + jsbn "1.1.0" + sprintf-js "^1.1.3" ipaddr.js@1.9.1: version "1.9.1" @@ -5124,7 +5214,7 @@ is-arguments@^1.0.4: call-bind "^1.0.2" has-tostringtag "^1.0.0" -is-array-buffer@^3.0.2, is-array-buffer@^3.0.4: +is-array-buffer@^3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98" integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw== @@ -5171,6 +5261,13 @@ is-core-module@^2.1.0, is-core-module@^2.13.0, is-core-module@^2.13.1: dependencies: hasown "^2.0.0" +is-data-view@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f" + integrity sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w== + dependencies: + is-typed-array "^1.1.13" + is-date-object@^1.0.1: version "1.0.5" resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" @@ -5217,10 +5314,10 @@ is-hex-prefixed@1.0.0: resolved "https://registry.yarnpkg.com/is-hex-prefixed/-/is-hex-prefixed-1.0.0.tgz#7d8d37e6ad77e5d127148913c573e082d777f554" integrity sha512-WvtOiug1VFrE9v1Cydwm+FnXd3+w9GaeVUss5W4v/SLy3UW00vP+6iNF2SdnfiBoLy4bTqVdkftNGTUeOFVsbA== -is-negative-zero@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz#7bf6f03a28003b8b3965de3ac26f664d765f3150" - integrity sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA== +is-negative-zero@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747" + integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw== is-number-object@^1.0.4: version "1.0.7" @@ -5247,12 +5344,12 @@ is-regex@^1.1.4: call-bind "^1.0.2" has-tostringtag "^1.0.0" -is-shared-array-buffer@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79" - integrity sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA== +is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: + version "1.0.3" + resolved 
"https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688" + integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg== dependencies: - call-bind "^1.0.2" + call-bind "^1.0.7" is-stream@^1.1.0: version "1.1.0" @@ -5278,7 +5375,7 @@ is-symbol@^1.0.2, is-symbol@^1.0.3: dependencies: has-symbols "^1.0.2" -is-typed-array@^1.1.10, is-typed-array@^1.1.12, is-typed-array@^1.1.13, is-typed-array@^1.1.3, is-typed-array@^1.1.9: +is-typed-array@^1.1.13, is-typed-array@^1.1.3: version "1.1.13" resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229" integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw== @@ -5344,13 +5441,13 @@ istanbul-lib-instrument@^5.0.4: semver "^6.3.0" istanbul-lib-instrument@^6.0.0: - version "6.0.1" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz#71e87707e8041428732518c6fb5211761753fbdf" - integrity sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA== + version "6.0.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.2.tgz#91655936cf7380e4e473383081e38478b69993b1" + integrity sha512-1WUsZ9R1lA0HtBSohTkm39WTPlNKSJ5iFk7UwqXkBLoHQT+hfqPsfsTDVuZdKGaBwn7din9bS7SsnoAr943hvw== dependencies: - "@babel/core" "^7.12.3" - "@babel/parser" "^7.14.7" - "@istanbuljs/schema" "^0.1.2" + "@babel/core" "^7.23.9" + "@babel/parser" "^7.23.9" + "@istanbuljs/schema" "^0.1.3" istanbul-lib-coverage "^3.2.0" semver "^7.5.4" @@ -5373,9 +5470,9 @@ istanbul-lib-source-maps@^4.0.0: source-map "^0.6.1" istanbul-reports@^3.1.3: - version "3.1.6" - resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.6.tgz#2544bcab4768154281a2f0870471902704ccaa1a" - integrity sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg== + version "3.1.7" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.7.tgz#daed12b9e1dca518e15c056e1e537e741280fa0b" + integrity sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g== dependencies: html-escaper "^2.0.0" istanbul-lib-report "^3.0.0" @@ -5744,9 +5841,9 @@ jju@~1.4.0: integrity sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA== joi@^17.6.3: - version "17.12.1" - resolved "https://registry.yarnpkg.com/joi/-/joi-17.12.1.tgz#3347ecf4cd3301962d42191c021b165eef1f395b" - integrity sha512-vtxmq+Lsc5SlfqotnfVjlViWfOL9nt/avKNbKYizwf6gsCfq9NYY/ceYRMFD8XDdrjJ9abJyScWmhmIiy+XRtQ== + version "17.12.2" + resolved "https://registry.yarnpkg.com/joi/-/joi-17.12.2.tgz#283a664dabb80c7e52943c557aab82faea09f521" + integrity sha512-RonXAIzCiHLc8ss3Ibuz45u28GOsWE1UpfDXLbN/9NKbL4tCJf8TWYVKsoYuuh+sAUt7fsSNpA+r2+TBA6Wjmw== dependencies: "@hapi/hoek" "^9.3.0" "@hapi/topo" "^5.1.0" @@ -5784,6 +5881,11 @@ js-yaml@^4.1.0: dependencies: argparse "^2.0.1" +jsbn@1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/jsbn/-/jsbn-1.1.0.tgz#b01307cb29b618a1ed26ec79e911f803c4da0040" + integrity sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A== + jsesc@^2.5.1: version "2.5.2" resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" @@ -6164,18 +6266,18 @@ mongodb@4.17.2: 
"@mongodb-js/saslprep" "^1.1.0" mongodb@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/mongodb/-/mongodb-6.3.0.tgz#ec9993b19f7ed2ea715b903fcac6171c9d1d38ca" - integrity sha512-tt0KuGjGtLUhLoU263+xvQmPHEGTw5LbcNC73EoFRYgSHwZt5tsoJC110hDyO1kjQzpgNrpdcSza9PknWN4LrA== + version "6.5.0" + resolved "https://registry.yarnpkg.com/mongodb/-/mongodb-6.5.0.tgz#3735b4fba085b26ca06f7744e9639bc538e93d87" + integrity sha512-Fozq68InT+JKABGLqctgtb8P56pRrJFkbhW0ux+x1mdHeyinor8oNzJqwLjV/t5X5nJGfTlluxfyMnOXNggIUA== dependencies: - "@mongodb-js/saslprep" "^1.1.0" - bson "^6.2.0" + "@mongodb-js/saslprep" "^1.1.5" + bson "^6.4.0" mongodb-connection-string-url "^3.0.0" mongoose@^6.6.5: - version "6.12.6" - resolved "https://registry.yarnpkg.com/mongoose/-/mongoose-6.12.6.tgz#7474de5f125f88d372ac759116dc57aed5a22cb1" - integrity sha512-VFxDnWj8esgswwplmpQYMT+lYcvuIhl76WDLz/vgp41/FOhBPM/n3GjyztK8R3r2ljsM6kudvKgqLhfcZEih1Q== + version "6.12.7" + resolved "https://registry.yarnpkg.com/mongoose/-/mongoose-6.12.7.tgz#97adb534424b2a87a440a592913aae1c12068fc4" + integrity sha512-v3AkUsgHspF8/R4ph5YaF8g+36O7LbvGwlPBbi5VDx+ocT1+t5+HJ8ZqG864l7KwJtWmuWTGGSSz/jg+ydXq3g== dependencies: bson "^4.7.2" kareem "2.5.1" @@ -6222,6 +6324,11 @@ msgpack-lite@^0.1.26: int64-buffer "^0.1.9" isarray "^1.0.0" +nan@^2.18.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.19.0.tgz#bb58122ad55a6c5bc973303908d5b16cfdd5a8c0" + integrity sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw== + natural-compare-lite@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz#17b09581988979fddafe0201e931ba933c96cbb4" @@ -6281,7 +6388,7 @@ node-gyp-build@<4.0, node-gyp-build@^3.9.0: resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-3.9.0.tgz#53a350187dd4d5276750da21605d1cb681d09e25" integrity sha512-zLcTg6P4AbcHPq465ZMFNXx7XpKKJh+7kkN699NiQWisR2uWYOWNWqRHAmbnmKiL4e9aLSlmy5U7rEMUXV59+A== -node-gyp-build@^4.2.0, node-gyp-build@^4.3.0, node-gyp-build@^4.5.0: +node-gyp-build@^4.2.0, node-gyp-build@^4.3.0, node-gyp-build@^4.5.0, node-gyp-build@^4.8.0: version "4.8.0" resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.0.tgz#3fee9c1731df4581a3f9ead74664369ff00d26dd" integrity sha512-u6fs2AEUljNho3EYTJNBfImO5QTo/J/1Etd+NVdCj7qWKUSN/bSLkZwhDv7I+w/MSC6qJ4cknepkAYykDdK8og== @@ -6297,9 +6404,9 @@ node-releases@^2.0.14: integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw== nodemon@^3.0.1: - version "3.0.3" - resolved "https://registry.yarnpkg.com/nodemon/-/nodemon-3.0.3.tgz#244a62d1c690eece3f6165c6cdb0db03ebd80b76" - integrity sha512-7jH/NXbFPxVaMwmBCC2B9F/V6X1VkEdNgx3iu9jji8WxWcvhMWkmhNWhI5077zknOnZnBzba9hZP6bCPJLSReQ== + version "3.1.0" + resolved "https://registry.yarnpkg.com/nodemon/-/nodemon-3.1.0.tgz#ff7394f2450eb6a5e96fe4180acd5176b29799c9" + integrity sha512-xqlktYlDMCepBJd43ZQhjWwMw2obW/JRvkrLxq5RCNcuDDX1DbcPT+qT1IlIIdf+DhnWs90JpTMe+Y5KxOchvA== dependencies: chokidar "^3.5.2" debug "^4" @@ -6359,7 +6466,7 @@ object-keys@^1.1.1: resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== -object.assign@^4.1.2, object.assign@^4.1.4: +object.assign@^4.1.2, object.assign@^4.1.5: version "4.1.5" resolved 
"https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0" integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ== @@ -6370,42 +6477,41 @@ object.assign@^4.1.2, object.assign@^4.1.4: object-keys "^1.1.1" object.entries@^1.1.5: - version "1.1.7" - resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.7.tgz#2b47760e2a2e3a752f39dd874655c61a7f03c131" - integrity sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA== + version "1.1.8" + resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.8.tgz#bffe6f282e01f4d17807204a24f8edd823599c41" + integrity sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" object.fromentries@^2.0.7: - version "2.0.7" - resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.7.tgz#71e95f441e9a0ea6baf682ecaaf37fa2a8d7e616" - integrity sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA== + version "2.0.8" + resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.8.tgz#f7195d8a9b97bd95cbc1999ea939ecd1a2b00c65" + integrity sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.2" + es-object-atoms "^1.0.0" object.groupby@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/object.groupby/-/object.groupby-1.0.2.tgz#494800ff5bab78fd0eff2835ec859066e00192ec" - integrity sha512-bzBq58S+x+uo0VjurFT0UktpKHOZmv4/xePiOA1nbB9pMqpGK7rUPNgf+1YC+7mE+0HzhTMqNUuCqvKhj6FnBw== + version "1.0.3" + resolved "https://registry.yarnpkg.com/object.groupby/-/object.groupby-1.0.3.tgz#9b125c36238129f6f7b61954a1e7176148d5002e" + integrity sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ== dependencies: - array.prototype.filter "^1.0.3" - call-bind "^1.0.5" + call-bind "^1.0.7" define-properties "^1.2.1" - es-abstract "^1.22.3" - es-errors "^1.0.0" + es-abstract "^1.23.2" object.values@^1.1.7: - version "1.1.7" - resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.7.tgz#617ed13272e7e1071b43973aa1655d9291b8442a" - integrity sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng== + version "1.2.0" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.2.0.tgz#65405a9d92cee68ac2d303002e0b8470a4d9ab1b" + integrity sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" oboe@2.1.5: version "2.1.5" @@ -6426,6 +6532,11 @@ on-finished@2.4.1: dependencies: ee-first "1.1.1" +on-headers@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== + once@^1.3.0, once@^1.3.1, once@^1.4.0: version "1.4.0" 
resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" @@ -6445,6 +6556,11 @@ opentracing@>=0.12.1: resolved "https://registry.yarnpkg.com/opentracing/-/opentracing-0.14.7.tgz#25d472bd0296dc0b64d7b94cbc995219031428f5" integrity sha512-vz9iS7MJ5+Bp1URw8Khvdyw1H/hGvzHWlKQ7eRrQojSCDL1/SrWfrY9QebLw97n2deyRtzHRC3MkQfVNUCo91Q== +optional@^0.1.3: + version "0.1.4" + resolved "https://registry.yarnpkg.com/optional/-/optional-0.1.4.tgz#cdb1a9bedc737d2025f690ceeb50e049444fd5b3" + integrity sha512-gtvrrCfkE08wKcgXaVwQVgwEQ8vel2dc5DDBn9RLQZ3YtmtkBss6A2HY6BnJH4N/4Ku97Ri/SF8sNWE2225WJw== + optionator@^0.9.3: version "0.9.3" resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" @@ -6640,9 +6756,9 @@ pino-std-serializers@^6.0.0, pino-std-serializers@^6.2.2: integrity sha512-cHjPPsE+vhj/tnhCy/wiMh3M3z3h/j15zHQX+S9GkTBgqJuTuJzYJ4gUyACLhDaJ7kk9ba9iRDmbH2tJU03OiA== pino@*, pino@^8.15.6, pino@^8.17.1: - version "8.18.0" - resolved "https://registry.yarnpkg.com/pino/-/pino-8.18.0.tgz#f2bfbb4e827ed2049ee1e88372268efdcd1505f6" - integrity sha512-Mz/gKiRyuXu4HnpHgi1YWdHQCoWMufapzooisvFn78zl4dZciAxS+YeRkUxXl1ee/SzU80YCz1zpECCh4oC6Aw== + version "8.19.0" + resolved "https://registry.yarnpkg.com/pino/-/pino-8.19.0.tgz#ccc15ef736f103ec02cfbead0912bc436dc92ce4" + integrity sha512-oswmokxkav9bADfJ2ifrvfHUwad6MLp73Uat0IkQWY3iAw5xTRoznXbXksZs8oaOUMpmhVWD+PZogNzllWpJaA== dependencies: atomic-sleep "^1.0.0" fast-redact "^3.1.1" @@ -6668,10 +6784,15 @@ pkg-dir@^4.2.0: dependencies: find-up "^4.0.0" -pprof-format@^2.0.7: - version "2.0.7" - resolved "https://registry.yarnpkg.com/pprof-format/-/pprof-format-2.0.7.tgz#526e4361f8b37d16b2ec4bb0696b5292de5046a4" - integrity sha512-1qWaGAzwMpaXJP9opRa23nPnt2Egi7RMNoNBptEE/XwHbcn4fC2b/4U4bKc5arkGkIh2ZabpF2bEb+c5GNHEKA== +possible-typed-array-names@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" + integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + +pprof-format@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/pprof-format/-/pprof-format-2.1.0.tgz#acc8d7773bcf4faf0a3d3df11bceefba7ac06664" + integrity sha512-0+G5bHH0RNr8E5hoZo/zJYsL92MhkZjwrHp3O2IxmY8RJL9ooKeuZ8Tm0ZNBw5sGZ9TiM71sthTjWoR2Vf5/xw== prelude-ls@^1.2.1: version "1.2.1" @@ -6714,6 +6835,23 @@ process@^0.11.10: resolved "https://registry.yarnpkg.com/process/-/process-0.11.10.tgz#7332300e840161bda3e69a1d1d91a7d4bc16f182" integrity sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A== +prom-client@^15.1.0: + version "15.1.1" + resolved "https://registry.yarnpkg.com/prom-client/-/prom-client-15.1.1.tgz#71ba84371241acd173181b04a436782c246f3652" + integrity sha512-GVA2H96QCg2q71rjc3VYvSrVG7OpnJxyryC7dMzvfJfpJJHzQVwF3TJLfHzKORcwJpElWs1TwXLthlJAFJxq2A== + dependencies: + "@opentelemetry/api" "^1.4.0" + tdigest "^0.1.1" + +prometheus-gc-stats@^0.6.2: + version "0.6.5" + resolved "https://registry.yarnpkg.com/prometheus-gc-stats/-/prometheus-gc-stats-0.6.5.tgz#c24309f586f4483b67438f0464cd7e3074e023a0" + integrity sha512-VsmpEGHt9mMZqlhL+96gz2LsaXEgu2SXQ/tiEqIBLPoUTyPORDNsEiH9DPPZHChdkTTBw3GRV1wGvqdIg4EktQ== + dependencies: + optional "^0.1.3" + optionalDependencies: + gc-stats "^1.4.0" + prompts@^2.0.1: version "2.4.2" resolved 
"https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" @@ -6772,9 +6910,9 @@ punycode@^2.1.0, punycode@^2.1.1, punycode@^2.3.0: integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== pure-rand@^6.0.0: - version "6.0.4" - resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" - integrity sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA== + version "6.1.0" + resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" + integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== qs@6.11.0: version "6.11.0" @@ -6810,10 +6948,10 @@ range-parser@~1.2.1: resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== -raw-body@2.5.1: - version "2.5.1" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" - integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== +raw-body@2.5.2: + version "2.5.2" + resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a" + integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA== dependencies: bytes "3.1.2" http-errors "2.0.0" @@ -6908,14 +7046,15 @@ regenerator-runtime@^0.14.0: resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== -regexp.prototype.flags@^1.5.1: - version "1.5.1" - resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.1.tgz#90ce989138db209f81492edd734183ce99f9677e" - integrity sha512-sy6TXMN+hnP/wMy+ISxg3krXx7BAtWVO4UouuCN/ziM9UEne0euamVNafDfvC83bRNr95y0V5iijeDQFUNpvrg== +regexp.prototype.flags@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334" + integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - set-function-name "^2.0.0" + call-bind "^1.0.6" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.1" require-directory@^2.1.1: version "2.1.1" @@ -6966,6 +7105,14 @@ resolve@~1.19.0: is-core-module "^2.1.0" path-parse "^1.0.6" +response-time@^2.3.2: + version "2.3.2" + resolved "https://registry.yarnpkg.com/response-time/-/response-time-2.3.2.tgz#ffa71bab952d62f7c1d49b7434355fbc68dffc5a" + integrity sha512-MUIDaDQf+CVqflfTdQ5yam+aYCkXj1PY8fjlPDQ6ppxJlmgZb864pHtA750mayywNg8tx4rS7qH9JXd/OF+3gw== + dependencies: + depd "~1.1.0" + on-headers "~1.0.1" + rest-api-errors@^1.2.5: version "1.3.1" resolved "https://registry.yarnpkg.com/rest-api-errors/-/rest-api-errors-1.3.1.tgz#a206da021bcc908b716e2208063f09e052c49b40" @@ -7017,13 +7164,13 @@ rxjs@^7.0.0: dependencies: tslib "^2.1.0" -safe-array-concat@^1.0.1: - version "1.1.0" - resolved 
"https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.0.tgz#8d0cae9cb806d6d1c06e08ab13d847293ebe0692" - integrity sha512-ZdQ0Jeb9Ofti4hbt5lX3T2JcAamT9hfzYU1MNB+z/jaEbB6wfFfPIR/zEORmZqobkCCJhSjodobH6WHNmJ97dg== +safe-array-concat@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb" + integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q== dependencies: - call-bind "^1.0.5" - get-intrinsic "^1.2.2" + call-bind "^1.0.7" + get-intrinsic "^1.2.4" has-symbols "^1.0.3" isarray "^2.0.5" @@ -7037,7 +7184,7 @@ safe-buffer@~5.1.2: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== -safe-regex-test@^1.0.0: +safe-regex-test@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377" integrity sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw== @@ -7123,26 +7270,27 @@ serve-static@1.15.0: parseurl "~1.3.3" send "0.18.0" -set-function-length@^1.2.0: - version "1.2.1" - resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.1.tgz#47cc5945f2c771e2cf261c6737cf9684a2a5e425" - integrity sha512-j4t6ccc+VsKwYHso+kElc5neZpjtq9EnRICFZtWyBsLojhmeF/ZBd/elqm22WJh/BziDe/SBiOeAt0m2mfLD0g== +set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== dependencies: - define-data-property "^1.1.2" + define-data-property "^1.1.4" es-errors "^1.3.0" function-bind "^1.1.2" - get-intrinsic "^1.2.3" + get-intrinsic "^1.2.4" gopd "^1.0.1" - has-property-descriptors "^1.0.1" + has-property-descriptors "^1.0.2" -set-function-name@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.1.tgz#12ce38b7954310b9f61faa12701620a0c882793a" - integrity sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA== +set-function-name@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== dependencies: - define-data-property "^1.0.1" + define-data-property "^1.1.4" + es-errors "^1.3.0" functions-have-names "^1.2.3" - has-property-descriptors "^1.0.0" + has-property-descriptors "^1.0.2" setimmediate@^1.0.5: version "1.0.5" @@ -7174,7 +7322,7 @@ shebang-regex@^3.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== -shell-quote@^1.7.3: +shell-quote@^1.7.3, shell-quote@^1.8.1: version "1.8.1" resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.1.tgz#6dbf4db75515ad5bac63b4f1894c3a154c766680" integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== @@ -7190,11 +7338,11 @@ shiki@^0.14.7: 
vscode-textmate "^8.0.0" side-channel@^1.0.4: - version "1.0.5" - resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.5.tgz#9a84546599b48909fb6af1211708d23b1946221b" - integrity sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ== + version "1.0.6" + resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== dependencies: - call-bind "^1.0.6" + call-bind "^1.0.7" es-errors "^1.3.0" get-intrinsic "^1.2.4" object-inspect "^1.13.1" @@ -7232,11 +7380,11 @@ smart-buffer@^4.2.0: integrity sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg== socks@^2.7.1: - version "2.7.1" - resolved "https://registry.yarnpkg.com/socks/-/socks-2.7.1.tgz#d8e651247178fde79c0663043e07240196857d55" - integrity sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ== + version "2.8.1" + resolved "https://registry.yarnpkg.com/socks/-/socks-2.8.1.tgz#22c7d9dd7882649043cba0eafb49ae144e3457af" + integrity sha512-B6w7tkwNid7ToxjZ08rQMT8M9BJAf8DKx8Ft4NivzH0zBUfd6jldGcisJn/RLgxcX3FPNDdNQCUEMMT79b+oCQ== dependencies: - ip "^2.0.0" + ip-address "^9.0.5" smart-buffer "^4.2.0" sonic-boom@^3.0.0, sonic-boom@^3.7.0: @@ -7281,6 +7429,11 @@ split2@^4.0.0: resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4" integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg== +sprintf-js@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.1.3.tgz#4914b903a2f8b685d17fdf78a70e917e872e444a" + integrity sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA== + sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -7325,32 +7478,33 @@ string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: is-fullwidth-code-point "^3.0.0" strip-ansi "^6.0.1" -string.prototype.trim@^1.2.8: - version "1.2.8" - resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz#f9ac6f8af4bd55ddfa8895e6aea92a96395393bd" - integrity sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ== +string.prototype.trim@^1.2.9: + version "1.2.9" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz#b6fa326d72d2c78b6df02f7759c73f8f6274faa4" + integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.0" + es-object-atoms "^1.0.0" -string.prototype.trimend@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz#1bb3afc5008661d73e2dc015cd4853732d6c471e" - integrity sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA== +string.prototype.trimend@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz#3651b8513719e8a9f48de7f2f77640b26652b229" + integrity 
sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" string.prototype.trimstart@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz#d4cdb44b83a4737ffbac2d406e405d43d0184298" - integrity sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg== + version "1.0.8" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde" + integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg== dependencies: - call-bind "^1.0.2" - define-properties "^1.2.0" - es-abstract "^1.22.1" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-object-atoms "^1.0.0" string_decoder@^1.1.1, string_decoder@^1.3.0: version "1.3.0" @@ -7447,6 +7601,13 @@ table-layout@^1.0.2: typical "^5.2.0" wordwrapjs "^4.0.0" +tdigest@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/tdigest/-/tdigest-0.1.2.tgz#96c64bac4ff10746b910b0e23b515794e12faced" + integrity sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA== + dependencies: + bintrees "1.0.2" + test-exclude@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" @@ -7594,7 +7755,7 @@ tslib@^1.11.1, tslib@^1.8.1: resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.1.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0, tslib@^2.6.2: +tslib@^2.1.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.6.2: version "2.6.2" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== @@ -7636,11 +7797,6 @@ type-is@~1.6.18: media-typer "0.3.0" mime-types "~2.1.24" -type@^1.0.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/type/-/type-1.2.0.tgz#848dd7698dafa3e54a6c479e759c4bc3f18847a0" - integrity sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg== - type@^2.7.2: version "2.7.2" resolved "https://registry.yarnpkg.com/type/-/type-2.7.2.tgz#2376a15a3a28b1efa0f5350dcf72d24df6ef98d0" @@ -7662,44 +7818,49 @@ typechain@^8.1.1: ts-command-line-args "^2.2.0" ts-essentials "^7.0.1" -typed-array-buffer@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.1.tgz#0608ffe6bca71bf15a45bff0ca2604107a1325f5" - integrity sha512-RSqu1UEuSlrBhHTWC8O9FnPjOduNs4M7rJ4pRKoEjtx1zUNOPN2sSXHLDX+Y2WPbHIxbvg4JFo2DNAEfPIKWoQ== +typed-array-buffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3" + integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ== dependencies: - call-bind "^1.0.6" + call-bind "^1.0.7" es-errors "^1.3.0" is-typed-array "^1.1.13" -typed-array-byte-length@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz#d787a24a995711611fb2b87a4052799517b230d0" - integrity sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA== +typed-array-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67" + integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw== dependencies: - call-bind "^1.0.2" + call-bind "^1.0.7" for-each "^0.3.3" - has-proto "^1.0.1" - is-typed-array "^1.1.10" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" -typed-array-byte-offset@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz#cbbe89b51fdef9cd6aaf07ad4707340abbc4ea0b" - integrity sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg== +typed-array-byte-offset@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063" + integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA== dependencies: - available-typed-arrays "^1.0.5" - call-bind "^1.0.2" + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" for-each "^0.3.3" - has-proto "^1.0.1" - is-typed-array "^1.1.10" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" -typed-array-length@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.4.tgz#89d83785e5c4098bec72e08b319651f0eac9c1bb" - integrity sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng== +typed-array-length@^1.0.5: + version "1.0.6" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3" + integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g== dependencies: - call-bind "^1.0.2" + call-bind "^1.0.7" for-each "^0.3.3" - is-typed-array "^1.1.9" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + possible-typed-array-names "^1.0.0" typedarray-to-buffer@^3.1.5: version "3.1.5" @@ -7709,9 +7870,9 @@ typedarray-to-buffer@^3.1.5: is-typedarray "^1.0.0" typedoc@^0.25.7: - version "0.25.8" - resolved "https://registry.yarnpkg.com/typedoc/-/typedoc-0.25.8.tgz#7d0e1bf12d23bf1c459fd4893c82cb855911ff12" - integrity sha512-mh8oLW66nwmeB9uTa0Bdcjfis+48bAjSH3uqdzSuSawfduROQLlXw//WSNZLYDdhmMVB7YcYZicq6e8T0d271A== + version "0.25.12" + resolved "https://registry.yarnpkg.com/typedoc/-/typedoc-0.25.12.tgz#f73f0a8d3731d418cc604d4230f95a857799e27a" + integrity sha512-F+qhkK2VoTweDXd1c42GS/By2DvI2uDF4/EpG424dTexSHdtCH52C6IcAvMA6jR3DzAWZjHpUOW+E02kyPNUNw== dependencies: lunr "^2.3.9" marked "^4.3.0" @@ -7719,9 +7880,9 @@ typedoc@^0.25.7: shiki "^0.14.7" typescript@^5.3.3: - version "5.3.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.3.3.tgz#b3ce6ba258e72e6305ba66f5c9b452aaee3ffe37" - integrity sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw== + version "5.4.3" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.3.tgz#5c6fedd4c87bee01cd7a528a30145521f8e0feff" + integrity 
sha512-KrPd3PKaCLr78MalgiwJnA25Nm8HAmdwN3mYUYZgG/wizIo9EainNVQI9/yDavtVFRN2h3k8uf3GLHuhDMgEHg== typical@^4.0.0: version "4.0.0" @@ -7791,6 +7952,11 @@ url-parse@~1.5.10: querystringify "^2.1.1" requires-port "^1.0.0" +url-value-parser@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/url-value-parser/-/url-value-parser-2.2.0.tgz#f38ae8cd24604ec69bc219d66929ddbbd93a2b32" + integrity sha512-yIQdxJpgkPamPPAPuGdS7Q548rLhny42tg8d4vyTNzFqvOnwqrgHXvgehT09U7fwrzxi3RxCiXjoNUNnNOlQ8A== + utf-8-validate@^5.0.2: version "5.0.10" resolved "https://registry.yarnpkg.com/utf-8-validate/-/utf-8-validate-5.0.10.tgz#d7d10ea39318171ca982718b6b96a8d2442571a2" @@ -7868,9 +8034,9 @@ viem@^1.21.4: ws "8.13.0" viem@^2.7.8: - version "2.7.9" - resolved "https://registry.yarnpkg.com/viem/-/viem-2.7.9.tgz#0d2b0c4722530b53fbef449d70f0cedc1bb867b0" - integrity sha512-iDfc8TwaZFp1K95zlsxYh6Cs0OWCt35Tqs8uYgXKSxtz7w075mZ0H5SJ8zSyJGoEaticVDhtdmRRX6TtcW9EeQ== + version "2.9.3" + resolved "https://registry.yarnpkg.com/viem/-/viem-2.9.3.tgz#7de5253b9a9907b4db0f3a4444677b2cdf743589" + integrity sha512-HVw0JLFCAerIt2pavToQdFS73a42L1Lhenh7YN8tIOtOdr/N7FhOqbzs3+PuEBJiRy/MJVrz9aWb5smulaTcWQ== dependencies: "@adraffy/ens-normalize" "1.10.0" "@noble/curves" "1.2.0" @@ -8062,16 +8228,16 @@ which-boxed-primitive@^1.0.2: is-string "^1.0.5" is-symbol "^1.0.3" -which-typed-array@^1.1.13, which-typed-array@^1.1.14, which-typed-array@^1.1.2: - version "1.1.14" - resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.14.tgz#1f78a111aee1e131ca66164d8bdc3ab062c95a06" - integrity sha512-VnXFiIW8yNn9kIHN88xvZ4yOWchftKDsRJ8fEPacX/wl1lOvBrhsJ/OeJCXq7B0AaijRuqgzSKalJoPk+D8MPg== +which-typed-array@^1.1.14, which-typed-array@^1.1.15, which-typed-array@^1.1.2: + version "1.1.15" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d" + integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA== dependencies: - available-typed-arrays "^1.0.6" - call-bind "^1.0.5" + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" for-each "^0.3.3" gopd "^1.0.1" - has-tostringtag "^1.0.1" + has-tostringtag "^1.0.2" which@^2.0.1: version "2.0.2"